├── kafka ├── vendor │ ├── __init__.py │ └── socketpair.py ├── coordinator │ ├── __init__.py │ ├── assignors │ │ ├── __init__.py │ │ ├── sticky │ │ │ ├── __init__.py │ │ │ └── sorted_set.py │ │ ├── abstract.py │ │ └── range.py │ ├── protocol.py │ └── heartbeat.py ├── version.py ├── oauth │ ├── __init__.py │ └── abstract.py ├── serializer │ ├── __init__.py │ └── abstract.py ├── record │ ├── __init__.py │ └── README ├── consumer │ └── __init__.py ├── producer │ ├── __init__.py │ └── future.py ├── partitioner │ ├── __init__.py │ └── default.py ├── metrics │ ├── stats │ │ ├── percentile.py │ │ ├── total.py │ │ ├── count.py │ │ ├── max_stat.py │ │ ├── min_stat.py │ │ ├── __init__.py │ │ ├── avg.py │ │ ├── percentiles.py │ │ └── histogram.py │ ├── measurable_stat.py │ ├── __init__.py │ ├── stat.py │ ├── measurable.py │ ├── compound_stat.py │ ├── kafka_metric.py │ ├── metric_config.py │ ├── quota.py │ ├── metrics_reporter.py │ └── dict_reporter.py ├── protocol │ ├── abstract.py │ ├── frame.py │ ├── pickle.py │ ├── __init__.py │ └── struct.py ├── admin │ ├── __init__.py │ ├── new_partitions.py │ ├── config_resource.py │ └── new_topic.py ├── __init__.py ├── util.py ├── future.py ├── structs.py └── scram.py ├── .covrc ├── setup.cfg ├── servers ├── 1.0.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 1.0.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 1.0.2 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 1.1.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 1.1.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.0.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.0.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.1.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.1.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.2.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.3.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.4.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.5.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── trunk │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.10.0.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.10.0.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.10.1.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.10.2.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.10.2.2 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.11.0.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.11.0.1 │ └── resources │ │ ├── kafka_server_jaas.conf │ 
│ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.11.0.2 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.11.0.3 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 2.6.0 │ └── resources │ │ ├── kafka_server_jaas.conf │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.8.0 │ └── resources │ │ ├── zookeeper.properties │ │ ├── log4j.properties │ │ └── kafka.properties ├── 0.8.1 │ └── resources │ │ ├── zookeeper.properties │ │ ├── log4j.properties │ │ └── kafka.properties ├── 0.8.1.1 │ └── resources │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.8.2.0 │ └── resources │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.8.2.1 │ └── resources │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.8.2.2 │ └── resources │ │ ├── zookeeper.properties │ │ └── log4j.properties ├── 0.9.0.0 │ └── resources │ │ ├── zookeeper.properties │ │ └── log4j.properties └── 0.9.0.1 │ └── resources │ ├── zookeeper.properties │ └── log4j.properties ├── docs ├── apidoc │ ├── KafkaClient.rst │ ├── KafkaConsumer.rst │ ├── KafkaProducer.rst │ ├── KafkaAdminClient.rst │ ├── BrokerConnection.rst │ ├── ClusterMetadata.rst │ └── modules.rst ├── requirements.txt ├── license.rst ├── support.rst ├── compatibility.rst ├── install.rst └── tests.rst ├── MANIFEST.in ├── pylint.rc ├── .gitignore ├── benchmarks ├── README.md ├── load_example.py ├── record_batch_compose.py └── record_batch_read.py ├── test ├── __init__.py ├── test_cluster.py ├── test_api_object_implementation.py ├── test_subscription_state.py ├── test_partition_movements.py ├── test_package.py ├── test_consumer.py ├── test_partitioner.py ├── testutil.py ├── test_sender.py ├── test_acl_comparisons.py ├── test_sasl_integration.py └── test_admin.py ├── requirements-dev.txt ├── travis_java_install.sh ├── .travis.yml ├── tox.ini ├── Makefile ├── setup.py ├── example.py └── AUTHORS.md /kafka/vendor/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kafka/coordinator/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kafka/coordinator/assignors/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kafka/coordinator/assignors/sticky/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /kafka/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '2.0.3-dev' 2 | -------------------------------------------------------------------------------- /.covrc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = 3 | kafka/vendor/* 4 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal=1 3 | 4 | [metadata] 5 | license_file = LICENSE 6 | -------------------------------------------------------------------------------- /servers/1.0.0/resources/kafka_server_jaas.conf: 
-------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.0.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.0.2/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.1.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.1.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.0.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.0.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.1.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.1.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.2.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.3.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.4.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.5.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/trunk/resources/kafka_server_jaas.conf: 
-------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.0.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.0.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.1.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.2.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.2.2/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.11.0.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.11.0.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.11.0.2/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.11.0.3/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.6.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; 5 | -------------------------------------------------------------------------------- /docs/apidoc/KafkaClient.rst: -------------------------------------------------------------------------------- 1 | KafkaClient 2 | =========== 3 | 4 | .. autoclass:: kafka.KafkaClient 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/apidoc/KafkaConsumer.rst: -------------------------------------------------------------------------------- 1 | KafkaConsumer 2 | ============= 3 | 4 | .. 
autoclass:: kafka.KafkaConsumer
5 |    :members:
6 |
--------------------------------------------------------------------------------
/docs/apidoc/KafkaProducer.rst:
--------------------------------------------------------------------------------
1 | KafkaProducer
2 | =============
3 |
4 | .. autoclass:: kafka.KafkaProducer
5 |    :members:
6 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include kafka *.py
2 | include README.rst
3 | include LICENSE
4 | include AUTHORS.md
5 | include CHANGES.md
6 |
--------------------------------------------------------------------------------
/kafka/oauth/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from kafka.oauth.abstract import AbstractTokenProvider
4 |
--------------------------------------------------------------------------------
/docs/apidoc/KafkaAdminClient.rst:
--------------------------------------------------------------------------------
1 | KafkaAdminClient
2 | ================
3 |
4 | .. autoclass:: kafka.KafkaAdminClient
5 |    :members:
6 |
--------------------------------------------------------------------------------
/docs/apidoc/BrokerConnection.rst:
--------------------------------------------------------------------------------
1 | BrokerConnection
2 | ================
3 |
4 | .. autoclass:: kafka.BrokerConnection
5 |    :members:
6 |
--------------------------------------------------------------------------------
/docs/apidoc/ClusterMetadata.rst:
--------------------------------------------------------------------------------
1 | ClusterMetadata
2 | ===============
3 |
4 | .. autoclass:: kafka.cluster.ClusterMetadata
5 |    :members:
6 |
--------------------------------------------------------------------------------
/kafka/serializer/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from kafka.serializer.abstract import Serializer, Deserializer
4 |
--------------------------------------------------------------------------------
/kafka/record/__init__.py:
--------------------------------------------------------------------------------
1 | from kafka.record.memory_records import MemoryRecords, MemoryRecordsBuilder
2 |
3 | __all__ = ["MemoryRecords", "MemoryRecordsBuilder"]
4 |
--------------------------------------------------------------------------------
/kafka/consumer/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from kafka.consumer.group import KafkaConsumer
4 |
5 | __all__ = [
6 |     'KafkaConsumer'
7 | ]
8 |
--------------------------------------------------------------------------------
/kafka/producer/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from kafka.producer.kafka import KafkaProducer
4 |
5 | __all__ = [
6 |     'KafkaProducer'
7 | ]
8 |
--------------------------------------------------------------------------------
/pylint.rc:
--------------------------------------------------------------------------------
1 | [TYPECHECK]
2 | ignored-classes=SyncManager,_socketobject
3 | ignored-modules=kafka.vendor.six.moves
4 | generated-members=py.*
5 |
6 | [MESSAGES CONTROL]
7 | disable=E1129
8 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 | sphinx_rtd_theme
3 |
4 | # Install kafka-python in editable mode
5 | # This allows the sphinx autodoc module
6 | # to load the Python modules and extract docstrings.
7 | # -e ..
8 |
--------------------------------------------------------------------------------
/kafka/partitioner/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from kafka.partitioner.default import DefaultPartitioner, murmur2
4 |
5 |
6 | __all__ = [
7 |     'DefaultPartitioner', 'murmur2'
8 | ]
9 |
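murmur2 is the hash the default partitioner uses to map message keys onto partitions. A small usage sketch, assuming the call signature of kafka-python's DefaultPartitioner (the six-partition list is invented; real partition ids come from cluster metadata):

```python
from kafka.partitioner import DefaultPartitioner, murmur2

# Hypothetical partition ids; in real use these come from cluster metadata.
all_partitions = list(range(6))

partitioner = DefaultPartitioner()
# The same key lands on the same partition for a fixed partition list.
print(partitioner(b'my-key', all_partitions, all_partitions))
# murmur2 exposes the underlying 32-bit hash of the key bytes.
print(murmur2(b'my-key'))
```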
--------------------------------------------------------------------------------
/docs/apidoc/modules.rst:
--------------------------------------------------------------------------------
1 | kafka-python API
2 | ****************
3 |
4 | .. toctree::
5 |
6 |    KafkaConsumer
7 |    KafkaProducer
8 |    KafkaAdminClient
9 |    KafkaClient
10 |    BrokerConnection
11 |    ClusterMetadata
12 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.egg-info
2 | *.pyc
3 | .tox
4 | build
5 | dist
6 | MANIFEST
7 | env
8 | servers/*/kafka-bin*
9 | servers/*/resources/ssl*
10 | .coverage*
11 | .noseids
12 | docs/_build
13 | .cache*
14 | .idea/
15 | integration-test/
16 | tests-env/
17 | .pytest_cache/
18 |
--------------------------------------------------------------------------------
/benchmarks/README.md:
--------------------------------------------------------------------------------
1 | The `record_batch_*` benchmarks in this section are written using the
2 | ``pyperf`` library, created by Victor Stinner. For more information on
3 | how to get reliable results of test runs please consult
4 | https://pyperf.readthedocs.io/en/latest/run_benchmark.html.
5 |
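The benchmark scripts named in the tree above (record_batch_compose.py, record_batch_read.py) follow the standard pyperf pattern. A rough sketch of that pattern, using only the public pyperf API (the workload below is a toy stand-in, not the real batch-composition code):

```python
import pyperf


def build_payload():
    # Toy stand-in for the real workload (e.g. composing a record batch).
    return b''.join(bytes([i % 256]) for i in range(1000))


if __name__ == '__main__':
    # Runner forks worker processes and aggregates timings, which is how
    # pyperf produces the reliable numbers the README refers to.
    runner = pyperf.Runner()
    runner.bench_func('build_payload', build_payload)
```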
--------------------------------------------------------------------------------
/test/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | # Set default logging handler to avoid "No handler found" warnings.
4 | import logging
5 | logging.basicConfig(level=logging.INFO)
6 |
7 | from kafka.future import Future
8 | Future.error_on_callbacks = True  # always fail during testing
9 |
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | coveralls==2.1.2
2 | crc32c==2.1
3 | docker-py==1.10.6
4 | flake8==3.8.3
5 | lz4==3.1.0
6 | mock==4.0.2
7 | py==1.9.0
8 | pylint==2.6.0
9 | pytest==6.0.2
10 | pytest-cov==2.10.1
11 | pytest-mock==3.3.1
12 | pytest-pylint==0.17.0
13 | python-snappy==0.5.4
14 | Sphinx==3.2.1
15 | sphinx-rtd-theme==0.5.0
16 | tox==3.20.0
17 | xxhash==2.0.0
18 |
--------------------------------------------------------------------------------
/kafka/record/README:
--------------------------------------------------------------------------------
1 | Module structured mostly based on the
2 | kafka/clients/src/main/java/org/apache/kafka/common/record/ module of the
3 | Java client.
4 |
5 | See abc.py for abstract declarations. `ABCRecords` is used as a facade to hide
6 | version differences. `ABCRecordBatch` subclasses implement the actual parsers
7 | for different versions (v0/v1 as LegacyBatch and v2 as DefaultBatch; names
8 | taken from Java).
9 |
--------------------------------------------------------------------------------
/kafka/metrics/stats/percentile.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 |
4 | class Percentile(object):
5 |     def __init__(self, metric_name, percentile):
6 |         self._metric_name = metric_name
7 |         self._percentile = float(percentile)
8 |
9 |     @property
10 |     def name(self):
11 |         return self._metric_name
12 |
13 |     @property
14 |     def percentile(self):
15 |         return self._percentile
16 |
--------------------------------------------------------------------------------
/docs/license.rst:
--------------------------------------------------------------------------------
1 | License
2 | -------
3 |
4 | .. image:: https://img.shields.io/badge/license-Apache%202-blue.svg
5 |    :target: https://github.com/dpkp/kafka-python/blob/master/LICENSE
6 |
7 | Apache License, v2.0. See `LICENSE <https://github.com/dpkp/kafka-python/blob/master/LICENSE>`_.
8 |
9 | Copyright 2016, Dana Powers, David Arthur, and Contributors
10 | (See `AUTHORS <https://github.com/dpkp/kafka-python/blob/master/AUTHORS.md>`_).
11 |
--------------------------------------------------------------------------------
/docs/support.rst:
--------------------------------------------------------------------------------
1 | Support
2 | -------
3 |
4 | For support, see github issues at https://github.com/dpkp/kafka-python
5 |
6 | Limited IRC chat at #kafka-python on freenode (general chat is #apache-kafka).
7 |
8 | For information about Apache Kafka generally, see https://kafka.apache.org/
9 |
10 | For general discussion of kafka-client design and implementation (not python
11 | specific), see https://groups.google.com/forum/m/#!forum/kafka-clients
12 |
--------------------------------------------------------------------------------
/kafka/protocol/abstract.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import abc
4 |
5 |
6 | class AbstractType(object):
7 |     __metaclass__ = abc.ABCMeta
8 |
9 |     @abc.abstractmethod
10 |     def encode(cls, value):  # pylint: disable=no-self-argument
11 |         pass
12 |
13 |     @abc.abstractmethod
14 |     def decode(cls, data):  # pylint: disable=no-self-argument
15 |         pass
16 |
17 |     @classmethod
18 |     def repr(cls, value):
19 |         return repr(value)
20 |
--------------------------------------------------------------------------------
/kafka/metrics/stats/total.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from kafka.metrics.measurable_stat import AbstractMeasurableStat
4 |
5 |
6 | class Total(AbstractMeasurableStat):
7 |     """An un-windowed cumulative total maintained over all time."""
8 |     def __init__(self, value=0.0):
9 |         self._total = value
10 |
11 |     def record(self, config, value, now):
12 |         self._total += value
13 |
14 |     def measure(self, config, now):
15 |         return float(self._total)
16 |
--------------------------------------------------------------------------------
/kafka/metrics/stats/count.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat
4 |
5 |
6 | class Count(AbstractSampledStat):
7 |     """
8 |     An AbstractSampledStat that maintains a simple count of what it has seen.
9 |     """
10 |     def __init__(self):
11 |         super(Count, self).__init__(0.0)
12 |
13 |     def update(self, sample, config, value, now):
14 |         sample.value += 1.0
15 |
16 |     def combine(self, samples, config, now):
17 |         return float(sum(sample.value for sample in samples))
18 |
--------------------------------------------------------------------------------
/kafka/metrics/measurable_stat.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import abc
4 |
5 | from kafka.metrics.measurable import AbstractMeasurable
6 | from kafka.metrics.stat import AbstractStat
7 |
8 |
9 | class AbstractMeasurableStat(AbstractStat, AbstractMeasurable):
10 |     """
11 |     An AbstractMeasurableStat is an AbstractStat that is also
12 |     an AbstractMeasurable (i.e. can produce a single floating point value).
13 |     This is the interface used for most of the simple statistics such
14 |     as Avg, Max, Count, etc.
15 |     """
16 |     __metaclass__ = abc.ABCMeta
17 |
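As the docstring says, this interface boils down to record() to feed values in and measure() to read one float out. A minimal illustrative subclass, mirroring the signatures used by Total above (LastValue is a made-up example, not part of kafka-python):

```python
from kafka.metrics.measurable_stat import AbstractMeasurableStat


class LastValue(AbstractMeasurableStat):
    """Toy stat that reports the most recently recorded value."""
    def __init__(self):
        self._last = 0.0

    def record(self, config, value, now):
        self._last = value

    def measure(self, config, now):
        return float(self._last)
```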
--------------------------------------------------------------------------------
/kafka/metrics/stats/max_stat.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat
4 |
5 |
6 | class Max(AbstractSampledStat):
7 |     """An AbstractSampledStat that gives the max over its samples."""
8 |     def __init__(self):
9 |         super(Max, self).__init__(float('-inf'))
10 |
11 |     def update(self, sample, config, value, now):
12 |         sample.value = max(sample.value, value)
13 |
14 |     def combine(self, samples, config, now):
15 |         if not samples:
16 |             return float('-inf')
17 |         return float(max(sample.value for sample in samples))
18 |
--------------------------------------------------------------------------------
/kafka/serializer/abstract.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import abc
4 |
5 |
6 | class Serializer(object):
7 |     __metaclass__ = abc.ABCMeta
8 |
9 |     def __init__(self, **config):
10 |         pass
11 |
12 |     @abc.abstractmethod
13 |     def serialize(self, topic, value):
14 |         pass
15 |
16 |     def close(self):
17 |         pass
18 |
19 |
20 | class Deserializer(object):
21 |     __metaclass__ = abc.ABCMeta
22 |
23 |     def __init__(self, **config):
24 |         pass
25 |
26 |     @abc.abstractmethod
27 |     def deserialize(self, topic, bytes_):
28 |         pass
29 |
30 |     def close(self):
31 |         pass
32 |
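A concrete pair built on these base classes, sketched for illustration (JsonSerializer and JsonDeserializer are hypothetical names, not classes shipped with kafka-python):

```python
import json

from kafka.serializer import Serializer, Deserializer


class JsonSerializer(Serializer):
    """Encode any JSON-compatible value to UTF-8 bytes."""
    def serialize(self, topic, value):
        return json.dumps(value).encode('utf-8')


class JsonDeserializer(Deserializer):
    """Decode UTF-8 JSON bytes back into Python objects."""
    def deserialize(self, topic, bytes_):
        return json.loads(bytes_.decode('utf-8'))
```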
--------------------------------------------------------------------------------
/kafka/metrics/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from kafka.metrics.compound_stat import NamedMeasurable
4 | from kafka.metrics.dict_reporter import DictReporter
5 | from kafka.metrics.kafka_metric import KafkaMetric
6 | from kafka.metrics.measurable import AnonMeasurable
7 | from kafka.metrics.metric_config import MetricConfig
8 | from kafka.metrics.metric_name import MetricName
9 | from kafka.metrics.metrics import Metrics
10 | from kafka.metrics.quota import Quota
11 |
12 | __all__ = [
13 |     'AnonMeasurable', 'DictReporter', 'KafkaMetric', 'MetricConfig',
14 |     'MetricName', 'Metrics', 'NamedMeasurable', 'Quota'
15 | ]
16 |
--------------------------------------------------------------------------------
/kafka/metrics/stats/min_stat.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import sys
4 |
5 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat
6 |
7 |
8 | class Min(AbstractSampledStat):
9 |     """An AbstractSampledStat that gives the min over its samples."""
10 |     def __init__(self):
11 |         super(Min, self).__init__(float(sys.maxsize))
12 |
13 |     def update(self, sample, config, value, now):
14 |         sample.value = min(sample.value, value)
15 |
16 |     def combine(self, samples, config, now):
17 |         if not samples:
18 |             return float(sys.maxsize)
19 |         return float(min(sample.value for sample in samples))
20 |
--------------------------------------------------------------------------------
/kafka/metrics/stats/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from kafka.metrics.stats.avg import Avg
4 | from kafka.metrics.stats.count import Count
5 | from kafka.metrics.stats.histogram import Histogram
6 | from kafka.metrics.stats.max_stat import Max
7 | from kafka.metrics.stats.min_stat import Min
8 | from kafka.metrics.stats.percentile import Percentile
9 | from kafka.metrics.stats.percentiles import Percentiles
10 | from kafka.metrics.stats.rate import Rate
11 | from kafka.metrics.stats.sensor import Sensor
12 | from kafka.metrics.stats.total import Total
13 |
14 | __all__ = [
15 |     'Avg', 'Count', 'Histogram', 'Max', 'Min', 'Percentile', 'Percentiles',
16 |     'Rate', 'Sensor', 'Total'
17 | ]
18 |
--------------------------------------------------------------------------------
/kafka/metrics/stat.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import abc
4 |
5 |
6 | class AbstractStat(object):
7 |     """
8 |     An AbstractStat is a quantity such as average, max, etc that is computed
9 |     off the stream of updates to a sensor
10 |     """
11 |     __metaclass__ = abc.ABCMeta
12 |
13 |     @abc.abstractmethod
14 |     def record(self, config, value, time_ms):
15 |         """
16 |         Record the given value
17 |
18 |         Arguments:
19 |             config (MetricConfig): The configuration to use for this metric
20 |             value (float): The value to record
21 |             time_ms (int): The POSIX time in milliseconds this value occurred
22 |         """
23 |         raise NotImplementedError
24 |
--------------------------------------------------------------------------------
/test/test_cluster.py:
--------------------------------------------------------------------------------
1 | # pylint: skip-file
2 | from __future__ import absolute_import
3 |
4 | import pytest
5 |
6 | from kafka.cluster import ClusterMetadata
7 | from kafka.protocol.metadata import MetadataResponse
8 |
9 |
10 | def test_empty_broker_list():
11 |     cluster = ClusterMetadata()
12 |     assert len(cluster.brokers()) == 0
13 |
14 |     cluster.update_metadata(MetadataResponse[0](
15 |         [(0, 'foo', 12), (1, 'bar', 34)], []))
16 |     assert len(cluster.brokers()) == 2
17 |
18 |     # empty broker list response should be ignored
19 |     cluster.update_metadata(MetadataResponse[0](
20 |         [],  # empty brokers
21 |         [(17, 'foo', []), (17, 'bar', [])]))  # topics w/ error
22 |     assert len(cluster.brokers()) == 2
23 |
--------------------------------------------------------------------------------
/kafka/admin/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from kafka.admin.config_resource import ConfigResource, ConfigResourceType
4 | from kafka.admin.client import KafkaAdminClient
5 | from kafka.admin.acl_resource import (ACL, ACLFilter, ResourcePattern, ResourcePatternFilter, ACLOperation,
6 |                                       ResourceType, ACLPermissionType, ACLResourcePatternType)
7 | from kafka.admin.new_topic import NewTopic
8 | from kafka.admin.new_partitions import NewPartitions
9 |
10 | __all__ = [
11 |     'ConfigResource', 'ConfigResourceType', 'KafkaAdminClient', 'NewTopic', 'NewPartitions', 'ACL', 'ACLFilter',
12 |     'ResourcePattern', 'ResourcePatternFilter', 'ACLOperation', 'ResourceType', 'ACLPermissionType',
13 |     'ACLResourcePatternType'
14 | ]
15 |
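These exports are typically driven through KafkaAdminClient. A hedged sketch of creating a topic (the bootstrap address and topic settings below are placeholders, and the calls assume a reachable broker):

```python
from kafka.admin import KafkaAdminClient, NewTopic

# Placeholder broker address; point this at a real cluster.
admin = KafkaAdminClient(bootstrap_servers='localhost:9092')

topic = NewTopic(name='demo-topic', num_partitions=3, replication_factor=1)
admin.create_topics([topic])
admin.close()
```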
--------------------------------------------------------------------------------
/kafka/metrics/stats/avg.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat
4 |
5 |
6 | class Avg(AbstractSampledStat):
7 |     """
8 |     An AbstractSampledStat that maintains a simple average over its samples.
9 |     """
10 |     def __init__(self):
11 |         super(Avg, self).__init__(0.0)
12 |
13 |     def update(self, sample, config, value, now):
14 |         sample.value += value
15 |
16 |     def combine(self, samples, config, now):
17 |         total_sum = 0
18 |         total_count = 0
19 |         for sample in samples:
20 |             total_sum += sample.value
21 |             total_count += sample.event_count
22 |         if not total_count:
23 |             return 0
24 |         return float(total_sum) / total_count
25 |
--------------------------------------------------------------------------------
/test/test_api_object_implementation.py:
--------------------------------------------------------------------------------
1 | import abc
2 | import pytest
3 |
4 | from kafka.protocol.api import Request
5 | from kafka.protocol.api import Response
6 |
7 |
8 | attr_names = [n for n in dir(Request) if isinstance(getattr(Request, n), abc.abstractproperty)]
9 | @pytest.mark.parametrize('klass', Request.__subclasses__())
10 | @pytest.mark.parametrize('attr_name', attr_names)
11 | def test_request_type_conformance(klass, attr_name):
12 |     assert hasattr(klass, attr_name)
13 |
14 | attr_names = [n for n in dir(Response) if isinstance(getattr(Response, n), abc.abstractproperty)]
15 | @pytest.mark.parametrize('klass', Response.__subclasses__())
16 | @pytest.mark.parametrize('attr_name', attr_names)
17 | def test_response_type_conformance(klass, attr_name):
18 |     assert hasattr(klass, attr_name)
19 |
--------------------------------------------------------------------------------
/kafka/admin/new_partitions.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 |
4 | class NewPartitions(object):
5 |     """A class for new partition creation on existing topics. Note that the length of new_assignments, if specified,
6 |     must be the difference between the new total number of partitions and the existing number of partitions.
7 |     Arguments:
8 |         total_count (int): the total number of partitions that should exist on the topic
9 |         new_assignments ([[int]]): an array of arrays of replica assignments for new partitions.
10 |             If not set, broker assigns replicas per an internal algorithm.
11 |     """
12 |
13 |     def __init__(
14 |         self,
15 |         total_count,
16 |         new_assignments=None
17 |     ):
18 |         self.total_count = total_count
19 |         self.new_assignments = new_assignments
20 |
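Following the docstring's constraint: growing a topic from 3 to 5 partitions means new_assignments, if given, must describe exactly 5 - 3 = 2 new partitions. A sketch (broker ids 0, 1, 2 are illustrative); objects like these are then passed to KafkaAdminClient.create_partitions, keyed by topic name:

```python
from kafka.admin import NewPartitions

# Grow a 3-partition topic to 5: exactly two new assignments, each
# listing the replica broker ids for one new partition.
grow_to_five = NewPartitions(
    total_count=5,
    new_assignments=[[0, 1], [1, 2]],
)

# Or leave replica placement to the broker's internal algorithm.
grow_simple = NewPartitions(total_count=5)
```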
--------------------------------------------------------------------------------
/kafka/protocol/frame.py:
--------------------------------------------------------------------------------
1 | class KafkaBytes(bytearray):
2 |     def __init__(self, size):
3 |         super(KafkaBytes, self).__init__(size)
4 |         self._idx = 0
5 |
6 |     def read(self, nbytes=None):
7 |         if nbytes is None:
8 |             nbytes = len(self) - self._idx
9 |         start = self._idx
10 |         self._idx += nbytes
11 |         if self._idx > len(self):
12 |             self._idx = len(self)
13 |         return bytes(self[start:self._idx])
14 |
15 |     def write(self, data):
16 |         start = self._idx
17 |         self._idx += len(data)
18 |         self[start:self._idx] = data
19 |
20 |     def seek(self, idx):
21 |         self._idx = idx
22 |
23 |     def tell(self):
24 |         return self._idx
25 |
26 |     def __str__(self):
27 |         return 'KafkaBytes(%d)' % len(self)
28 |
29 |     def __repr__(self):
30 |         return str(self)
31 |
--------------------------------------------------------------------------------
/kafka/metrics/measurable.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import abc
4 |
5 |
6 | class AbstractMeasurable(object):
7 |     """A measurable quantity that can be registered as a metric"""
8 |     @abc.abstractmethod
9 |     def measure(self, config, now):
10 |         """
11 |         Measure this quantity and return the result
12 |
13 |         Arguments:
14 |             config (MetricConfig): The configuration for this metric
15 |             now (int): The POSIX time in milliseconds the measurement
16 |                 is being taken
17 |
18 |         Returns:
19 |             The measured value
20 |         """
21 |         raise NotImplementedError
22 |
23 |
24 | class AnonMeasurable(AbstractMeasurable):
25 |     def __init__(self, measure_fn):
26 |         self._measure_fn = measure_fn
27 |
28 |     def measure(self, config, now):
29 |         return float(self._measure_fn(config, now))
30 |
--------------------------------------------------------------------------------
/travis_java_install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # borrowed from: https://github.com/mansenfranzen/pywrangler/blob/master/tests/travis_java_install.sh
4 |
5 | # Kafka requires Java 8 in order to work properly. However, TravisCI's Ubuntu
6 | # 16.04 ships with Java 11 and Java can't be set with `jdk` when python is
7 | # selected as language. Ubuntu 14.04 does not work due to missing python 3.7
8 | # support on TravisCI which does have Java 8 as default.
9 | 10 | # show current JAVA_HOME and java version 11 | echo "Current JAVA_HOME: $JAVA_HOME" 12 | echo "Current java -version:" 13 | which java 14 | java -version 15 | 16 | echo "Updating JAVA_HOME" 17 | # change JAVA_HOME to Java 8 18 | export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64 19 | 20 | echo "Updating PATH" 21 | export PATH=${PATH/\/usr\/local\/lib\/jvm\/openjdk11\/bin/$JAVA_HOME\/bin} 22 | 23 | echo "New java -version" 24 | which java 25 | java -version 26 | -------------------------------------------------------------------------------- /kafka/metrics/compound_stat.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | 5 | from kafka.metrics.stat import AbstractStat 6 | 7 | 8 | class AbstractCompoundStat(AbstractStat): 9 | """ 10 | A compound stat is a stat where a single measurement and associated 11 | data structure feeds many metrics. This is the example for a 12 | histogram which has many associated percentiles. 13 | """ 14 | __metaclass__ = abc.ABCMeta 15 | 16 | def stats(self): 17 | """ 18 | Return list of NamedMeasurable 19 | """ 20 | raise NotImplementedError 21 | 22 | 23 | class NamedMeasurable(object): 24 | def __init__(self, metric_name, measurable_stat): 25 | self._name = metric_name 26 | self._stat = measurable_stat 27 | 28 | @property 29 | def name(self): 30 | return self._name 31 | 32 | @property 33 | def stat(self): 34 | return self._stat 35 | -------------------------------------------------------------------------------- /servers/0.8.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | dataDir={tmp_dir} 17 | clientPortAddress={host} 18 | clientPort={port} 19 | maxClientCnxns=0 20 | -------------------------------------------------------------------------------- /servers/0.8.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | dataDir={tmp_dir} 17 | clientPortAddress={host} 18 | clientPort={port} 19 | maxClientCnxns=0 20 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | dist: xenial 4 | 5 | python: 6 | - 2.7 7 | - 3.4 8 | - 3.7 9 | - 3.8 10 | - pypy2.7-6.0 11 | 12 | env: 13 | - KAFKA_VERSION=0.8.2.2 14 | - KAFKA_VERSION=0.9.0.1 15 | - KAFKA_VERSION=0.10.2.2 16 | - KAFKA_VERSION=0.11.0.3 17 | - KAFKA_VERSION=1.1.1 18 | - KAFKA_VERSION=2.4.0 19 | - KAFKA_VERSION=2.5.0 20 | - KAFKA_VERSION=2.6.0 21 | 22 | addons: 23 | apt: 24 | packages: 25 | - libsnappy-dev 26 | - libzstd-dev 27 | - openjdk-8-jdk 28 | 29 | cache: 30 | directories: 31 | - $HOME/.cache/pip 32 | - servers/dist 33 | 34 | before_install: 35 | - source travis_java_install.sh 36 | - ./build_integration.sh 37 | 38 | install: 39 | - pip install tox coveralls 40 | - pip install . 41 | 42 | script: 43 | - tox -e `if [ "$TRAVIS_PYTHON_VERSION" == "pypy2.7-6.0" ]; then echo pypy; else echo py${TRAVIS_PYTHON_VERSION/./}; fi` 44 | 45 | after_success: 46 | - coveralls 47 | -------------------------------------------------------------------------------- /test/test_subscription_state.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | from __future__ import absolute_import 3 | 4 | import pytest 5 | 6 | from kafka.consumer.subscription_state import SubscriptionState 7 | 8 | @pytest.mark.parametrize(('topic_name', 'expectation'), [ 9 | (0, pytest.raises(TypeError)), 10 | (None, pytest.raises(TypeError)), 11 | ('', pytest.raises(ValueError)), 12 | ('.', pytest.raises(ValueError)), 13 | ('..', pytest.raises(ValueError)), 14 | ('a' * 250, pytest.raises(ValueError)), 15 | ('abc/123', pytest.raises(ValueError)), 16 | ('/abc/123', pytest.raises(ValueError)), 17 | ('/abc123', pytest.raises(ValueError)), 18 | ('name with space', pytest.raises(ValueError)), 19 | ('name*with*stars', pytest.raises(ValueError)), 20 | ('name+with+plus', pytest.raises(ValueError)), 21 | ]) 22 | def test_topic_name_validation(topic_name, expectation): 23 | state = SubscriptionState() 24 | with expectation: 25 | state._ensure_valid_topic_name(topic_name) 26 | -------------------------------------------------------------------------------- /test/test_partition_movements.py: -------------------------------------------------------------------------------- 1 | from kafka.structs import TopicPartition 2 | 3 | from kafka.coordinator.assignors.sticky.partition_movements import PartitionMovements 4 | 5 | 6 | def test_empty_movements_are_sticky(): 7 | partition_movements = PartitionMovements() 8 | assert partition_movements.are_sticky() 9 | 10 | 11 | def test_sticky_movements(): 12 | partition_movements = PartitionMovements() 13 | partition_movements.move_partition(TopicPartition('t', 1), 'C1', 'C2') 14 | partition_movements.move_partition(TopicPartition('t', 1), 'C2', 'C3') 15 | partition_movements.move_partition(TopicPartition('t', 1), 'C3', 'C1') 
16 |     assert partition_movements.are_sticky()
17 |
18 |
19 | def test_should_detect_non_sticky_assignment():
20 |     partition_movements = PartitionMovements()
21 |     partition_movements.move_partition(TopicPartition('t', 1), 'C1', 'C2')
22 |     partition_movements.move_partition(TopicPartition('t', 2), 'C2', 'C1')
23 |     assert not partition_movements.are_sticky()
24 |
--------------------------------------------------------------------------------
/test/test_package.py:
--------------------------------------------------------------------------------
1 | class TestPackage:
2 |     def test_top_level_namespace(self):
3 |         import kafka as kafka1
4 |         assert kafka1.KafkaConsumer.__name__ == "KafkaConsumer"
5 |         assert kafka1.consumer.__name__ == "kafka.consumer"
6 |         assert kafka1.codec.__name__ == "kafka.codec"
7 |
8 |     def test_submodule_namespace(self):
9 |         import kafka.client_async as client1
10 |         assert client1.__name__ == "kafka.client_async"
11 |
12 |         from kafka import client_async as client2
13 |         assert client2.__name__ == "kafka.client_async"
14 |
15 |         from kafka.client_async import KafkaClient as KafkaClient1
16 |         assert KafkaClient1.__name__ == "KafkaClient"
17 |
18 |         from kafka import KafkaClient as KafkaClient2
19 |         assert KafkaClient2.__name__ == "KafkaClient"
20 |
21 |         from kafka.codec import gzip_encode as gzip_encode1
22 |         assert gzip_encode1.__name__ == "gzip_encode"
23 |
24 |         from kafka.codec import snappy_encode
25 |         assert snappy_encode.__name__ == "snappy_encode"
26 |
--------------------------------------------------------------------------------
/kafka/protocol/pickle.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | try:
4 |     import copyreg  # pylint: disable=import-error
5 | except ImportError:
6 |     import copy_reg as copyreg  # pylint: disable=import-error
7 |
8 | import types
9 |
10 |
11 | def _pickle_method(method):
12 |     try:
13 |         func_name = method.__func__.__name__
14 |         obj = method.__self__
15 |         cls = method.__self__.__class__
16 |     except AttributeError:
17 |         func_name = method.im_func.__name__
18 |         obj = method.im_self
19 |         cls = method.im_class
20 |
21 |     return _unpickle_method, (func_name, obj, cls)
22 |
23 |
24 | def _unpickle_method(func_name, obj, cls):
25 |     for cls in cls.mro():
26 |         try:
27 |             func = cls.__dict__[func_name]
28 |         except KeyError:
29 |             pass
30 |         else:
31 |             break
32 |     return func.__get__(obj, cls)
33 |
34 | # https://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
35 | copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)
36 |
--------------------------------------------------------------------------------
/kafka/metrics/kafka_metric.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import time
4 |
5 |
6 | class KafkaMetric(object):
7 |     # NOTE java constructor takes a lock instance
8 |     def __init__(self, metric_name, measurable, config):
9 |         if not metric_name:
10 |             raise ValueError('metric_name must be non-empty')
11 |         if not measurable:
12 |             raise ValueError('measurable must be non-empty')
13 |         self._metric_name = metric_name
14 |         self._measurable = measurable
15 |         self._config = config
16 |
17 |     @property
18 |     def metric_name(self):
19 |         return self._metric_name
20 |
21 |     @property
22 |     def measurable(self):
23 |         return self._measurable
24 |
25 |     @property
26 |     def config(self):
27 |         return self._config
28 |
29 |     @config.setter
30 |     def config(self, config):
31 |         self._config = config
32 |
33 |     def value(self, time_ms=None):
34 |         if time_ms is None:
35 |             time_ms = time.time() * 1000
36 |         return self.measurable.measure(self.config, time_ms)
37 |
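KafkaMetric.value() above just forwards the current timestamp to the wrapped measurable. A minimal sketch wiring it to the AnonMeasurable helper shown earlier (the gauge and its name string are invented for illustration; real code passes MetricName and MetricConfig objects):

```python
from kafka.metrics import AnonMeasurable, KafkaMetric

# A constant 'gauge'; this toy function ignores config and now.
gauge = AnonMeasurable(lambda config, now: 42.0)

# config can be None here because our lambda never touches it.
metric = KafkaMetric('toy.gauge', gauge, None)
print(metric.value())  # -> 42.0
```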
--------------------------------------------------------------------------------
/docs/compatibility.rst:
--------------------------------------------------------------------------------
1 | Compatibility
2 | -------------
3 |
4 | .. image:: https://img.shields.io/badge/kafka-2.6%2C%202.5%2C%202.4%2C%202.3%2C%202.2%2C%202.1%2C%202.0%2C%201.1%2C%201.0%2C%200.11%2C%200.10%2C%200.9%2C%200.8-brightgreen.svg
5 |    :target: https://kafka-python.readthedocs.io/compatibility.html
6 | .. image:: https://img.shields.io/pypi/pyversions/kafka-python.svg
7 |    :target: https://pypi.python.org/pypi/kafka-python
8 |
9 | kafka-python is compatible with (and tested against) broker versions 2.6
10 | through 0.8.0. kafka-python is not compatible with the 0.8.2-beta release.
11 |
12 | Because the kafka server protocol is backwards compatible, kafka-python is
13 | expected to work with newer broker releases as well.
14 |
15 | Although kafka-python is tested and expected to work on recent broker versions,
16 | not all features are supported. Specifically, authentication, codecs, and
17 | transactional producer/consumer support are not fully implemented. PRs welcome!
18 |
19 | kafka-python is tested on python 2.7, 3.4, 3.7, 3.8 and pypy2.7.
20 |
21 | Builds and tests via Travis-CI. See https://travis-ci.org/dpkp/kafka-python
22 |
--------------------------------------------------------------------------------
/servers/0.10.0.0/resources/zookeeper.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | #    http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # the directory where the snapshot is stored.
16 | dataDir={tmp_dir}
17 | # the port at which the clients will connect
18 | clientPort={port}
19 | clientPortAddress={host}
20 | # disable the per-ip limit on the number of connections since this is a non-production config
21 | maxClientCnxns=0
22 |
--------------------------------------------------------------------------------
/servers/0.10.0.1/resources/zookeeper.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License.
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.10.1.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.10.2.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.10.2.2/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.11.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.11.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.11.0.2/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.11.0.3/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.8.1.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.8.2.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.8.2.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.8.2.2/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.9.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.9.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.0.2/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.1.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.1.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.1.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.1.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.2.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.3.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.4.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.5.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.6.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/trunk/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py{26,27,34,35,36,37,38,py}, docs 3 | 4 | [pytest] 5 | testpaths = kafka test 6 | addopts = --durations=10 7 | log_format = %(created)f %(filename)-23s %(threadName)s %(message)s 8 | 9 | [testenv] 10 | deps = 11 | pytest 12 | pytest-cov 13 | py{27,34,35,36,37,38,py}: pylint 14 | py{27,34,35,36,37,38,py}: pytest-pylint 15 | pytest-mock 16 | mock 17 | python-snappy 18 | zstandard 19 | lz4 20 | xxhash 21 | crc32c 22 | commands = 23 | py.test {posargs:--pylint --pylint-rcfile=pylint.rc --pylint-error-types=EF --cov=kafka --cov-config=.covrc} 24 | setenv = 25 | CRC32C_SW_MODE = auto 26 | PROJECT_ROOT = {toxinidir} 27 | passenv = KAFKA_VERSION 28 | 29 | [testenv:py26] 30 | # pylint doesn't support python2.6 31 | commands = py.test {posargs:--cov=kafka --cov-config=.covrc} 32 | 33 | [testenv:pypy] 34 | # pylint is super slow on pypy... 
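Both the py26 and pypy environments fall back to running the suite without pylint, as the commands line below shows. For a rough idea of the equivalent invocation outside tox (a sketch only: the option strings are copied from this tox.ini, and running from the repository root is assumed):

import pytest

# Mirrors the pylint-free test command from tox.ini; 'kafka' and 'test'
# are the testpaths declared in the [pytest] section above.
raise SystemExit(pytest.main(['--cov=kafka', '--cov-config=.covrc', 'kafka', 'test']))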
35 | commands = py.test {posargs:--cov=kafka --cov-config=.covrc} 36 | 37 | [testenv:docs] 38 | deps = 39 | sphinx_rtd_theme 40 | sphinx 41 | 42 | commands = 43 | sphinx-apidoc -o docs/apidoc/ kafka/ 44 | sphinx-build -b html docs/ docs/_build 45 | -------------------------------------------------------------------------------- /kafka/coordinator/protocol.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.protocol.struct import Struct 4 | from kafka.protocol.types import Array, Bytes, Int16, Int32, Schema, String 5 | from kafka.structs import TopicPartition 6 | 7 | 8 | class ConsumerProtocolMemberMetadata(Struct): 9 | SCHEMA = Schema( 10 | ('version', Int16), 11 | ('subscription', Array(String('utf-8'))), 12 | ('user_data', Bytes)) 13 | 14 | 15 | class ConsumerProtocolMemberAssignment(Struct): 16 | SCHEMA = Schema( 17 | ('version', Int16), 18 | ('assignment', Array( 19 | ('topic', String('utf-8')), 20 | ('partitions', Array(Int32)))), 21 | ('user_data', Bytes)) 22 | 23 | def partitions(self): 24 | return [TopicPartition(topic, partition) 25 | for topic, partitions in self.assignment # pylint: disable-msg=no-member 26 | for partition in partitions] 27 | 28 | 29 | class ConsumerProtocol(object): 30 | PROTOCOL_TYPE = 'consumer' 31 | ASSIGNMENT_STRATEGIES = ('range', 'roundrobin') 32 | METADATA = ConsumerProtocolMemberMetadata 33 | ASSIGNMENT = ConsumerProtocolMemberAssignment 34 | -------------------------------------------------------------------------------- /kafka/admin/config_resource.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | # enum in stdlib as of py3.4 4 | try: 5 | from enum import IntEnum # pylint: disable=import-error 6 | except ImportError: 7 | # vendored backport module 8 | from kafka.vendor.enum34 import IntEnum 9 | 10 | 11 | class ConfigResourceType(IntEnum): 12 | """An enumerated type of config resources""" 13 | 14 | BROKER = 4 15 | TOPIC = 2 16 | 17 | 18 | class ConfigResource(object): 19 | """A class for specifying config resources. 20 | Arguments: 21 | resource_type (ConfigResourceType): The type of kafka resource. 22 | name (string): The name of the kafka resource. 23 | configs ({key: value}): A map of config keys to values. 24 | """ 25 | 26 | def __init__( 27 | self, 28 | resource_type, 29 | name, 30 | configs=None 31 | ): 32 | if not isinstance(resource_type, ConfigResourceType): 33 | resource_type = ConfigResourceType[str(resource_type).upper()] # pylint: disable-msg=unsubscriptable-object 34 | self.resource_type = resource_type 35 | self.name = name 36 | self.configs = configs 37 | -------------------------------------------------------------------------------- /kafka/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | __title__ = 'kafka' 4 | from kafka.version import __version__ 5 | __author__ = 'Dana Powers' 6 | __license__ = 'Apache License 2.0' 7 | __copyright__ = 'Copyright 2016 Dana Powers, David Arthur, and Contributors' 8 | 9 | # Set default logging handler to avoid "No handler found" warnings.
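The two Struct subclasses in kafka/coordinator/protocol.py above define the wire format for consumer group membership. A small round-trip sketch (the topic and partition values are invented, and the positional constructor plus encode/decode round-trip are assumed to behave as elsewhere in the codebase):

from kafka.coordinator.protocol import ConsumerProtocolMemberAssignment

# Fields follow the SCHEMA order: version, assignment, user_data.
assignment = ConsumerProtocolMemberAssignment(0, [('my-topic', [0, 1])], b'')
decoded = ConsumerProtocolMemberAssignment.decode(assignment.encode())
# partitions() flattens the (topic, [partitions]) pairs into TopicPartition tuples.
assert decoded.partitions() == [('my-topic', 0), ('my-topic', 1)]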
10 | import logging 11 | try: # Python 2.7+ 12 | from logging import NullHandler 13 | except ImportError: 14 | class NullHandler(logging.Handler): 15 | def emit(self, record): 16 | pass 17 | 18 | logging.getLogger(__name__).addHandler(NullHandler()) 19 | 20 | 21 | from kafka.admin import KafkaAdminClient 22 | from kafka.client_async import KafkaClient 23 | from kafka.consumer import KafkaConsumer 24 | from kafka.consumer.subscription_state import ConsumerRebalanceListener 25 | from kafka.producer import KafkaProducer 26 | from kafka.conn import BrokerConnection 27 | from kafka.serializer import Serializer, Deserializer 28 | from kafka.structs import TopicPartition, OffsetAndMetadata 29 | 30 | 31 | __all__ = [ 32 | 'BrokerConnection', 'ConsumerRebalanceListener', 'KafkaAdminClient', 33 | 'KafkaClient', 'KafkaConsumer', 'KafkaProducer', 34 | ] 35 | -------------------------------------------------------------------------------- /servers/trunk/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.logger.kafka=DEBUG, stdout 23 | log4j.logger.org.I0Itec.zkclient.ZkClient=INFO, stdout 24 | log4j.logger.org.apache.zookeeper=INFO, stdout 25 | -------------------------------------------------------------------------------- /kafka/metrics/metric_config.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import sys 4 | 5 | 6 | class MetricConfig(object): 7 | """Configuration values for metrics""" 8 | def __init__(self, quota=None, samples=2, event_window=sys.maxsize, 9 | time_window_ms=30 * 1000, tags=None): 10 | """ 11 | Arguments: 12 | quota (Quota, optional): Upper or lower bound of a value. 13 | samples (int, optional): Max number of samples kept per metric. 14 | event_window (int, optional): Max number of values per sample. 15 | time_window_ms (int, optional): Max age of an individual sample. 16 | tags (dict of {str: str}, optional): Tags for each metric. 
17 | """ 18 | self.quota = quota 19 | self._samples = samples 20 | self.event_window = event_window 21 | self.time_window_ms = time_window_ms 22 | # tags should be OrderedDict (not supported in py26) 23 | self.tags = tags if tags else {} 24 | 25 | @property 26 | def samples(self): 27 | return self._samples 28 | 29 | @samples.setter 30 | def samples(self, value): 31 | if value < 1: 32 | raise ValueError('The number of samples must be at least 1.') 33 | self._samples = value 34 | -------------------------------------------------------------------------------- /kafka/metrics/quota.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | 4 | class Quota(object): 5 | """An upper or lower bound for metrics""" 6 | def __init__(self, bound, is_upper): 7 | self._bound = bound 8 | self._upper = is_upper 9 | 10 | @staticmethod 11 | def upper_bound(upper_bound): 12 | return Quota(upper_bound, True) 13 | 14 | @staticmethod 15 | def lower_bound(lower_bound): 16 | return Quota(lower_bound, False) 17 | 18 | def is_upper_bound(self): 19 | return self._upper 20 | 21 | @property 22 | def bound(self): 23 | return self._bound 24 | 25 | def is_acceptable(self, value): 26 | return ((self.is_upper_bound() and value <= self.bound) or 27 | (not self.is_upper_bound() and value >= self.bound)) 28 | 29 | def __hash__(self): 30 | prime = 31 31 | result = prime + self.bound 32 | return prime * result + self.is_upper_bound() 33 | 34 | def __eq__(self, other): 35 | if self is other: 36 | return True 37 | return (type(self) == type(other) and 38 | self.bound == other.bound and 39 | self.is_upper_bound() == other.is_upper_bound()) 40 | 41 | def __ne__(self, other): 42 | return not self.__eq__(other) 43 | -------------------------------------------------------------------------------- /test/test_consumer.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from kafka import KafkaConsumer 4 | from kafka.errors import KafkaConfigurationError 5 | 6 | 7 | class TestKafkaConsumer: 8 | def test_session_timeout_larger_than_request_timeout_raises(self): 9 | with pytest.raises(KafkaConfigurationError): 10 | KafkaConsumer(bootstrap_servers='localhost:9092', api_version=(0, 9), group_id='foo', session_timeout_ms=50000, request_timeout_ms=40000) 11 | 12 | def test_fetch_max_wait_larger_than_request_timeout_raises(self): 13 | with pytest.raises(KafkaConfigurationError): 14 | KafkaConsumer(bootstrap_servers='localhost:9092', fetch_max_wait_ms=50000, request_timeout_ms=40000) 15 | 16 | def test_request_timeout_larger_than_connections_max_idle_ms_raises(self): 17 | with pytest.raises(KafkaConfigurationError): 18 | KafkaConsumer(bootstrap_servers='localhost:9092', api_version=(0, 9), request_timeout_ms=50000, connections_max_idle_ms=40000) 19 | 20 | def test_subscription_copy(self): 21 | consumer = KafkaConsumer('foo', api_version=(0, 10)) 22 | sub = consumer.subscription() 23 | assert sub is not consumer.subscription() 24 | assert sub == set(['foo']) 25 | sub.add('fizz') 26 | assert consumer.subscription() == set(['foo']) 27 | -------------------------------------------------------------------------------- /servers/0.8.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. 
See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. 
You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.0.2/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.1.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.1.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
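All of these log4j.properties files share one conversion pattern, [%d] %p %m (%c)%n: timestamp, level, message, then the logger name. For orientation only, a rough Python logging analogue of that layout (the wiring below is illustrative and not part of the repository):

import logging

# Approximates log4j's "[%d] %p %m (%c)%n" conversion pattern.
formatter = logging.Formatter('[%(asctime)s] %(levelname)s %(message)s (%(name)s)')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log = logging.getLogger('kafka')
log.addHandler(handler)
log.setLevel(logging.DEBUG)
log.debug('sample message')  # e.g. [2020-01-01 00:00:00,000] DEBUG sample message (kafka)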
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.1.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.1.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.2.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.3.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.4.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.5.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.6.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.1.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.2.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.2.2/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.11.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.11.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.11.0.2/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.11.0.3/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.1.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.2.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.2.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.2.2/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.9.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.9.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 

log4j.rootLogger=INFO, stdout, logfile

log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=${kafka.logs.dir}/server.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n
--------------------------------------------------------------------------------
/kafka/protocol/__init__.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import


API_KEYS = {
    0: 'Produce',
    1: 'Fetch',
    2: 'ListOffsets',
    3: 'Metadata',
    4: 'LeaderAndIsr',
    5: 'StopReplica',
    6: 'UpdateMetadata',
    7: 'ControlledShutdown',
    8: 'OffsetCommit',
    9: 'OffsetFetch',
    10: 'FindCoordinator',
    11: 'JoinGroup',
    12: 'Heartbeat',
    13: 'LeaveGroup',
    14: 'SyncGroup',
    15: 'DescribeGroups',
    16: 'ListGroups',
    17: 'SaslHandshake',
    18: 'ApiVersions',
    19: 'CreateTopics',
    20: 'DeleteTopics',
    21: 'DeleteRecords',
    22: 'InitProducerId',
    23: 'OffsetForLeaderEpoch',
    24: 'AddPartitionsToTxn',
    25: 'AddOffsetsToTxn',
    26: 'EndTxn',
    27: 'WriteTxnMarkers',
    28: 'TxnOffsetCommit',
    29: 'DescribeAcls',
    30: 'CreateAcls',
    31: 'DeleteAcls',
    32: 'DescribeConfigs',
    33: 'AlterConfigs',
    36: 'SaslAuthenticate',
    37: 'CreatePartitions',
    38: 'CreateDelegationToken',
    39: 'RenewDelegationToken',
    40: 'ExpireDelegationToken',
    41: 'DescribeDelegationToken',
    42: 'DeleteGroups',
    45: 'AlterPartitionReassignments',
    46: 'ListPartitionReassignments',
    48: 'DescribeClientQuotas',
}
--------------------------------------------------------------------------------
/kafka/admin/new_topic.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import

from kafka.errors import IllegalArgumentError


class NewTopic(object):
    """ A class for new topic creation

    Arguments:
        name (string): name of the topic
        num_partitions (int): number of partitions, or -1 if
            replica_assignments has been specified
        replication_factor (int): replication factor, or -1 if
            replica_assignments is specified
        replica_assignments (dict of int: [int]): A mapping containing
            partition id and replicas to assign to it.
        topic_configs (dict of str: str): A mapping of config key
            and value for the topic.
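        Example (illustrative; any valid topic name works):
            NewTopic('my-topic', num_partitions=3, replication_factor=1)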
18 | """ 19 | 20 | def __init__( 21 | self, 22 | name, 23 | num_partitions, 24 | replication_factor, 25 | replica_assignments=None, 26 | topic_configs=None, 27 | ): 28 | if not (num_partitions == -1 or replication_factor == -1) ^ (replica_assignments is None): 29 | raise IllegalArgumentError('either num_partitions/replication_factor or replica_assignment must be specified') 30 | self.name = name 31 | self.num_partitions = num_partitions 32 | self.replication_factor = replication_factor 33 | self.replica_assignments = replica_assignments or {} 34 | self.topic_configs = topic_configs or {} 35 | -------------------------------------------------------------------------------- /kafka/oauth/abstract.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | 5 | # This statement is compatible with both Python 2.7 & 3+ 6 | ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) 7 | 8 | class AbstractTokenProvider(ABC): 9 | """ 10 | A Token Provider must be used for the SASL OAuthBearer protocol. 11 | 12 | The implementation should ensure token reuse so that multiple 13 | calls at connect time do not create multiple tokens. The implementation 14 | should also periodically refresh the token in order to guarantee 15 | that each call returns an unexpired token. A timeout error should 16 | be returned after a short period of inactivity so that the 17 | broker can log debugging info and retry. 18 | 19 | Token Providers MUST implement the token() method 20 | """ 21 | 22 | def __init__(self, **config): 23 | pass 24 | 25 | @abc.abstractmethod 26 | def token(self): 27 | """ 28 | Returns a (str) ID/Access Token to be sent to the Kafka 29 | client. 30 | """ 31 | pass 32 | 33 | def extensions(self): 34 | """ 35 | This is an OPTIONAL method that may be implemented. 36 | 37 | Returns a map of key-value pairs that can 38 | be sent with the SASL/OAUTHBEARER initial client request. If 39 | not implemented, the values are ignored. This feature is only available 40 | in Kafka >= 2.1.0. 
41 | """ 42 | return {} 43 | -------------------------------------------------------------------------------- /test/test_partitioner.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import pytest 4 | 5 | from kafka.partitioner import DefaultPartitioner, murmur2 6 | 7 | 8 | def test_default_partitioner(): 9 | partitioner = DefaultPartitioner() 10 | all_partitions = available = list(range(100)) 11 | # partitioner should return the same partition for the same key 12 | p1 = partitioner(b'foo', all_partitions, available) 13 | p2 = partitioner(b'foo', all_partitions, available) 14 | assert p1 == p2 15 | assert p1 in all_partitions 16 | 17 | # when key is None, choose one of available partitions 18 | assert partitioner(None, all_partitions, [123]) == 123 19 | 20 | # with fallback to all_partitions 21 | assert partitioner(None, all_partitions, []) in all_partitions 22 | 23 | 24 | @pytest.mark.parametrize("bytes_payload,partition_number", [ 25 | (b'', 681), (b'a', 524), (b'ab', 434), (b'abc', 107), (b'123456789', 566), 26 | (b'\x00 ', 742) 27 | ]) 28 | def test_murmur2_java_compatibility(bytes_payload, partition_number): 29 | partitioner = DefaultPartitioner() 30 | all_partitions = available = list(range(1000)) 31 | # compare with output from Kafka's org.apache.kafka.clients.producer.Partitioner 32 | assert partitioner(bytes_payload, all_partitions, available) == partition_number 33 | 34 | 35 | def test_murmur2_not_ascii(): 36 | # Verify no regression of murmur2() bug encoding py2 bytes that don't ascii encode 37 | murmur2(b'\xa4') 38 | murmur2(b'\x81' * 1000) 39 | -------------------------------------------------------------------------------- /test/testutil.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import os 4 | import random 5 | import re 6 | import string 7 | import time 8 | 9 | 10 | def special_to_underscore(string, _matcher=re.compile(r'[^a-zA-Z0-9_]+')): 11 | return _matcher.sub('_', string) 12 | 13 | 14 | def random_string(length): 15 | return "".join(random.choice(string.ascii_letters) for i in range(length)) 16 | 17 | 18 | def env_kafka_version(): 19 | """Return the Kafka version set in the OS environment as a tuple. 20 | 21 | Example: '0.8.1.1' --> (0, 8, 1, 1) 22 | """ 23 | if 'KAFKA_VERSION' not in os.environ: 24 | return () 25 | return tuple(map(int, os.environ['KAFKA_VERSION'].split('.'))) 26 | 27 | 28 | def assert_message_count(messages, num_messages): 29 | """Check that we received the expected number of messages with no duplicates.""" 30 | # Make sure we got them all 31 | assert len(messages) == num_messages 32 | # Make sure there are no duplicates 33 | # Note: Currently duplicates are identified only using key/value. Other attributes like topic, partition, headers, 34 | # timestamp, etc are ignored... this could be changed if necessary, but will be more tolerant of dupes. 
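    # A set keyed on (key, value) collapses any duplicates, so its size
    # matching num_messages below implies every message arrived exactly once.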
35 | unique_messages = {(m.key, m.value) for m in messages} 36 | assert len(unique_messages) == num_messages 37 | 38 | 39 | class Timer(object): 40 | def __enter__(self): 41 | self.start = time.time() 42 | return self 43 | 44 | def __exit__(self, *args): 45 | self.end = time.time() 46 | self.interval = self.end - self.start 47 | -------------------------------------------------------------------------------- /kafka/metrics/metrics_reporter.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | 5 | 6 | class AbstractMetricsReporter(object): 7 | """ 8 | An abstract class to allow things to listen as new metrics 9 | are created so they can be reported. 10 | """ 11 | __metaclass__ = abc.ABCMeta 12 | 13 | @abc.abstractmethod 14 | def init(self, metrics): 15 | """ 16 | This is called when the reporter is first registered 17 | to initially register all existing metrics 18 | 19 | Arguments: 20 | metrics (list of KafkaMetric): All currently existing metrics 21 | """ 22 | raise NotImplementedError 23 | 24 | @abc.abstractmethod 25 | def metric_change(self, metric): 26 | """ 27 | This is called whenever a metric is updated or added 28 | 29 | Arguments: 30 | metric (KafkaMetric) 31 | """ 32 | raise NotImplementedError 33 | 34 | @abc.abstractmethod 35 | def metric_removal(self, metric): 36 | """ 37 | This is called whenever a metric is removed 38 | 39 | Arguments: 40 | metric (KafkaMetric) 41 | """ 42 | raise NotImplementedError 43 | 44 | @abc.abstractmethod 45 | def configure(self, configs): 46 | """ 47 | Configure this class with the given key-value pairs 48 | 49 | Arguments: 50 | configs (dict of {str, ?}) 51 | """ 52 | raise NotImplementedError 53 | 54 | @abc.abstractmethod 55 | def close(self): 56 | """Called when the metrics repository is closed.""" 57 | raise NotImplementedError 58 | -------------------------------------------------------------------------------- /test/test_sender.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | from __future__ import absolute_import 3 | 4 | import pytest 5 | import io 6 | 7 | from kafka.client_async import KafkaClient 8 | from kafka.cluster import ClusterMetadata 9 | from kafka.metrics import Metrics 10 | from kafka.protocol.produce import ProduceRequest 11 | from kafka.producer.record_accumulator import RecordAccumulator, ProducerBatch 12 | from kafka.producer.sender import Sender 13 | from kafka.record.memory_records import MemoryRecordsBuilder 14 | from kafka.structs import TopicPartition 15 | 16 | 17 | @pytest.fixture 18 | def client(mocker): 19 | _cli = mocker.Mock(spec=KafkaClient(bootstrap_servers=(), api_version=(0, 9))) 20 | _cli.cluster = mocker.Mock(spec=ClusterMetadata()) 21 | return _cli 22 | 23 | 24 | @pytest.fixture 25 | def accumulator(): 26 | return RecordAccumulator() 27 | 28 | 29 | @pytest.fixture 30 | def metrics(): 31 | return Metrics() 32 | 33 | 34 | @pytest.fixture 35 | def sender(client, accumulator, metrics): 36 | return Sender(client, client.cluster, accumulator, metrics) 37 | 38 | 39 | @pytest.mark.parametrize(("api_version", "produce_version"), [ 40 | ((0, 10), 2), 41 | ((0, 9), 1), 42 | ((0, 8), 0) 43 | ]) 44 | def test_produce_request(sender, mocker, api_version, produce_version): 45 | sender.config['api_version'] = api_version 46 | tp = TopicPartition('foo', 0) 47 | buffer = io.BytesIO() 48 | records = MemoryRecordsBuilder( 49 | magic=1, compression_type=0, 
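        # magic=1 selects the v1 message format; compression_type=0 means
        # the batch is written uncompressed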
batch_size=100000) 50 | batch = ProducerBatch(tp, records, buffer) 51 | records.close() 52 | produce_request = sender._produce_request(0, 0, 0, [batch]) 53 | assert isinstance(produce_request, ProduceRequest[produce_version]) 54 | -------------------------------------------------------------------------------- /kafka/coordinator/assignors/abstract.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | import logging 5 | 6 | log = logging.getLogger(__name__) 7 | 8 | 9 | class AbstractPartitionAssignor(object): 10 | """ 11 | Abstract assignor implementation which does some common grunt work (in particular collecting 12 | partition counts which are always needed in assignors). 13 | """ 14 | 15 | @abc.abstractproperty 16 | def name(self): 17 | """.name should be a string identifying the assignor""" 18 | pass 19 | 20 | @abc.abstractmethod 21 | def assign(self, cluster, members): 22 | """Perform group assignment given cluster metadata and member subscriptions 23 | 24 | Arguments: 25 | cluster (ClusterMetadata): metadata for use in assignment 26 | members (dict of {member_id: MemberMetadata}): decoded metadata for 27 | each member in the group. 28 | 29 | Returns: 30 | dict: {member_id: MemberAssignment} 31 | """ 32 | pass 33 | 34 | @abc.abstractmethod 35 | def metadata(self, topics): 36 | """Generate ProtocolMetadata to be submitted via JoinGroupRequest. 37 | 38 | Arguments: 39 | topics (set): a member's subscribed topics 40 | 41 | Returns: 42 | MemberMetadata struct 43 | """ 44 | pass 45 | 46 | @abc.abstractmethod 47 | def on_assignment(self, assignment): 48 | """Callback that runs on each assignment. 49 | 50 | This method can be used to update internal state, if any, of the 51 | partition assignor. 
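        Stateless assignors may implement this as a no-op; for example,
        the built-in range assignor keeps no per-assignment state.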
52 | 53 | Arguments: 54 | assignment (MemberAssignment): the member's assignment 55 | """ 56 | pass 57 | -------------------------------------------------------------------------------- /benchmarks/load_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import print_function 3 | import threading, logging, time 4 | 5 | from kafka import KafkaConsumer, KafkaProducer 6 | 7 | msg_size = 524288 8 | 9 | producer_stop = threading.Event() 10 | consumer_stop = threading.Event() 11 | 12 | class Producer(threading.Thread): 13 | big_msg = b'1' * msg_size 14 | 15 | def run(self): 16 | producer = KafkaProducer(bootstrap_servers='localhost:9092') 17 | self.sent = 0 18 | 19 | while not producer_stop.is_set(): 20 | producer.send('my-topic', self.big_msg) 21 | self.sent += 1 22 | producer.flush() 23 | 24 | 25 | class Consumer(threading.Thread): 26 | 27 | def run(self): 28 | consumer = KafkaConsumer(bootstrap_servers='localhost:9092', 29 | auto_offset_reset='earliest') 30 | consumer.subscribe(['my-topic']) 31 | self.valid = 0 32 | self.invalid = 0 33 | 34 | for message in consumer: 35 | if len(message.value) == msg_size: 36 | self.valid += 1 37 | else: 38 | self.invalid += 1 39 | 40 | if consumer_stop.is_set(): 41 | break 42 | 43 | consumer.close() 44 | 45 | def main(): 46 | threads = [ 47 | Producer(), 48 | Consumer() 49 | ] 50 | 51 | for t in threads: 52 | t.start() 53 | 54 | time.sleep(10) 55 | producer_stop.set() 56 | consumer_stop.set() 57 | print('Messages sent: %d' % threads[0].sent) 58 | print('Messages recvd: %d' % threads[1].valid) 59 | print('Messages invalid: %d' % threads[1].invalid) 60 | 61 | if __name__ == "__main__": 62 | logging.basicConfig( 63 | format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s', 64 | level=logging.INFO 65 | ) 66 | main() 67 | -------------------------------------------------------------------------------- /kafka/util.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import binascii 4 | import weakref 5 | 6 | from kafka.vendor import six 7 | 8 | 9 | if six.PY3: 10 | MAX_INT = 2 ** 31 11 | TO_SIGNED = 2 ** 32 12 | 13 | def crc32(data): 14 | crc = binascii.crc32(data) 15 | # py2 and py3 behave a little differently 16 | # CRC is encoded as a signed int in kafka protocol 17 | # so we'll convert the py3 unsigned result to signed 18 | if crc >= MAX_INT: 19 | crc -= TO_SIGNED 20 | return crc 21 | else: 22 | from binascii import crc32 23 | 24 | 25 | class WeakMethod(object): 26 | """ 27 | Callable that weakly references a method and the object it is bound to. It 28 | is based on https://stackoverflow.com/a/24287465. 29 | 30 | Arguments: 31 | 32 | object_dot_method: A bound instance method (i.e. 'object.method'). 33 | """ 34 | def __init__(self, object_dot_method): 35 | try: 36 | self.target = weakref.ref(object_dot_method.__self__) 37 | except AttributeError: 38 | self.target = weakref.ref(object_dot_method.im_self) 39 | self._target_id = id(self.target()) 40 | try: 41 | self.method = weakref.ref(object_dot_method.__func__) 42 | except AttributeError: 43 | self.method = weakref.ref(object_dot_method.im_func) 44 | self._method_id = id(self.method()) 45 | 46 | def __call__(self, *args, **kwargs): 47 | """ 48 | Calls the method on target with args and kwargs. 
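
        Note: no liveness check is performed, so if either referent has
        already been garbage collected the dereference returns None and
        the call will typically fail with a TypeError.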
        """
        return self.method()(self.target(), *args, **kwargs)

    def __hash__(self):
        return hash(self.target) ^ hash(self.method)

    def __eq__(self, other):
        if not isinstance(other, WeakMethod):
            return False
        return self._target_id == other._target_id and self._method_id == other._method_id


class Dict(dict):
    """Utility class to support passing weakrefs to dicts

    See: https://docs.python.org/2/library/weakref.html
    """
    pass
--------------------------------------------------------------------------------
/docs/install.rst:
--------------------------------------------------------------------------------
Install
#######

Install with your favorite package manager.

Latest Release
**************
Pip:

.. code:: bash

    pip install kafka-python

Releases are also listed at https://github.com/dpkp/kafka-python/releases


Bleeding-Edge
*************

.. code:: bash

    git clone https://github.com/dpkp/kafka-python
    pip install ./kafka-python


Optional crc32c install
***********************
Highly recommended if you are using Kafka 0.11+ brokers. For those brokers,
`kafka-python` uses a newer message protocol version that requires calculating
a `crc32c` checksum, which differs from the `zlib.crc32` hash implementation.
By default `kafka-python` computes this in pure python, which is quite slow.
To speed it up, we optionally support the
https://pypi.python.org/pypi/crc32c package if it is installed.

.. code:: bash

    pip install 'kafka-python[crc32c]'


Optional ZSTD install
*********************

To enable ZSTD compression/decompression, install python-zstandard:

.. code:: bash

    pip install 'kafka-python[zstd]'


Optional LZ4 install
********************

To enable LZ4 compression/decompression, install python-lz4:

.. code:: bash

    pip install 'kafka-python[lz4]'


Optional Snappy install
***********************

Install Development Libraries
=============================

Download and build Snappy from https://google.github.io/snappy/

Ubuntu:

.. code:: bash

    apt-get install libsnappy-dev

OSX:

.. code:: bash

    brew install snappy

From Source:

.. code:: bash

    wget https://github.com/google/snappy/releases/download/1.1.3/snappy-1.1.3.tar.gz
    tar xzvf snappy-1.1.3.tar.gz
    cd snappy-1.1.3
    ./configure
    make
    sudo make install

Install Python Module
=====================

Install the `python-snappy` module

.. code:: bash

    pip install 'kafka-python[snappy]'
--------------------------------------------------------------------------------
/kafka/coordinator/assignors/sticky/sorted_set.py:
--------------------------------------------------------------------------------
class SortedSet:
    def __init__(self, iterable=None, key=None):
        self._key = key if key is not None else lambda x: x
        self._set = set(iterable) if iterable is not None else set()

        self._cached_last = None
        self._cached_first = None

    def first(self):
        if self._cached_first is not None:
            return self._cached_first

        first = None
        for element in self._set:
            if first is None or self._key(first) > self._key(element):
                first = element
        self._cached_first = first
        return first

    def last(self):
        if self._cached_last is not None:
            return self._cached_last

        last = None
        for element in self._set:
            if last is None or self._key(last) < self._key(element):
                last = element
        self._cached_last = last
        return last

    def pop_last(self):
        value = self.last()
        # Remove via self.remove() so that *both* cached endpoints are
        # invalidated: with a single remaining element, first() and last()
        # return the same object, and clearing only _cached_last would leave
        # first() returning an element that is no longer in the set.
        self.remove(value)
        return value

    def add(self, value):
        if self._cached_last is not None and self._key(value) > self._key(self._cached_last):
            self._cached_last = value
        if self._cached_first is not None and self._key(value) < self._key(self._cached_first):
            self._cached_first = value

        return self._set.add(value)

    def remove(self, value):
        if self._cached_last is not None and self._cached_last == value:
            self._cached_last = None
        if self._cached_first is not None and self._cached_first == value:
            self._cached_first = None

        return self._set.remove(value)

    def __contains__(self, value):
        return value in self._set

    def __iter__(self):
        return iter(sorted(self._set, key=self._key))

    def _bool(self):
        return len(self._set) != 0

    __nonzero__ = _bool
    __bool__ = _bool
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
# Some simple testing tasks (sorry, UNIX only).

FLAGS=
KAFKA_VERSION=0.11.0.2
SCALA_VERSION=2.12

setup:
	pip install -r requirements-dev.txt
	pip install -Ue .

servers/$(KAFKA_VERSION)/kafka-bin:
	KAFKA_VERSION=$(KAFKA_VERSION) SCALA_VERSION=$(SCALA_VERSION) ./build_integration.sh

build-integration: servers/$(KAFKA_VERSION)/kafka-bin

# Test and produce coverage using tox. This is the same as is run on Travis
test37: build-integration
	KAFKA_VERSION=$(KAFKA_VERSION) SCALA_VERSION=$(SCALA_VERSION) tox -e py37 -- $(FLAGS)

test27: build-integration
	KAFKA_VERSION=$(KAFKA_VERSION) SCALA_VERSION=$(SCALA_VERSION) tox -e py27 -- $(FLAGS)

# Test using py.test directly if you want to use local python. Useful for other
# platforms that require manual installation for C libraries, i.e. Windows.
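# Illustrative invocation (FLAGS is forwarded to py.test, so any pytest
# selection flags work), e.g.: make test-local FLAGS="-x -k partitioner"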
25 | test-local: build-integration 26 | KAFKA_VERSION=$(KAFKA_VERSION) SCALA_VERSION=$(SCALA_VERSION) py.test \ 27 | --pylint --pylint-rcfile=pylint.rc --pylint-error-types=EF $(FLAGS) kafka test 28 | 29 | cov-local: build-integration 30 | KAFKA_VERSION=$(KAFKA_VERSION) SCALA_VERSION=$(SCALA_VERSION) py.test \ 31 | --pylint --pylint-rcfile=pylint.rc --pylint-error-types=EF --cov=kafka \ 32 | --cov-config=.covrc --cov-report html $(FLAGS) kafka test 33 | @echo "open file://`pwd`/htmlcov/index.html" 34 | 35 | # Check the readme for syntax errors, which can lead to invalid formatting on 36 | # PyPi homepage (https://pypi.python.org/pypi/kafka-python) 37 | check-readme: 38 | python setup.py check -rms 39 | 40 | clean: 41 | rm -rf `find . -name __pycache__` 42 | rm -f `find . -type f -name '*.py[co]' ` 43 | rm -f `find . -type f -name '*~' ` 44 | rm -f `find . -type f -name '.*~' ` 45 | rm -f `find . -type f -name '@*' ` 46 | rm -f `find . -type f -name '#*#' ` 47 | rm -f `find . -type f -name '*.orig' ` 48 | rm -f `find . -type f -name '*.rej' ` 49 | rm -f .coverage 50 | rm -rf htmlcov 51 | rm -rf docs/_build/ 52 | rm -rf cover 53 | rm -rf dist 54 | 55 | doc: 56 | make -C docs html 57 | @echo "open file://`pwd`/docs/_build/html/index.html" 58 | 59 | .PHONY: all test37 test27 test-local cov-local clean doc 60 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | from setuptools import setup, Command, find_packages 5 | 6 | # Pull version from source without importing 7 | # since we can't import something we haven't built yet :) 8 | exec(open('kafka/version.py').read()) 9 | 10 | 11 | class Tox(Command): 12 | 13 | user_options = [] 14 | 15 | def initialize_options(self): 16 | pass 17 | 18 | def finalize_options(self): 19 | pass 20 | 21 | @classmethod 22 | def run(cls): 23 | import tox 24 | sys.exit(tox.cmdline([])) 25 | 26 | 27 | test_require = ['tox', 'mock'] 28 | 29 | here = os.path.abspath(os.path.dirname(__file__)) 30 | 31 | with open(os.path.join(here, 'README.rst')) as f: 32 | README = f.read() 33 | 34 | setup( 35 | name="kafka-python", 36 | version=__version__, 37 | 38 | tests_require=test_require, 39 | extras_require={ 40 | "crc32c": ["crc32c"], 41 | "lz4": ["lz4"], 42 | "snappy": ["python-snappy"], 43 | "zstd": ["python-zstandard"], 44 | }, 45 | cmdclass={"test": Tox}, 46 | packages=find_packages(exclude=['test']), 47 | author="Dana Powers", 48 | author_email="dana.powers@gmail.com", 49 | url="https://github.com/dpkp/kafka-python", 50 | license="Apache License 2.0", 51 | description="Pure Python client for Apache Kafka", 52 | long_description=README, 53 | keywords="apache kafka", 54 | classifiers=[ 55 | "Development Status :: 5 - Production/Stable", 56 | "Intended Audience :: Developers", 57 | "License :: OSI Approved :: Apache Software License", 58 | "Programming Language :: Python", 59 | "Programming Language :: Python :: 2", 60 | "Programming Language :: Python :: 2.7", 61 | "Programming Language :: Python :: 3", 62 | "Programming Language :: Python :: 3.4", 63 | "Programming Language :: Python :: 3.5", 64 | "Programming Language :: Python :: 3.6", 65 | "Programming Language :: Python :: 3.7", 66 | "Programming Language :: Python :: 3.8", 67 | "Programming Language :: Python :: Implementation :: PyPy", 68 | "Topic :: Software Development :: Libraries :: Python Modules", 69 | ] 70 | ) 71 | 
-------------------------------------------------------------------------------- /example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import threading, time 3 | 4 | from kafka import KafkaAdminClient, KafkaConsumer, KafkaProducer 5 | from kafka.admin import NewTopic 6 | 7 | 8 | class Producer(threading.Thread): 9 | def __init__(self): 10 | threading.Thread.__init__(self) 11 | self.stop_event = threading.Event() 12 | 13 | def stop(self): 14 | self.stop_event.set() 15 | 16 | def run(self): 17 | producer = KafkaProducer(bootstrap_servers='localhost:9092') 18 | 19 | while not self.stop_event.is_set(): 20 | producer.send('my-topic', b"test") 21 | producer.send('my-topic', b"\xc2Hola, mundo!") 22 | time.sleep(1) 23 | 24 | producer.close() 25 | 26 | 27 | class Consumer(threading.Thread): 28 | def __init__(self): 29 | threading.Thread.__init__(self) 30 | self.stop_event = threading.Event() 31 | 32 | def stop(self): 33 | self.stop_event.set() 34 | 35 | def run(self): 36 | consumer = KafkaConsumer(bootstrap_servers='localhost:9092', 37 | auto_offset_reset='earliest', 38 | consumer_timeout_ms=1000) 39 | consumer.subscribe(['my-topic']) 40 | 41 | while not self.stop_event.is_set(): 42 | for message in consumer: 43 | print(message) 44 | if self.stop_event.is_set(): 45 | break 46 | 47 | consumer.close() 48 | 49 | 50 | def main(): 51 | # Create 'my-topic' Kafka topic 52 | try: 53 | admin = KafkaAdminClient(bootstrap_servers='localhost:9092') 54 | 55 | topic = NewTopic(name='my-topic', 56 | num_partitions=1, 57 | replication_factor=1) 58 | admin.create_topics([topic]) 59 | except Exception: 60 | pass 61 | 62 | tasks = [ 63 | Producer(), 64 | Consumer() 65 | ] 66 | 67 | # Start threads of a publisher/producer and a subscriber/consumer to 'my-topic' Kafka topic 68 | for t in tasks: 69 | t.start() 70 | 71 | time.sleep(10) 72 | 73 | # Stop threads 74 | for task in tasks: 75 | task.stop() 76 | 77 | for task in tasks: 78 | task.join() 79 | 80 | 81 | if __name__ == "__main__": 82 | main() 83 | -------------------------------------------------------------------------------- /benchmarks/record_batch_compose.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from __future__ import print_function 3 | import hashlib 4 | import itertools 5 | import os 6 | import random 7 | 8 | import pyperf 9 | 10 | from kafka.record.memory_records import MemoryRecordsBuilder 11 | 12 | 13 | DEFAULT_BATCH_SIZE = 1600 * 1024 14 | KEY_SIZE = 6 15 | VALUE_SIZE = 60 16 | TIMESTAMP_RANGE = [1505824130000, 1505824140000] 17 | 18 | # With values above v1 record is 100 bytes, so 10 000 bytes for 100 messages 19 | MESSAGES_PER_BATCH = 100 20 | 21 | 22 | def random_bytes(length): 23 | buffer = bytearray(length) 24 | for i in range(length): 25 | buffer[i] = random.randint(0, 255) 26 | return bytes(buffer) 27 | 28 | 29 | def prepare(): 30 | return iter(itertools.cycle([ 31 | (random_bytes(KEY_SIZE), 32 | random_bytes(VALUE_SIZE), 33 | random.randint(*TIMESTAMP_RANGE) 34 | ) 35 | for _ in range(int(MESSAGES_PER_BATCH * 1.94)) 36 | ])) 37 | 38 | 39 | def finalize(results): 40 | # Just some strange code to make sure PyPy does execute the main code 41 | # properly, without optimizing it away 42 | hash_val = hashlib.md5() 43 | for buf in results: 44 | hash_val.update(buf) 45 | print(hash_val, file=open(os.devnull, "w")) 46 | 47 | 48 | def func(loops, magic): 49 | # Jit can optimize out the whole function if the result 
is the same each 50 | # time, so we need some randomized input data 51 | precomputed_samples = prepare() 52 | results = [] 53 | 54 | # Main benchmark code. 55 | t0 = pyperf.perf_counter() 56 | for _ in range(loops): 57 | batch = MemoryRecordsBuilder( 58 | magic, batch_size=DEFAULT_BATCH_SIZE, compression_type=0) 59 | for _ in range(MESSAGES_PER_BATCH): 60 | key, value, timestamp = next(precomputed_samples) 61 | size = batch.append( 62 | timestamp=timestamp, key=key, value=value) 63 | assert size 64 | batch.close() 65 | results.append(batch.buffer()) 66 | 67 | res = pyperf.perf_counter() - t0 68 | 69 | finalize(results) 70 | 71 | return res 72 | 73 | 74 | runner = pyperf.Runner() 75 | runner.bench_time_func('batch_append_v0', func, 0) 76 | runner.bench_time_func('batch_append_v1', func, 1) 77 | runner.bench_time_func('batch_append_v2', func, 2) 78 | -------------------------------------------------------------------------------- /kafka/vendor/socketpair.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | # vendored from https://github.com/mhils/backports.socketpair 3 | from __future__ import absolute_import 4 | 5 | import sys 6 | import socket 7 | import errno 8 | 9 | _LOCALHOST = '127.0.0.1' 10 | _LOCALHOST_V6 = '::1' 11 | 12 | if not hasattr(socket, "socketpair"): 13 | # Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain. 14 | def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): 15 | if family == socket.AF_INET: 16 | host = _LOCALHOST 17 | elif family == socket.AF_INET6: 18 | host = _LOCALHOST_V6 19 | else: 20 | raise ValueError("Only AF_INET and AF_INET6 socket address families " 21 | "are supported") 22 | if type != socket.SOCK_STREAM: 23 | raise ValueError("Only SOCK_STREAM socket type is supported") 24 | if proto != 0: 25 | raise ValueError("Only protocol zero is supported") 26 | 27 | # We create a connected TCP socket. Note the trick with 28 | # setblocking(False) that prevents us from having to create a thread.
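        # The non-blocking connect() below starts the TCP handshake without
        # blocking the caller; accept() on the listener then completes the pair.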
29 | lsock = socket.socket(family, type, proto) 30 | try: 31 | lsock.bind((host, 0)) 32 | lsock.listen(min(socket.SOMAXCONN, 128)) 33 | # On IPv6, ignore flow_info and scope_id 34 | addr, port = lsock.getsockname()[:2] 35 | csock = socket.socket(family, type, proto) 36 | try: 37 | csock.setblocking(False) 38 | if sys.version_info >= (3, 0): 39 | try: 40 | csock.connect((addr, port)) 41 | except (BlockingIOError, InterruptedError): 42 | pass 43 | else: 44 | try: 45 | csock.connect((addr, port)) 46 | except socket.error as e: 47 | if e.errno != errno.WSAEWOULDBLOCK: 48 | raise 49 | csock.setblocking(True) 50 | ssock, _ = lsock.accept() 51 | except Exception: 52 | csock.close() 53 | raise 54 | finally: 55 | lsock.close() 56 | return (ssock, csock) 57 | 58 | socket.socketpair = socketpair 59 | -------------------------------------------------------------------------------- /benchmarks/record_batch_read.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import print_function 3 | import hashlib 4 | import itertools 5 | import os 6 | import random 7 | 8 | import pyperf 9 | 10 | from kafka.record.memory_records import MemoryRecords, MemoryRecordsBuilder 11 | 12 | 13 | DEFAULT_BATCH_SIZE = 1600 * 1024 14 | KEY_SIZE = 6 15 | VALUE_SIZE = 60 16 | TIMESTAMP_RANGE = [1505824130000, 1505824140000] 17 | 18 | BATCH_SAMPLES = 5 19 | MESSAGES_PER_BATCH = 100 20 | 21 | 22 | def random_bytes(length): 23 | buffer = bytearray(length) 24 | for i in range(length): 25 | buffer[i] = random.randint(0, 255) 26 | return bytes(buffer) 27 | 28 | 29 | def prepare(magic): 30 | samples = [] 31 | for _ in range(BATCH_SAMPLES): 32 | batch = MemoryRecordsBuilder( 33 | magic, batch_size=DEFAULT_BATCH_SIZE, compression_type=0) 34 | for _ in range(MESSAGES_PER_BATCH): 35 | size = batch.append( 36 | random.randint(*TIMESTAMP_RANGE), 37 | random_bytes(KEY_SIZE), 38 | random_bytes(VALUE_SIZE), 39 | headers=[]) 40 | assert size 41 | batch.close() 42 | samples.append(bytes(batch.buffer())) 43 | 44 | return iter(itertools.cycle(samples)) 45 | 46 | 47 | def finalize(results): 48 | # Just some strange code to make sure PyPy does execute the code above 49 | # properly 50 | hash_val = hashlib.md5() 51 | for buf in results: 52 | hash_val.update(buf) 53 | print(hash_val, file=open(os.devnull, "w")) 54 | 55 | 56 | def func(loops, magic): 57 | # Jit can optimize out the whole function if the result is the same each 58 | # time, so we need some randomized input data 59 | precomputed_samples = prepare(magic) 60 | results = [] 61 | 62 | # Main benchmark code. 63 | batch_data = next(precomputed_samples) 64 | t0 = pyperf.perf_counter() 65 | for _ in range(loops): 66 | records = MemoryRecords(batch_data) 67 | while records.has_next(): 68 | batch = records.next_batch() 69 | batch.validate_crc() 70 | for record in batch: 71 | results.append(record.value) 72 | 73 | res = pyperf.perf_counter() - t0 74 | finalize(results) 75 | 76 | return res 77 | 78 | 79 | runner = pyperf.Runner() 80 | runner.bench_time_func('batch_read_v0', func, 0) 81 | runner.bench_time_func('batch_read_v1', func, 1) 82 | runner.bench_time_func('batch_read_v2', func, 2) 83 | -------------------------------------------------------------------------------- /servers/0.8.0/resources/kafka.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements.
See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | ############################# Server Basics ############################# 17 | 18 | broker.id={broker_id} 19 | 20 | ############################# Socket Server Settings ############################# 21 | 22 | port={port} 23 | host.name={host} 24 | 25 | num.network.threads=2 26 | num.io.threads=2 27 | 28 | socket.send.buffer.bytes=1048576 29 | socket.receive.buffer.bytes=1048576 30 | socket.request.max.bytes=104857600 31 | 32 | ############################# Log Basics ############################# 33 | 34 | log.dirs={tmp_dir}/data 35 | num.partitions={partitions} 36 | default.replication.factor={replicas} 37 | 38 | ## Short Replica Lag -- Drops failed brokers out of ISR 39 | replica.lag.time.max.ms=1000 40 | replica.socket.timeout.ms=1000 41 | 42 | ############################# Log Flush Policy ############################# 43 | 44 | log.flush.interval.messages=10000 45 | log.flush.interval.ms=1000 46 | 47 | ############################# Log Retention Policy ############################# 48 | 49 | log.retention.hours=168 50 | log.segment.bytes=536870912 51 | log.cleanup.interval.mins=1 52 | 53 | ############################# Zookeeper ############################# 54 | 55 | zookeeper.connect={zk_host}:{zk_port}/{zk_chroot} 56 | 57 | # Timeout in ms for connecting to zookeeper 58 | zookeeper.connection.timeout.ms=1000000 59 | # We want to expire kafka broker sessions quickly when brokers die b/c we restart them quickly 60 | zookeeper.session.timeout.ms=500 61 | 62 | kafka.metrics.polling.interval.secs=5 63 | kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter 64 | kafka.csv.metrics.dir={tmp_dir} 65 | kafka.csv.metrics.reporter.enabled=false 66 | 67 | log.cleanup.policy=delete 68 | -------------------------------------------------------------------------------- /kafka/coordinator/heartbeat.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division 2 | 3 | import copy 4 | import time 5 | 6 | 7 | class Heartbeat(object): 8 | DEFAULT_CONFIG = { 9 | 'group_id': None, 10 | 'heartbeat_interval_ms': 3000, 11 | 'session_timeout_ms': 10000, 12 | 'max_poll_interval_ms': 300000, 13 | 'retry_backoff_ms': 100, 14 | } 15 | 16 | def __init__(self, **configs): 17 | self.config = copy.copy(self.DEFAULT_CONFIG) 18 | for key in self.config: 19 | if key in configs: 20 | self.config[key] = configs[key] 21 | 22 | if self.config['group_id'] is not None: 23 | assert (self.config['heartbeat_interval_ms'] 24 | <= self.config['session_timeout_ms']), ( 25 | 'Heartbeat interval must be lower than the session timeout') 26 | 27 | self.last_send = -1 * float('inf') 28 | self.last_receive = -1 * float('inf') 29 | self.last_poll = -1 * float('inf') 30 | self.last_reset = time.time() 31 | self.heartbeat_failed = None 
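        # Note: last_send / last_receive / last_poll start at -inf, i.e. 'never
        # happened yet'. time_to_next_heartbeat() and session_timeout_expired()
        # below clamp against last_reset, so a freshly constructed Heartbeat is
        # not treated as instantly overdue.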
32 | 33 | def poll(self): 34 | self.last_poll = time.time() 35 | 36 | def sent_heartbeat(self): 37 | self.last_send = time.time() 38 | self.heartbeat_failed = False 39 | 40 | def fail_heartbeat(self): 41 | self.heartbeat_failed = True 42 | 43 | def received_heartbeat(self): 44 | self.last_receive = time.time() 45 | 46 | def time_to_next_heartbeat(self): 47 | """Returns seconds (float) remaining before next heartbeat should be sent""" 48 | time_since_last_heartbeat = time.time() - max(self.last_send, self.last_reset) 49 | if self.heartbeat_failed: 50 | delay_to_next_heartbeat = self.config['retry_backoff_ms'] / 1000 51 | else: 52 | delay_to_next_heartbeat = self.config['heartbeat_interval_ms'] / 1000 53 | return max(0, delay_to_next_heartbeat - time_since_last_heartbeat) 54 | 55 | def should_heartbeat(self): 56 | return self.time_to_next_heartbeat() == 0 57 | 58 | def session_timeout_expired(self): 59 | last_recv = max(self.last_receive, self.last_reset) 60 | return (time.time() - last_recv) > (self.config['session_timeout_ms'] / 1000) 61 | 62 | def reset_timeouts(self): 63 | self.last_reset = time.time() 64 | self.last_poll = time.time() 65 | self.heartbeat_failed = False 66 | 67 | def poll_timeout_expired(self): 68 | return (time.time() - self.last_poll) > (self.config['max_poll_interval_ms'] / 1000) 69 | -------------------------------------------------------------------------------- /kafka/protocol/struct.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from io import BytesIO 4 | 5 | from kafka.protocol.abstract import AbstractType 6 | from kafka.protocol.types import Schema 7 | 8 | from kafka.util import WeakMethod 9 | 10 | 11 | class Struct(AbstractType): 12 | SCHEMA = Schema() 13 | 14 | def __init__(self, *args, **kwargs): 15 | if len(args) == len(self.SCHEMA.fields): 16 | for i, name in enumerate(self.SCHEMA.names): 17 | self.__dict__[name] = args[i] 18 | elif len(args) > 0: 19 | raise ValueError('Args must be empty or mirror schema') 20 | else: 21 | for name in self.SCHEMA.names: 22 | self.__dict__[name] = kwargs.pop(name, None) 23 | if kwargs: 24 | raise ValueError('Keyword(s) not in schema %s: %s' 25 | % (list(self.SCHEMA.names), 26 | ', '.join(kwargs.keys()))) 27 | 28 | # overloading encode() to support both class and instance 29 | # Without WeakMethod() this creates circular ref, which 30 | # causes instances to "leak" to garbage 31 | self.encode = WeakMethod(self._encode_self) 32 | 33 | 34 | @classmethod 35 | def encode(cls, item): # pylint: disable=E0202 36 | bits = [] 37 | for i, field in enumerate(cls.SCHEMA.fields): 38 | bits.append(field.encode(item[i])) 39 | return b''.join(bits) 40 | 41 | def _encode_self(self): 42 | return self.SCHEMA.encode( 43 | [self.__dict__[name] for name in self.SCHEMA.names] 44 | ) 45 | 46 | @classmethod 47 | def decode(cls, data): 48 | if isinstance(data, bytes): 49 | data = BytesIO(data) 50 | return cls(*[field.decode(data) for field in cls.SCHEMA.fields]) 51 | 52 | def get_item(self, name): 53 | if name not in self.SCHEMA.names: 54 | raise KeyError("%s is not in the schema" % name) 55 | return self.__dict__[name] 56 | 57 | def __repr__(self): 58 | key_vals = [] 59 | for name, field in zip(self.SCHEMA.names, self.SCHEMA.fields): 60 | key_vals.append('%s=%s' % (name, field.repr(self.__dict__[name]))) 61 | return self.__class__.__name__ + '(' + ', '.join(key_vals) + ')' 62 | 63 | def __hash__(self): 64 | return hash(self.encode()) 65 | 66 | def __eq__(self, 
other): 67 | if self.SCHEMA != other.SCHEMA: 68 | return False 69 | for attr in self.SCHEMA.names: 70 | if self.__dict__[attr] != other.__dict__[attr]: 71 | return False 72 | return True 73 | -------------------------------------------------------------------------------- /docs/tests.rst: -------------------------------------------------------------------------------- 1 | Tests 2 | ===== 3 | 4 | .. image:: https://coveralls.io/repos/dpkp/kafka-python/badge.svg?branch=master&service=github 5 | :target: https://coveralls.io/github/dpkp/kafka-python?branch=master 6 | .. image:: https://travis-ci.org/dpkp/kafka-python.svg?branch=master 7 | :target: https://travis-ci.org/dpkp/kafka-python 8 | 9 | Test environments are managed via tox. The test suite is run via pytest. 10 | 11 | Linting is run via pylint, but is generally skipped on pypy due to pylint 12 | compatibility / performance issues. 13 | 14 | For test coverage details, see https://coveralls.io/github/dpkp/kafka-python 15 | 16 | The test suite includes unit tests that mock network interfaces, as well as 17 | integration tests that setup and teardown kafka broker (and zookeeper) 18 | fixtures for client / consumer / producer testing. 19 | 20 | 21 | Unit tests 22 | ------------------ 23 | 24 | To run the tests locally, install tox: 25 | 26 | .. code:: bash 27 | 28 | pip install tox 29 | 30 | For more details, see https://tox.readthedocs.io/en/latest/install.html 31 | 32 | Then simply run tox, optionally setting the python environment. 33 | If unset, tox will loop through all environments. 34 | 35 | .. code:: bash 36 | 37 | tox -e py27 38 | tox -e py35 39 | 40 | # run protocol tests only 41 | tox -- -v test.test_protocol 42 | 43 | # re-run the last failing test, dropping into pdb 44 | tox -e py27 -- --lf --pdb 45 | 46 | # see available (pytest) options 47 | tox -e py27 -- --help 48 | 49 | 50 | Integration tests 51 | ----------------- 52 | 53 | .. code:: bash 54 | 55 | KAFKA_VERSION=0.8.2.2 tox -e py27 56 | KAFKA_VERSION=1.0.1 tox -e py36 57 | 58 | 59 | Integration tests start Kafka and Zookeeper fixtures. This requires downloading 60 | kafka server binaries: 61 | 62 | .. code:: bash 63 | 64 | ./build_integration.sh 65 | 66 | By default, this will install the broker versions listed in build_integration.sh's `ALL_RELEASES` 67 | into the servers/ directory. To install a specific version, set the `KAFKA_VERSION` variable: 68 | 69 | .. code:: bash 70 | 71 | KAFKA_VERSION=1.0.1 ./build_integration.sh 72 | 73 | Then to run the tests against a specific Kafka version, simply set the `KAFKA_VERSION` 74 | env variable to the server build you want to use for testing: 75 | 76 | .. code:: bash 77 | 78 | KAFKA_VERSION=1.0.1 tox -e py36 79 | 80 | To test against the kafka source tree, set KAFKA_VERSION=trunk 81 | [optionally set SCALA_VERSION (defaults to the value set in `build_integration.sh`)] 82 | 83 | .. code:: bash 84 | 85 | SCALA_VERSION=2.12 KAFKA_VERSION=trunk ./build_integration.sh 86 | KAFKA_VERSION=trunk tox -e py36 87 | -------------------------------------------------------------------------------- /servers/0.8.1/resources/kafka.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 
4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | ############################# Server Basics ############################# 17 | 18 | broker.id={broker_id} 19 | 20 | ############################# Socket Server Settings ############################# 21 | 22 | port={port} 23 | host.name={host} 24 | 25 | num.network.threads=2 26 | num.io.threads=2 27 | 28 | socket.send.buffer.bytes=1048576 29 | socket.receive.buffer.bytes=1048576 30 | socket.request.max.bytes=104857600 31 | 32 | ############################# Log Basics ############################# 33 | 34 | log.dirs={tmp_dir}/data 35 | num.partitions={partitions} 36 | default.replication.factor={replicas} 37 | 38 | ## Short Replica Lag -- Drops failed brokers out of ISR 39 | replica.lag.time.max.ms=1000 40 | replica.socket.timeout.ms=1000 41 | 42 | ############################# Log Flush Policy ############################# 43 | 44 | log.flush.interval.messages=10000 45 | log.flush.interval.ms=1000 46 | 47 | ############################# Log Retention Policy ############################# 48 | 49 | log.retention.hours=168 50 | log.segment.bytes=536870912 51 | log.retention.check.interval.ms=60000 52 | log.cleanup.interval.mins=1 53 | log.cleaner.enable=false 54 | 55 | ############################# Zookeeper ############################# 56 | 57 | # Zookeeper connection string (see zookeeper docs for details). 58 | # This is a comma separated host:port pairs, each corresponding to a zk 59 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". 60 | # You can also append an optional chroot string to the urls to specify the 61 | # root directory for all kafka znodes. 
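# For example: zookeeper.connect=127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/kafka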
62 | zookeeper.connect={zk_host}:{zk_port}/{zk_chroot} 63 | 64 | # Timeout in ms for connecting to zookeeper 65 | zookeeper.connection.timeout.ms=1000000 66 | # We want to expire kafka broker sessions quickly when brokers die b/c we restart them quickly 67 | zookeeper.session.timeout.ms=500 68 | -------------------------------------------------------------------------------- /AUTHORS.md: -------------------------------------------------------------------------------- 1 | # Current Maintainer 2 | * Dana Powers, [@dpkp](https://github.com/dpkp) 3 | 4 | # Original Author and First Commit 5 | * David Arthur, [@mumrah](https://github.com/mumrah) 6 | 7 | # Contributors - 2015 (alpha by username) 8 | * Alex Couture-Beil, [@alexcb](https://github.com/alexcb) 9 | * Ali-Akber Saifee, [@alisaifee](https://github.com/alisaifee) 10 | * Christophe-Marie Duquesne, [@chmduquesne](https://github.com/chmduquesne) 11 | * Thomas Dimson, [@cosbynator](https://github.com/cosbynator) 12 | * Kasper Jacobsen, [@Dinoshauer](https://github.com/Dinoshauer) 13 | * Ross Duggan, [@duggan](https://github.com/duggan) 14 | * Enrico Canzonieri, [@ecanzonieri](https://github.com/ecanzonieri) 15 | * haosdent, [@haosdent](https://github.com/haosdent) 16 | * Arturo Filastò, [@hellais](https://github.com/hellais) 17 | * Job Evers‐Meltzer, [@jobevers](https://github.com/jobevers) 18 | * Martin Olveyra, [@kalessin](https://github.com/kalessin) 19 | * Kubilay Kocak, [@koobs](https://github.com/koobs) 20 | * Matthew L Daniel 21 | * Eric Hewitt, [@meandthewallaby](https://github.com/meandthewallaby) 22 | * Oliver Jowett [@mutability](https://github.com/mutability) 23 | * Shaolei Zhou, [@reAsOn2010](https://github.com/reAsOn2010) 24 | * Oskari Saarenmaa, [@saaros](https://github.com/saaros) 25 | * John Anderson, [@sontek](https://github.com/sontek) 26 | * Eduard Iskandarov, [@toidi](https://github.com/toidi) 27 | * Todd Palino, [@toddpalino](https://github.com/toddpalino) 28 | * trbs, [@trbs](https://github.com/trbs) 29 | * Viktor Shlapakov, [@vshlapakov](https://github.com/vshlapakov) 30 | * Will Daly, [@wedaly](https://github.com/wedaly) 31 | * Warren Kiser, [@wkiser](https://github.com/wkiser) 32 | * William Ting, [@wting](https://github.com/wting) 33 | * Zack Dever, [@zackdever](https://github.com/zackdever) 34 | 35 | # More Contributors 36 | * Bruno Renié, [@brutasse](https://github.com/brutasse) 37 | * Thomas Dimson, [@cosbynator](https://github.com/cosbynator) 38 | * Jesse Myers, [@jessemyers](https://github.com/jessemyers) 39 | * Mahendra M, [@mahendra](https://github.com/mahendra) 40 | * Miguel Eduardo Gil Biraud, [@mgilbir](https://github.com/mgilbir) 41 | * Marc Labbé, [@mrtheb](https://github.com/mrtheb) 42 | * Patrick Lucas, [@patricklucas](https://github.com/patricklucas) 43 | * Omar Ghishan, [@rdiomar](https://github.com/rdiomar) - RIP, Omar. 2014 44 | * Ivan Pouzyrevsky, [@sandello](https://github.com/sandello) 45 | * Lou Marvin Caraig, [@se7entyse7en](https://github.com/se7entyse7en) 46 | * waliaashish85, [@waliaashish85](https://github.com/waliaashish85) 47 | * Mark Roberts, [@wizzat](https://github.com/wizzat) 48 | * Christophe Lecointe [@christophelec](https://github.com/christophelec) 49 | * Mohamed Helmi Hichri [@hellich](https://github.com/hellich) 50 | 51 | Thanks to all who have contributed! 
52 | -------------------------------------------------------------------------------- /kafka/future.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import functools 4 | import logging 5 | 6 | log = logging.getLogger(__name__) 7 | 8 | 9 | class Future(object): 10 | error_on_callbacks = False # and errbacks 11 | 12 | def __init__(self): 13 | self.is_done = False 14 | self.value = None 15 | self.exception = None 16 | self._callbacks = [] 17 | self._errbacks = [] 18 | 19 | def succeeded(self): 20 | return self.is_done and not bool(self.exception) 21 | 22 | def failed(self): 23 | return self.is_done and bool(self.exception) 24 | 25 | def retriable(self): 26 | try: 27 | return self.exception.retriable 28 | except AttributeError: 29 | return False 30 | 31 | def success(self, value): 32 | assert not self.is_done, 'Future is already complete' 33 | self.value = value 34 | self.is_done = True 35 | if self._callbacks: 36 | self._call_backs('callback', self._callbacks, self.value) 37 | return self 38 | 39 | def failure(self, e): 40 | assert not self.is_done, 'Future is already complete' 41 | self.exception = e if type(e) is not type else e() 42 | assert isinstance(self.exception, BaseException), ( 43 | 'future failed without an exception') 44 | self.is_done = True 45 | self._call_backs('errback', self._errbacks, self.exception) 46 | return self 47 | 48 | def add_callback(self, f, *args, **kwargs): 49 | if args or kwargs: 50 | f = functools.partial(f, *args, **kwargs) 51 | if self.is_done and not self.exception: 52 | self._call_backs('callback', [f], self.value) 53 | else: 54 | self._callbacks.append(f) 55 | return self 56 | 57 | def add_errback(self, f, *args, **kwargs): 58 | if args or kwargs: 59 | f = functools.partial(f, *args, **kwargs) 60 | if self.is_done and self.exception: 61 | self._call_backs('errback', [f], self.exception) 62 | else: 63 | self._errbacks.append(f) 64 | return self 65 | 66 | def add_both(self, f, *args, **kwargs): 67 | self.add_callback(f, *args, **kwargs) 68 | self.add_errback(f, *args, **kwargs) 69 | return self 70 | 71 | def chain(self, future): 72 | self.add_callback(future.success) 73 | self.add_errback(future.failure) 74 | return self 75 | 76 | def _call_backs(self, back_type, backs, value): 77 | for f in backs: 78 | try: 79 | f(value) 80 | except Exception as e: 81 | log.exception('Error processing %s', back_type) 82 | if self.error_on_callbacks: 83 | raise e 84 | -------------------------------------------------------------------------------- /kafka/metrics/dict_reporter.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import logging 4 | import threading 5 | 6 | from kafka.metrics.metrics_reporter import AbstractMetricsReporter 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | class DictReporter(AbstractMetricsReporter): 12 | """A basic dictionary based metrics reporter. 13 | 14 | Store all metrics in a two level dictionary of category > name > metric. 15 | """ 16 | def __init__(self, prefix=''): 17 | self._lock = threading.Lock() 18 | self._prefix = prefix if prefix else '' # never allow None 19 | self._store = {} 20 | 21 | def snapshot(self): 22 | """ 23 | Return a nested dictionary snapshot of all metrics and their 24 | values at this time. 
Example: 25 | { 26 | 'category': { 27 | 'metric1_name': 42.0, 28 | 'metric2_name': 'foo' 29 | } 30 | } 31 | """ 32 | return dict((category, dict((name, metric.value()) 33 | for name, metric in list(metrics.items()))) 34 | for category, metrics in 35 | list(self._store.items())) 36 | 37 | def init(self, metrics): 38 | for metric in metrics: 39 | self.metric_change(metric) 40 | 41 | def metric_change(self, metric): 42 | with self._lock: 43 | category = self.get_category(metric) 44 | if category not in self._store: 45 | self._store[category] = {} 46 | self._store[category][metric.metric_name.name] = metric 47 | 48 | def metric_removal(self, metric): 49 | with self._lock: 50 | category = self.get_category(metric) 51 | metrics = self._store.get(category, {}) 52 | removed = metrics.pop(metric.metric_name.name, None) 53 | if not metrics: 54 | self._store.pop(category, None) 55 | return removed 56 | 57 | def get_category(self, metric): 58 | """ 59 | Return a string category for the metric. 60 | 61 | The category is made up of this reporter's prefix and the 62 | metric's group and tags. 63 | 64 | Examples: 65 | prefix = 'foo', group = 'bar', tags = {'a': 1, 'b': 2} 66 | returns: 'foo.bar.a=1,b=2' 67 | 68 | prefix = 'foo', group = 'bar', tags = None 69 | returns: 'foo.bar' 70 | 71 | prefix = None, group = 'bar', tags = None 72 | returns: 'bar' 73 | """ 74 | tags = ','.join('%s=%s' % (k, v) for k, v in 75 | sorted(metric.metric_name.tags.items())) 76 | return '.'.join(x for x in 77 | [self._prefix, metric.metric_name.group, tags] if x) 78 | 79 | def configure(self, configs): 80 | pass 81 | 82 | def close(self): 83 | pass 84 | -------------------------------------------------------------------------------- /test/test_acl_comparisons.py: -------------------------------------------------------------------------------- 1 | from kafka.admin.acl_resource import ACL 2 | from kafka.admin.acl_resource import ACLOperation 3 | from kafka.admin.acl_resource import ACLPermissionType 4 | from kafka.admin.acl_resource import ResourcePattern 5 | from kafka.admin.acl_resource import ResourceType 6 | from kafka.admin.acl_resource import ACLResourcePatternType 7 | 8 | 9 | def test_different_acls_are_different(): 10 | one = ACL( 11 | principal='User:A', 12 | host='*', 13 | operation=ACLOperation.ALL, 14 | permission_type=ACLPermissionType.ALLOW, 15 | resource_pattern=ResourcePattern( 16 | resource_type=ResourceType.TOPIC, 17 | resource_name='some-topic', 18 | pattern_type=ACLResourcePatternType.LITERAL 19 | ) 20 | ) 21 | 22 | two = ACL( 23 | principal='User:B', # Different principal 24 | host='*', 25 | operation=ACLOperation.ALL, 26 | permission_type=ACLPermissionType.ALLOW, 27 | resource_pattern=ResourcePattern( 28 | resource_type=ResourceType.TOPIC, 29 | resource_name='some-topic', 30 | pattern_type=ACLResourcePatternType.LITERAL 31 | ) 32 | ) 33 | 34 | assert one != two 35 | assert hash(one) != hash(two) 36 | 37 | def test_different_acls_are_different_with_glob_topics(): 38 | one = ACL( 39 | principal='User:A', 40 | host='*', 41 | operation=ACLOperation.ALL, 42 | permission_type=ACLPermissionType.ALLOW, 43 | resource_pattern=ResourcePattern( 44 | resource_type=ResourceType.TOPIC, 45 | resource_name='*', 46 | pattern_type=ACLResourcePatternType.LITERAL 47 | ) 48 | ) 49 | 50 | two = ACL( 51 | principal='User:B', # Different principal 52 | host='*', 53 | operation=ACLOperation.ALL, 54 | permission_type=ACLPermissionType.ALLOW, 55 | resource_pattern=ResourcePattern( 56 | resource_type=ResourceType.TOPIC, 57 | 
resource_name='*', 58 | pattern_type=ACLResourcePatternType.LITERAL 59 | ) 60 | ) 61 | 62 | assert one != two 63 | assert hash(one) != hash(two) 64 | 65 | def test_same_acls_are_same(): 66 | one = ACL( 67 | principal='User:A', 68 | host='*', 69 | operation=ACLOperation.ALL, 70 | permission_type=ACLPermissionType.ALLOW, 71 | resource_pattern=ResourcePattern( 72 | resource_type=ResourceType.TOPIC, 73 | resource_name='some-topic', 74 | pattern_type=ACLResourcePatternType.LITERAL 75 | ) 76 | ) 77 | 78 | two = ACL( 79 | principal='User:A', 80 | host='*', 81 | operation=ACLOperation.ALL, 82 | permission_type=ACLPermissionType.ALLOW, 83 | resource_pattern=ResourcePattern( 84 | resource_type=ResourceType.TOPIC, 85 | resource_name='some-topic', 86 | pattern_type=ACLResourcePatternType.LITERAL 87 | ) 88 | ) 89 | 90 | assert one == two 91 | assert hash(one) == hash(two) 92 | assert len(set((one, two))) == 1 93 | -------------------------------------------------------------------------------- /test/test_sasl_integration.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import uuid 3 | 4 | import pytest 5 | 6 | from kafka.admin import NewTopic 7 | from kafka.protocol.metadata import MetadataRequest_v1 8 | from test.testutil import assert_message_count, env_kafka_version, random_string, special_to_underscore 9 | 10 | 11 | @pytest.fixture( 12 | params=[ 13 | pytest.param( 14 | "PLAIN", marks=pytest.mark.skipif(env_kafka_version() < (0, 10), reason="Requires KAFKA_VERSION >= 0.10") 15 | ), 16 | pytest.param( 17 | "SCRAM-SHA-256", 18 | marks=pytest.mark.skipif(env_kafka_version() < (0, 10, 2), reason="Requires KAFKA_VERSION >= 0.10.2"), 19 | ), 20 | pytest.param( 21 | "SCRAM-SHA-512", 22 | marks=pytest.mark.skipif(env_kafka_version() < (0, 10, 2), reason="Requires KAFKA_VERSION >= 0.10.2"), 23 | ), 24 | ] 25 | ) 26 | def sasl_kafka(request, kafka_broker_factory): 27 | sasl_kafka = kafka_broker_factory(transport="SASL_PLAINTEXT", sasl_mechanism=request.param)[0] 28 | yield sasl_kafka 29 | sasl_kafka.child.dump_logs() 30 | 31 | 32 | def test_admin(request, sasl_kafka): 33 | topic_name = special_to_underscore(request.node.name + random_string(4)) 34 | admin, = sasl_kafka.get_admin_clients(1) 35 | admin.create_topics([NewTopic(topic_name, 1, 1)]) 36 | assert topic_name in sasl_kafka.get_topic_names() 37 | 38 | 39 | def test_produce_and_consume(request, sasl_kafka): 40 | topic_name = special_to_underscore(request.node.name + random_string(4)) 41 | sasl_kafka.create_topics([topic_name], num_partitions=2) 42 | producer, = sasl_kafka.get_producers(1) 43 | 44 | messages_and_futures = [] # [(message, produce_future),] 45 | for i in range(100): 46 | encoded_msg = "{}-{}-{}".format(i, request.node.name, uuid.uuid4()).encode("utf-8") 47 | future = producer.send(topic_name, value=encoded_msg, partition=i % 2) 48 | messages_and_futures.append((encoded_msg, future)) 49 | producer.flush() 50 | 51 | for (msg, f) in messages_and_futures: 52 | assert f.succeeded() 53 | 54 | consumer, = sasl_kafka.get_consumers(1, [topic_name]) 55 | messages = {0: [], 1: []} 56 | for i, message in enumerate(consumer, 1): 57 | logging.debug("Consumed message %s", repr(message)) 58 | messages[message.partition].append(message) 59 | if i >= 100: 60 | break 61 | 62 | assert_message_count(messages[0], 50) 63 | assert_message_count(messages[1], 50) 64 | 65 | 66 | def test_client(request, sasl_kafka): 67 | topic_name = special_to_underscore(request.node.name + random_string(4)) 68 | 
sasl_kafka.create_topics([topic_name], num_partitions=1) 69 | 70 | client, = sasl_kafka.get_clients(1) 71 | metadata_request = MetadataRequest_v1(None) 72 | client.send(0, metadata_request) 73 | for _ in range(10): 74 | result = client.poll(timeout_ms=10000) 75 | if len(result) > 0: 76 | break 77 | else: 78 | raise RuntimeError("Couldn't fetch topic response from Broker.") 79 | result = result[0] 80 | assert topic_name in [t[1] for t in result.topics] 81 | -------------------------------------------------------------------------------- /kafka/metrics/stats/percentiles.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.metrics import AnonMeasurable, NamedMeasurable 4 | from kafka.metrics.compound_stat import AbstractCompoundStat 5 | from kafka.metrics.stats import Histogram 6 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat 7 | 8 | 9 | class BucketSizing(object): 10 | CONSTANT = 0 11 | LINEAR = 1 12 | 13 | 14 | class Percentiles(AbstractSampledStat, AbstractCompoundStat): 15 | """A compound stat that reports one or more percentiles""" 16 | def __init__(self, size_in_bytes, bucketing, max_val, min_val=0.0, 17 | percentiles=None): 18 | super(Percentiles, self).__init__(0.0) 19 | self._percentiles = percentiles or [] 20 | self._buckets = int(size_in_bytes / 4) 21 | if bucketing == BucketSizing.CONSTANT: 22 | self._bin_scheme = Histogram.ConstantBinScheme(self._buckets, 23 | min_val, max_val) 24 | elif bucketing == BucketSizing.LINEAR: 25 | if min_val != 0.0: 26 | raise ValueError('Linear bucket sizing requires min_val' 27 | ' to be 0.0.') 28 | self._bin_scheme = Histogram.LinearBinScheme(self._buckets, max_val) 29 | else: 30 | raise ValueError('Unknown bucket type: %s' % (bucketing,)) 31 | 32 | def stats(self): 33 | measurables = [] 34 | 35 | def make_measure_fn(pct): 36 | return lambda config, now: self.value(config, now, 37 | pct / 100.0) 38 | 39 | for percentile in self._percentiles: 40 | measure_fn = make_measure_fn(percentile.percentile) 41 | stat = NamedMeasurable(percentile.name, AnonMeasurable(measure_fn)) 42 | measurables.append(stat) 43 | return measurables 44 | 45 | def value(self, config, now, quantile): 46 | self.purge_obsolete_samples(config, now) 47 | count = sum(sample.event_count for sample in self._samples) 48 | if count == 0.0: 49 | return float('NaN') 50 | sum_val = 0.0 51 | quant = float(quantile) 52 | for b in range(self._buckets): 53 | for sample in self._samples: 54 | assert type(sample) is self.HistogramSample 55 | hist = sample.histogram.counts 56 | sum_val += hist[b] 57 | if sum_val / count > quant: 58 | return self._bin_scheme.from_bin(b) 59 | return float('inf') 60 | 61 | def combine(self, samples, config, now): 62 | return self.value(config, now, 0.5) 63 | 64 | def new_sample(self, time_ms): 65 | return Percentiles.HistogramSample(self._bin_scheme, time_ms) 66 | 67 | def update(self, sample, config, value, time_ms): 68 | assert type(sample) is self.HistogramSample 69 | sample.histogram.record(value) 70 | 71 | class HistogramSample(AbstractSampledStat.Sample): 72 | def __init__(self, scheme, now): 73 | super(Percentiles.HistogramSample, self).__init__(0.0, now) 74 | self.histogram = Histogram(scheme) 75 | -------------------------------------------------------------------------------- /kafka/coordinator/assignors/range.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import collections 4 |
import logging 5 | 6 | from kafka.vendor import six 7 | 8 | from kafka.coordinator.assignors.abstract import AbstractPartitionAssignor 9 | from kafka.coordinator.protocol import ConsumerProtocolMemberMetadata, ConsumerProtocolMemberAssignment 10 | 11 | log = logging.getLogger(__name__) 12 | 13 | 14 | class RangePartitionAssignor(AbstractPartitionAssignor): 15 | """ 16 | The range assignor works on a per-topic basis. For each topic, we lay out 17 | the available partitions in numeric order and the consumers in 18 | lexicographic order. We then divide the number of partitions by the total 19 | number of consumers to determine the number of partitions to assign to each 20 | consumer. If it does not evenly divide, then the first few consumers will 21 | have one extra partition. 22 | 23 | For example, suppose there are two consumers C0 and C1, two topics t0 and 24 | t1, and each topic has 3 partitions, resulting in partitions t0p0, t0p1, 25 | t0p2, t1p0, t1p1, and t1p2. 26 | 27 | The assignment will be: 28 | C0: [t0p0, t0p1, t1p0, t1p1] 29 | C1: [t0p2, t1p2] 30 | """ 31 | name = 'range' 32 | version = 0 33 | 34 | @classmethod 35 | def assign(cls, cluster, member_metadata): 36 | consumers_per_topic = collections.defaultdict(list) 37 | for member, metadata in six.iteritems(member_metadata): 38 | for topic in metadata.subscription: 39 | consumers_per_topic[topic].append(member) 40 | 41 | # construct {member_id: {topic: [partition, ...]}} 42 | assignment = collections.defaultdict(dict) 43 | 44 | for topic, consumers_for_topic in six.iteritems(consumers_per_topic): 45 | partitions = cluster.partitions_for_topic(topic) 46 | if partitions is None: 47 | log.warning('No partition metadata for topic %s', topic) 48 | continue 49 | partitions = sorted(partitions) 50 | consumers_for_topic.sort() 51 | 52 | partitions_per_consumer = len(partitions) // len(consumers_for_topic) 53 | consumers_with_extra = len(partitions) % len(consumers_for_topic) 54 | 55 | for i, member in enumerate(consumers_for_topic): 56 | start = partitions_per_consumer * i 57 | start += min(i, consumers_with_extra) 58 | length = partitions_per_consumer 59 | if not i + 1 > consumers_with_extra: 60 | length += 1 61 | assignment[member][topic] = partitions[start:start+length] 62 | 63 | protocol_assignment = {} 64 | for member_id in member_metadata: 65 | protocol_assignment[member_id] = ConsumerProtocolMemberAssignment( 66 | cls.version, 67 | sorted(assignment[member_id].items()), 68 | b'') 69 | return protocol_assignment 70 | 71 | @classmethod 72 | def metadata(cls, topics): 73 | return ConsumerProtocolMemberMetadata(cls.version, list(topics), b'') 74 | 75 | @classmethod 76 | def on_assignment(cls, assignment): 77 | pass 78 | -------------------------------------------------------------------------------- /kafka/metrics/stats/histogram.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import math 4 | 5 | 6 | class Histogram(object): 7 | def __init__(self, bin_scheme): 8 | self._hist = [0.0] * bin_scheme.bins 9 | self._count = 0.0 10 | self._bin_scheme = bin_scheme 11 | 12 | def record(self, value): 13 | self._hist[self._bin_scheme.to_bin(value)] += 1.0 14 | self._count += 1.0 15 | 16 | def value(self, quantile): 17 | if self._count == 0.0: 18 | return float('NaN') 19 | _sum = 0.0 20 | quant = float(quantile) 21 | for i, value in enumerate(self._hist[:-1]): 22 | _sum += value 23 | if _sum / self._count > quant: 24 | return self._bin_scheme.from_bin(i) 25 | 
return float('inf') 26 | 27 | @property 28 | def counts(self): 29 | return self._hist 30 | 31 | def clear(self): 32 | for i in range(len(self._hist)): 33 | self._hist[i] = 0.0 34 | self._count = 0 35 | 36 | def __str__(self): 37 | values = ['%.10f:%.0f' % (self._bin_scheme.from_bin(i), value) for 38 | i, value in enumerate(self._hist[:-1])] 39 | values.append('%s:%s' % (float('inf'), self._hist[-1])) 40 | return '{%s}' % ','.join(values) 41 | 42 | class ConstantBinScheme(object): 43 | def __init__(self, bins, min_val, max_val): 44 | if bins < 2: 45 | raise ValueError('Must have at least 2 bins.') 46 | self._min = float(min_val) 47 | self._max = float(max_val) 48 | self._bins = int(bins) 49 | self._bucket_width = (max_val - min_val) / (bins - 2) 50 | 51 | @property 52 | def bins(self): 53 | return self._bins 54 | 55 | def from_bin(self, b): 56 | if b == 0: 57 | return float('-inf') 58 | elif b == self._bins - 1: 59 | return float('inf') 60 | else: 61 | return self._min + (b - 1) * self._bucket_width 62 | 63 | def to_bin(self, x): 64 | if x < self._min: 65 | return 0 66 | elif x > self._max: 67 | return self._bins - 1 68 | else: 69 | return int(((x - self._min) / self._bucket_width) + 1) 70 | 71 | class LinearBinScheme(object): 72 | def __init__(self, num_bins, max_val): 73 | self._bins = num_bins 74 | self._max = max_val 75 | self._scale = max_val / (num_bins * (num_bins - 1) / 2) 76 | 77 | @property 78 | def bins(self): 79 | return self._bins 80 | 81 | def from_bin(self, b): 82 | if b == self._bins - 1: 83 | return float('inf') 84 | else: 85 | unscaled = (b * (b + 1.0)) / 2.0 86 | return unscaled * self._scale 87 | 88 | def to_bin(self, x): 89 | if x < 0.0: 90 | raise ValueError('Values less than 0.0 not accepted.') 91 | elif x > self._max: 92 | return self._bins - 1 93 | else: 94 | scaled = x / self._scale 95 | return int(-0.5 + math.sqrt(2.0 * scaled + 0.25)) 96 | -------------------------------------------------------------------------------- /kafka/structs.py: -------------------------------------------------------------------------------- 1 | """ Other useful structs """ 2 | from __future__ import absolute_import 3 | 4 | from collections import namedtuple 5 | 6 | 7 | """A topic and partition tuple 8 | 9 | Keyword Arguments: 10 | topic (str): A topic name 11 | partition (int): A partition id 12 | """ 13 | TopicPartition = namedtuple("TopicPartition", 14 | ["topic", "partition"]) 15 | 16 | 17 | """A Kafka broker metadata used by admin tools. 18 | 19 | Keyword Arguments: 20 | nodeId (int): The Kafka broker id. 21 | host (str): The Kafka broker hostname. 22 | port (int): The Kafka broker port. 23 | rack (str): The rack of the broker, which is used in rack-aware 24 | partition assignment for fault tolerance. 25 | Examples: `RACK1`, `us-east-1d`. Default: None 26 | """ 27 | BrokerMetadata = namedtuple("BrokerMetadata", 28 | ["nodeId", "host", "port", "rack"]) 29 | 30 | 31 | """A topic partition metadata describing the state in the MetadataResponse. 32 | 33 | Keyword Arguments: 34 | topic (str): The topic name of the partition this metadata relates to. 35 | partition (int): The id of the partition this metadata relates to. 36 | leader (int): The id of the broker that is the leader for the partition. 37 | replicas (List[int]): The ids of all brokers that contain replicas of the 38 | partition. 39 | isr (List[int]): The ids of all brokers that contain in-sync replicas of 40 | the partition. 41 | error (KafkaError): A KafkaError object associated with the request for 42 | this partition metadata.
43 | """ 44 | PartitionMetadata = namedtuple("PartitionMetadata", 45 | ["topic", "partition", "leader", "replicas", "isr", "error"]) 46 | 47 | 48 | """The Kafka offset commit API 49 | 50 | The Kafka offset commit API allows users to provide additional metadata 51 | (in the form of a string) when an offset is committed. This can be useful 52 | (for example) to store information about which node made the commit, 53 | what time the commit was made, etc. 54 | 55 | Keyword Arguments: 56 | offset (int): The offset to be committed 57 | metadata (str): Non-null metadata 58 | """ 59 | OffsetAndMetadata = namedtuple("OffsetAndMetadata", 60 | # TODO add leaderEpoch: OffsetAndMetadata(offset, leaderEpoch, metadata) 61 | ["offset", "metadata"]) 62 | 63 | 64 | """An offset and timestamp tuple 65 | 66 | Keyword Arguments: 67 | offset (int): An offset 68 | timestamp (int): The timestamp associated to the offset 69 | """ 70 | OffsetAndTimestamp = namedtuple("OffsetAndTimestamp", 71 | ["offset", "timestamp"]) 72 | 73 | MemberInformation = namedtuple("MemberInformation", 74 | ["member_id", "client_id", "client_host", "member_metadata", "member_assignment"]) 75 | 76 | GroupInformation = namedtuple("GroupInformation", 77 | ["error_code", "group", "state", "protocol_type", "protocol", "members", "authorized_operations"]) 78 | 79 | """Define retry policy for async producer 80 | 81 | Keyword Arguments: 82 | Limit (int): Number of retries. limit >= 0, 0 means no retries 83 | backoff_ms (int): Milliseconds to backoff. 84 | retry_on_timeouts: 85 | """ 86 | RetryOptions = namedtuple("RetryOptions", 87 | ["limit", "backoff_ms", "retry_on_timeouts"]) 88 | -------------------------------------------------------------------------------- /kafka/producer/future.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import collections 4 | import threading 5 | 6 | from kafka import errors as Errors 7 | from kafka.future import Future 8 | 9 | 10 | class FutureProduceResult(Future): 11 | def __init__(self, topic_partition): 12 | super(FutureProduceResult, self).__init__() 13 | self.topic_partition = topic_partition 14 | self._latch = threading.Event() 15 | 16 | def success(self, value): 17 | ret = super(FutureProduceResult, self).success(value) 18 | self._latch.set() 19 | return ret 20 | 21 | def failure(self, error): 22 | ret = super(FutureProduceResult, self).failure(error) 23 | self._latch.set() 24 | return ret 25 | 26 | def wait(self, timeout=None): 27 | # wait() on python2.6 returns None instead of the flag value 28 | return self._latch.wait(timeout) or self._latch.is_set() 29 | 30 | 31 | class FutureRecordMetadata(Future): 32 | def __init__(self, produce_future, relative_offset, timestamp_ms, checksum, serialized_key_size, serialized_value_size, serialized_header_size): 33 | super(FutureRecordMetadata, self).__init__() 34 | self._produce_future = produce_future 35 | # packing args as a tuple is a minor speed optimization 36 | self.args = (relative_offset, timestamp_ms, checksum, serialized_key_size, serialized_value_size, serialized_header_size) 37 | produce_future.add_callback(self._produce_success) 38 | produce_future.add_errback(self.failure) 39 | 40 | def _produce_success(self, offset_and_timestamp): 41 | offset, produce_timestamp_ms, log_start_offset = offset_and_timestamp 42 | 43 | # Unpacking from args tuple is minor speed optimization 44 | (relative_offset, timestamp_ms, checksum, 45 | serialized_key_size, serialized_value_size, 
serialized_header_size) = self.args 46 | 47 | # None is when Broker does not support the API (<0.10) and 48 | # -1 is when the broker is configured for CREATE_TIME timestamps 49 | if produce_timestamp_ms is not None and produce_timestamp_ms != -1: 50 | timestamp_ms = produce_timestamp_ms 51 | if offset != -1 and relative_offset is not None: 52 | offset += relative_offset 53 | tp = self._produce_future.topic_partition 54 | metadata = RecordMetadata(tp[0], tp[1], tp, offset, timestamp_ms, log_start_offset, 55 | checksum, serialized_key_size, 56 | serialized_value_size, serialized_header_size) 57 | self.success(metadata) 58 | 59 | def get(self, timeout=None): 60 | if not self.is_done and not self._produce_future.wait(timeout): 61 | raise Errors.KafkaTimeoutError( 62 | "Timeout after waiting for %s secs." % (timeout,)) 63 | assert self.is_done 64 | if self.failed(): 65 | raise self.exception # pylint: disable-msg=raising-bad-type 66 | return self.value 67 | 68 | 69 | RecordMetadata = collections.namedtuple( 70 | 'RecordMetadata', ['topic', 'partition', 'topic_partition', 'offset', 'timestamp', 'log_start_offset', 71 | 'checksum', 'serialized_key_size', 'serialized_value_size', 'serialized_header_size']) 72 | -------------------------------------------------------------------------------- /kafka/scram.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import base64 4 | import hashlib 5 | import hmac 6 | import uuid 7 | 8 | from kafka.vendor import six 9 | 10 | 11 | if six.PY2: 12 | def xor_bytes(left, right): 13 | return bytearray(ord(lb) ^ ord(rb) for lb, rb in zip(left, right)) 14 | else: 15 | def xor_bytes(left, right): 16 | return bytes(lb ^ rb for lb, rb in zip(left, right)) 17 | 18 | 19 | class ScramClient: 20 | MECHANISMS = { 21 | 'SCRAM-SHA-256': hashlib.sha256, 22 | 'SCRAM-SHA-512': hashlib.sha512 23 | } 24 | 25 | def __init__(self, user, password, mechanism): 26 | self.nonce = str(uuid.uuid4()).replace('-', '') 27 | self.auth_message = '' 28 | self.salted_password = None 29 | self.user = user 30 | self.password = password.encode('utf-8') 31 | self.hashfunc = self.MECHANISMS[mechanism] 32 | self.hashname = ''.join(mechanism.lower().split('-')[1:3]) 33 | self.stored_key = None 34 | self.client_key = None 35 | self.client_signature = None 36 | self.client_proof = None 37 | self.server_key = None 38 | self.server_signature = None 39 | 40 | def first_message(self): 41 | client_first_bare = 'n={},r={}'.format(self.user, self.nonce) 42 | self.auth_message += client_first_bare 43 | return 'n,,' + client_first_bare 44 | 45 | def process_server_first_message(self, server_first_message): 46 | self.auth_message += ',' + server_first_message 47 | params = dict(pair.split('=', 1) for pair in server_first_message.split(',')) 48 | server_nonce = params['r'] 49 | if not server_nonce.startswith(self.nonce): 50 | raise ValueError("Server nonce did not start with client nonce!") 51 | self.nonce = server_nonce 52 | self.auth_message += ',c=biws,r=' + self.nonce 53 | 54 | salt = base64.b64decode(params['s'].encode('utf-8')) 55 | iterations = int(params['i']) 56 | self.create_salted_password(salt, iterations) 57 | 58 | self.client_key = self.hmac(self.salted_password, b'Client Key') 59 | self.stored_key = self.hashfunc(self.client_key).digest() 60 | self.client_signature = self.hmac(self.stored_key, self.auth_message.encode('utf-8')) 61 | self.client_proof = xor_bytes(self.client_key, self.client_signature) 62 |
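        # RFC 5802: ClientProof = ClientKey XOR HMAC(StoredKey, AuthMessage).
        # The server XORs the proof with its own HMAC(StoredKey, AuthMessage)
        # to recover ClientKey and checks H(ClientKey) == StoredKey; the
        # ServerSignature computed next lets the client verify the server too.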
self.server_key = self.hmac(self.salted_password, b'Server Key') 63 | self.server_signature = self.hmac(self.server_key, self.auth_message.encode('utf-8')) 64 | 65 | def hmac(self, key, msg): 66 | return hmac.new(key, msg, digestmod=self.hashfunc).digest() 67 | 68 | def create_salted_password(self, salt, iterations): 69 | self.salted_password = hashlib.pbkdf2_hmac( 70 | self.hashname, self.password, salt, iterations 71 | ) 72 | 73 | def final_message(self): 74 | return 'c=biws,r={},p={}'.format(self.nonce, base64.b64encode(self.client_proof).decode('utf-8')) 75 | 76 | def process_server_final_message(self, server_final_message): 77 | params = dict(pair.split('=', 1) for pair in server_final_message.split(',')) 78 | if self.server_signature != base64.b64decode(params['v'].encode('utf-8')): 79 | raise ValueError("Server sent wrong signature!") 80 | 81 | 82 | -------------------------------------------------------------------------------- /test/test_admin.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import kafka.admin 4 | from kafka.errors import IllegalArgumentError 5 | 6 | 7 | def test_config_resource(): 8 | with pytest.raises(KeyError): 9 | bad_resource = kafka.admin.ConfigResource('something', 'foo') 10 | good_resource = kafka.admin.ConfigResource('broker', 'bar') 11 | assert good_resource.resource_type == kafka.admin.ConfigResourceType.BROKER 12 | assert good_resource.name == 'bar' 13 | assert good_resource.configs is None 14 | good_resource = kafka.admin.ConfigResource(kafka.admin.ConfigResourceType.TOPIC, 'baz', {'frob': 'nob'}) 15 | assert good_resource.resource_type == kafka.admin.ConfigResourceType.TOPIC 16 | assert good_resource.name == 'baz' 17 | assert good_resource.configs == {'frob': 'nob'} 18 | 19 | 20 | def test_new_partitions(): 21 | good_partitions = kafka.admin.NewPartitions(6) 22 | assert good_partitions.total_count == 6 23 | assert good_partitions.new_assignments is None 24 | good_partitions = kafka.admin.NewPartitions(7, [[1, 2, 3]]) 25 | assert good_partitions.total_count == 7 26 | assert good_partitions.new_assignments == [[1, 2, 3]] 27 | 28 | 29 | def test_acl_resource(): 30 | good_acl = kafka.admin.ACL( 31 | "User:bar", 32 | "*", 33 | kafka.admin.ACLOperation.ALL, 34 | kafka.admin.ACLPermissionType.ALLOW, 35 | kafka.admin.ResourcePattern( 36 | kafka.admin.ResourceType.TOPIC, 37 | "foo", 38 | kafka.admin.ACLResourcePatternType.LITERAL 39 | ) 40 | ) 41 | 42 | assert(good_acl.resource_pattern.resource_type == kafka.admin.ResourceType.TOPIC) 43 | assert(good_acl.operation == kafka.admin.ACLOperation.ALL) 44 | assert(good_acl.permission_type == kafka.admin.ACLPermissionType.ALLOW) 45 | assert(good_acl.resource_pattern.pattern_type == kafka.admin.ACLResourcePatternType.LITERAL) 46 | 47 | with pytest.raises(IllegalArgumentError): 48 | kafka.admin.ACL( 49 | "User:bar", 50 | "*", 51 | kafka.admin.ACLOperation.ANY, 52 | kafka.admin.ACLPermissionType.ANY, 53 | kafka.admin.ResourcePattern( 54 | kafka.admin.ResourceType.TOPIC, 55 | "foo", 56 | kafka.admin.ACLResourcePatternType.LITERAL 57 | ) 58 | ) 59 | 60 | def test_new_topic(): 61 | with pytest.raises(IllegalArgumentError): 62 | bad_topic = kafka.admin.NewTopic('foo', -1, -1) 63 | with pytest.raises(IllegalArgumentError): 64 | bad_topic = kafka.admin.NewTopic('foo', 1, -1) 65 | with pytest.raises(IllegalArgumentError): 66 | bad_topic = kafka.admin.NewTopic('foo', 1, 1, {1: [1, 1, 1]}) 67 | good_topic = kafka.admin.NewTopic('foo', 1, 2) 68 | assert 
--------------------------------------------------------------------------------
/kafka/partitioner/default.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import

import random

from kafka.vendor import six


class DefaultPartitioner(object):
    """Default partitioner.

    Hashes key to partition using murmur2 hashing (from java client).
    If key is None, selects partition randomly from available,
    or from all partitions if none are currently available.
    """
    @classmethod
    def __call__(cls, key, all_partitions, available):
        """
        Get the partition corresponding to key
        :param key: partitioning key
        :param all_partitions: list of all partitions sorted by partition ID
        :param available: list of available partitions in no particular order
        :return: one of the values from all_partitions or available
        """
        if key is None:
            if available:
                return random.choice(available)
            return random.choice(all_partitions)

        idx = murmur2(key)
        idx &= 0x7fffffff
        idx %= len(all_partitions)
        return all_partitions[idx]
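
# Illustrative aside (not part of the original module): the producer invokes
# the partitioner as a plain callable, roughly
#
#     partitioner = DefaultPartitioner()
#     partitioner(b'user-42', all_partitions=[0, 1, 2], available=[0, 1, 2])
#
# and with key=None the choice falls back to random.choice, so unkeyed
# messages spread across partitions instead of pinning to one.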

# https://github.com/apache/kafka/blob/0.8.2/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L244
def murmur2(data):
    """Pure-python Murmur2 implementation.

    Based on java client, see org.apache.kafka.common.utils.Utils.murmur2

    Args:
        data (bytes): opaque bytes

    Returns: MurmurHash2 of data
    """
    # Python2 bytes is really a str, causing the bitwise operations below to fail
    # so convert to bytearray.
    if six.PY2:
        data = bytearray(bytes(data))

    length = len(data)
    seed = 0x9747b28c
    # 'm' and 'r' are mixing constants generated offline.
    # They're not really 'magic', they just happen to work well.
    m = 0x5bd1e995
    r = 24

    # Initialize the hash to a random value
    h = seed ^ length
    length4 = length // 4

    for i in range(length4):
        i4 = i * 4
        k = ((data[i4 + 0] & 0xff) +
             ((data[i4 + 1] & 0xff) << 8) +
             ((data[i4 + 2] & 0xff) << 16) +
             ((data[i4 + 3] & 0xff) << 24))
        k &= 0xffffffff
        k *= m
        k &= 0xffffffff
        k ^= (k % 0x100000000) >> r  # k ^= k >>> r
        k &= 0xffffffff
        k *= m
        k &= 0xffffffff

        h *= m
        h &= 0xffffffff
        h ^= k
        h &= 0xffffffff

    # Handle the last few bytes of the input array
    extra_bytes = length % 4
    if extra_bytes >= 3:
        h ^= (data[(length & ~3) + 2] & 0xff) << 16
        h &= 0xffffffff
    if extra_bytes >= 2:
        h ^= (data[(length & ~3) + 1] & 0xff) << 8
        h &= 0xffffffff
    if extra_bytes >= 1:
        h ^= (data[length & ~3] & 0xff)
        h &= 0xffffffff
        h *= m
        h &= 0xffffffff

    h ^= (h % 0x100000000) >> 13  # h >>> 13;
    h &= 0xffffffff
    h *= m
    h &= 0xffffffff
    h ^= (h % 0x100000000) >> 15  # h >>> 15;
    h &= 0xffffffff

    return h
--------------------------------------------------------------------------------
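
To make the keyed path concrete, a small demo of the same computation
DefaultPartitioner performs for a non-None key (the key and partition count
are arbitrary):

    from kafka.partitioner.default import murmur2

    key = b'user-42'
    num_partitions = 12
    # murmur2 returns an unsigned 32-bit value in Python (the Java client
    # yields a signed int); masking with 0x7fffffff makes both sides agree
    # before taking the modulo.
    idx = (murmur2(key) & 0x7fffffff) % num_partitions
    print(idx)  # deterministic: the same key always maps to the same partition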