├── .covrc ├── .github ├── dependabot.yml └── workflows │ ├── codeql-analysis.yml │ └── python-package.yml ├── .gitignore ├── .readthedocs.yaml ├── AUTHORS.md ├── CHANGES.md ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.rst ├── docs ├── Makefile ├── apidoc │ ├── BrokerConnection.rst │ ├── ClusterMetadata.rst │ ├── KafkaAdminClient.rst │ ├── KafkaClient.rst │ ├── KafkaConsumer.rst │ ├── KafkaProducer.rst │ └── modules.rst ├── changelog.rst ├── compatibility.rst ├── conf.py ├── index.rst ├── install.rst ├── license.rst ├── make.bat ├── requirements.txt ├── support.rst ├── tests.rst └── usage.rst ├── example.py ├── kafka ├── __init__.py ├── admin │ ├── __init__.py │ ├── acl_resource.py │ ├── client.py │ ├── config_resource.py │ ├── new_partitions.py │ └── new_topic.py ├── benchmarks │ ├── README.md │ ├── __init__.py │ ├── consumer_performance.py │ ├── load_example.py │ ├── producer_performance.py │ ├── record_batch_compose.py │ ├── record_batch_read.py │ └── varint_speed.py ├── client_async.py ├── cluster.py ├── codec.py ├── conn.py ├── consumer │ ├── __init__.py │ ├── fetcher.py │ ├── group.py │ └── subscription_state.py ├── coordinator │ ├── __init__.py │ ├── assignors │ │ ├── __init__.py │ │ ├── abstract.py │ │ ├── range.py │ │ ├── roundrobin.py │ │ └── sticky │ │ │ ├── __init__.py │ │ │ ├── partition_movements.py │ │ │ ├── sorted_set.py │ │ │ └── sticky_assignor.py │ ├── base.py │ ├── consumer.py │ ├── heartbeat.py │ └── protocol.py ├── errors.py ├── future.py ├── metrics │ ├── __init__.py │ ├── compound_stat.py │ ├── dict_reporter.py │ ├── kafka_metric.py │ ├── measurable.py │ ├── measurable_stat.py │ ├── metric_config.py │ ├── metric_name.py │ ├── metrics.py │ ├── metrics_reporter.py │ ├── quota.py │ ├── stat.py │ └── stats │ │ ├── __init__.py │ │ ├── avg.py │ │ ├── count.py │ │ ├── histogram.py │ │ ├── max_stat.py │ │ ├── min_stat.py │ │ ├── percentile.py │ │ ├── percentiles.py │ │ ├── rate.py │ │ ├── sampled_stat.py │ │ ├── sensor.py │ │ └── total.py ├── partitioner │ ├── __init__.py │ └── default.py ├── producer │ ├── __init__.py │ ├── future.py │ ├── kafka.py │ ├── record_accumulator.py │ ├── sender.py │ └── transaction_manager.py ├── protocol │ ├── __init__.py │ ├── abstract.py │ ├── add_offsets_to_txn.py │ ├── add_partitions_to_txn.py │ ├── admin.py │ ├── api.py │ ├── api_versions.py │ ├── broker_api_versions.py │ ├── commit.py │ ├── end_txn.py │ ├── fetch.py │ ├── find_coordinator.py │ ├── frame.py │ ├── group.py │ ├── init_producer_id.py │ ├── list_offsets.py │ ├── message.py │ ├── metadata.py │ ├── offset_for_leader_epoch.py │ ├── parser.py │ ├── pickle.py │ ├── produce.py │ ├── sasl_authenticate.py │ ├── sasl_handshake.py │ ├── struct.py │ ├── txn_offset_commit.py │ └── types.py ├── record │ ├── README │ ├── __init__.py │ ├── _crc32c.py │ ├── abc.py │ ├── default_records.py │ ├── legacy_records.py │ ├── memory_records.py │ └── util.py ├── sasl │ ├── __init__.py │ ├── abc.py │ ├── gssapi.py │ ├── msk.py │ ├── oauth.py │ ├── plain.py │ ├── scram.py │ └── sspi.py ├── serializer │ ├── __init__.py │ └── abstract.py ├── socks5_wrapper.py ├── structs.py ├── util.py ├── vendor │ ├── __init__.py │ ├── enum34.py │ ├── selectors34.py │ ├── six.py │ └── socketpair.py └── version.py ├── pylint.rc ├── pyproject.toml ├── pytest.ini ├── requirements-dev.txt ├── servers ├── 0.10.0.0 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.10.0.1 │ └── resources │ │ ├── kafka.properties │ │ ├── 
kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.10.1.1 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.10.2.1 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.10.2.2 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.11.0.0 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.11.0.1 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.11.0.2 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.11.0.3 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.8.0 │ └── resources │ │ ├── kafka.properties │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.8.1.1 │ └── resources │ │ ├── kafka.properties │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.8.1 │ └── resources │ │ ├── kafka.properties │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.8.2.0 │ └── resources │ │ ├── kafka.properties │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.8.2.1 │ └── resources │ │ ├── kafka.properties │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.8.2.2 │ └── resources │ │ ├── kafka.properties │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.9.0.0 │ └── resources │ │ ├── kafka.properties │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 0.9.0.1 │ └── resources │ │ ├── kafka.properties │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 1.0.0 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 1.0.1 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 1.0.2 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 1.1.0 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 1.1.1 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 2.0.0 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 2.0.1 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 2.1.0 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 2.1.1 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 2.2.1 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 2.3.0 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 2.4.0 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 2.5.0 │ └── 
resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 2.6.0 │ └── resources │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ └── zookeeper.properties ├── 4.0.0 │ └── resources │ │ └── kafka.properties ├── resources │ └── default │ │ ├── kafka.properties │ │ ├── kafka_server_jaas.conf │ │ ├── log4j.properties │ │ ├── sasl_command.conf │ │ └── zookeeper.properties └── trunk │ └── resources │ ├── kafka.properties │ ├── kafka_server_jaas.conf │ ├── log4j.properties │ └── zookeeper.properties ├── setup.py └── test ├── __init__.py ├── conftest.py ├── integration ├── __init__.py ├── conftest.py ├── fixtures.py ├── test_admin_integration.py ├── test_consumer_group.py ├── test_consumer_integration.py ├── test_producer_integration.py └── test_sasl_integration.py ├── record ├── test_default_records.py ├── test_legacy_records.py ├── test_records.py └── test_util.py ├── sasl ├── test_gssapi.py └── test_msk.py ├── service.py ├── test_acl_comparisons.py ├── test_admin.py ├── test_api_object_implementation.py ├── test_assignors.py ├── test_client_async.py ├── test_cluster.py ├── test_codec.py ├── test_conn.py ├── test_consumer.py ├── test_coordinator.py ├── test_fetcher.py ├── test_metrics.py ├── test_object_conversion.py ├── test_package.py ├── test_partition_movements.py ├── test_partitioner.py ├── test_producer.py ├── test_protocol.py ├── test_record_accumulator.py ├── test_sender.py ├── test_subscription_state.py ├── test_util.py └── testutil.py /.covrc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = 3 | kafka/vendor/* 4 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Maintain dependencies for GitHub Actions 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "daily" 8 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # For most projects, this workflow file will not need changing; you simply need 3 | # to commit it to your repository. 4 | # 5 | # You may wish to alter this file to override the set of languages analyzed, 6 | # or to provide custom queries or build logic. 7 | # 8 | # ******** NOTE ******** 9 | # We have attempted to detect the languages in your repository. Please check 10 | # the `language` matrix defined below to confirm you have the correct set of 11 | # supported CodeQL languages. 
12 | # 13 | name: CodeQL 14 | on: 15 | push: 16 | branches: [master] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [master] 20 | schedule: 21 | - cron: 19 10 * * 6 22 | jobs: 23 | analyze: 24 | name: Analyze 25 | runs-on: ubuntu-latest 26 | permissions: 27 | actions: read 28 | contents: read 29 | security-events: write 30 | strategy: 31 | fail-fast: false 32 | matrix: 33 | language: [python] 34 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 35 | # Learn more: 36 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 37 | steps: 38 | - name: Checkout repository 39 | uses: actions/checkout@v4 40 | 41 | # Initializes the CodeQL tools for scanning. 42 | - name: Initialize CodeQL 43 | uses: github/codeql-action/init@v3 44 | with: 45 | languages: ${{ matrix.language }} 46 | # If you wish to specify custom queries, you can do so here or in a config file. 47 | # By default, queries listed here will override any specified in a config file. 48 | # Prefix the list here with "+" to use these queries and those in the config file. 49 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 50 | 51 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 52 | # If this step fails, then you should remove it and run the build manually (see below) 53 | - name: Autobuild 54 | uses: github/codeql-action/autobuild@v3 55 | 56 | # ℹ️ Command-line programs to run using the OS shell. 57 | # 📚 https://git.io/JvXDl 58 | 59 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 60 | # and modify them (or add more) to build your code if your project 61 | # uses a compiled language 62 | 63 | #- run: | 64 | # make bootstrap 65 | # make release 66 | - name: Perform CodeQL Analysis 67 | uses: github/codeql-action/analyze@v3 68 | -------------------------------------------------------------------------------- /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | # Derived from https://github.com/actions/starter-workflows/blob/main/ci/python-package.yml 2 | # 3 | name: Python Package 4 | 5 | on: 6 | push: 7 | branches: ["master"] 8 | pull_request: 9 | branches: ["master"] 10 | 11 | env: 12 | FORCE_COLOR: "1" # Make tools pretty. 
13 | PIP_DISABLE_PIP_VERSION_CHECK: "1" 14 | PIP_NO_PYTHON_VERSION_WARNING: "1" 15 | 16 | jobs: 17 | build: 18 | 19 | runs-on: ubuntu-latest 20 | name: "Test: python ${{ matrix.python }} / kafka ${{ matrix.kafka }}" 21 | continue-on-error: ${{ matrix.experimental || false }} 22 | strategy: 23 | fail-fast: false 24 | matrix: 25 | kafka: 26 | - "0.8.2.2" 27 | - "0.9.0.1" 28 | - "0.10.2.2" 29 | - "0.11.0.3" 30 | - "1.1.1" 31 | - "2.4.0" 32 | - "2.8.2" 33 | - "3.0.2" 34 | - "3.5.2" 35 | - "3.9.0" 36 | - "4.0.0" 37 | python: 38 | - "3.13" 39 | include: 40 | #- python: "pypy3.9" 41 | # kafka: "2.6.0" 42 | # experimental: true 43 | - python: "3.8" 44 | kafka: "4.0.0" 45 | - python: "3.9" 46 | kafka: "4.0.0" 47 | - python: "3.10" 48 | kafka: "4.0.0" 49 | - python: "3.11" 50 | kafka: "4.0.0" 51 | - python: "3.12" 52 | kafka: "4.0.0" 53 | 54 | steps: 55 | - uses: actions/checkout@v4 56 | - name: Set up Python ${{ matrix.python }} 57 | uses: actions/setup-python@v5 58 | with: 59 | python-version: ${{ matrix.python }} 60 | cache: pip 61 | cache-dependency-path: | 62 | requirements-dev.txt 63 | - name: Install dependencies 64 | run: | 65 | sudo apt install -y libsnappy-dev libzstd-dev 66 | python -m pip install --upgrade pip 67 | pip install -r requirements-dev.txt 68 | - name: Pylint 69 | run: pylint --recursive=y --errors-only --exit-zero kafka test 70 | - name: Setup java 71 | uses: actions/setup-java@v4 72 | with: 73 | distribution: temurin 74 | java-version: 23 75 | - name: Restore cached kafka releases 76 | id: cache-servers-dist-restore 77 | uses: actions/cache/restore@v4 78 | with: 79 | path: servers/dist 80 | key: servers-dist-${{ matrix.kafka }} 81 | - name: Install Kafka release 82 | run: make servers/${{ matrix.kafka }}/kafka-bin 83 | - name: Update kafka release cache 84 | id: cache-servers-dist-save 85 | uses: actions/cache/save@v4 86 | with: 87 | path: servers/dist 88 | key: ${{ steps.cache-servers-dist-restore.outputs.cache-primary-key }} 89 | - name: Pytest 90 | run: make test 91 | env: 92 | KAFKA_VERSION: ${{ matrix.kafka }} 93 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.egg-info 2 | *.pyc 3 | .tox 4 | build 5 | dist 6 | MANIFEST 7 | env 8 | servers/*/kafka-bin* 9 | servers/*/resources/ssl* 10 | .coverage* 11 | .noseids 12 | docs/_build 13 | .cache* 14 | .idea/ 15 | integration-test/ 16 | tests-env/ 17 | .pytest_cache/ 18 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file for Sphinx projects 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | 4 | # Required 5 | version: 2 6 | 7 | # Set the OS, Python version and other tools you might need 8 | build: 9 | os: ubuntu-22.04 10 | tools: 11 | python: "3.12" 12 | # You can also specify other tool versions: 13 | # nodejs: "20" 14 | # rust: "1.70" 15 | # golang: "1.20" 16 | 17 | # Build documentation in the "docs/" directory with Sphinx 18 | sphinx: 19 | configuration: docs/conf.py 20 | # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs 21 | # builder: "dirhtml" 22 | # Fail on all warnings to avoid broken references 23 | # fail_on_warning: true 24 | 25 | # Optionally build your docs in additional formats such as PDF and ePub 26 | # formats: 27 | # - 
pdf 28 | # - epub 29 | 30 | # Optional but recommended, declare the Python requirements required 31 | # to build your documentation 32 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 33 | python: 34 | install: 35 | - requirements: docs/requirements.txt 36 | -------------------------------------------------------------------------------- /AUTHORS.md: -------------------------------------------------------------------------------- 1 | # Current Maintainer 2 | * Dana Powers, [@dpkp](https://github.com/dpkp) 3 | 4 | # Original Author and First Commit 5 | * David Arthur, [@mumrah](https://github.com/mumrah) 6 | 7 | # Contributors - 2015 (alpha by username) 8 | * Alex Couture-Beil, [@alexcb](https://github.com/alexcb) 9 | * Ali-Akber Saifee, [@alisaifee](https://github.com/alisaifee) 10 | * Christophe-Marie Duquesne, [@chmduquesne](https://github.com/chmduquesne) 11 | * Thomas Dimson, [@cosbynator](https://github.com/cosbynator) 12 | * Kasper Jacobsen, [@Dinoshauer](https://github.com/Dinoshauer) 13 | * Ross Duggan, [@duggan](https://github.com/duggan) 14 | * Enrico Canzonieri, [@ecanzonieri](https://github.com/ecanzonieri) 15 | * haosdent, [@haosdent](https://github.com/haosdent) 16 | * Arturo Filastò, [@hellais](https://github.com/hellais) 17 | * Job Evers‐Meltzer, [@jobevers](https://github.com/jobevers) 18 | * Martin Olveyra, [@kalessin](https://github.com/kalessin) 19 | * Kubilay Kocak, [@koobs](https://github.com/koobs) 20 | * Matthew L Daniel 21 | * Eric Hewitt, [@meandthewallaby](https://github.com/meandthewallaby) 22 | * Oliver Jowett [@mutability](https://github.com/mutability) 23 | * Shaolei Zhou, [@reAsOn2010](https://github.com/reAsOn2010) 24 | * Oskari Saarenmaa, [@saaros](https://github.com/saaros) 25 | * John Anderson, [@sontek](https://github.com/sontek) 26 | * Eduard Iskandarov, [@toidi](https://github.com/toidi) 27 | * Todd Palino, [@toddpalino](https://github.com/toddpalino) 28 | * trbs, [@trbs](https://github.com/trbs) 29 | * Viktor Shlapakov, [@vshlapakov](https://github.com/vshlapakov) 30 | * Will Daly, [@wedaly](https://github.com/wedaly) 31 | * Warren Kiser, [@wkiser](https://github.com/wkiser) 32 | * William Ting, [@wting](https://github.com/wting) 33 | * Zack Dever, [@zackdever](https://github.com/zackdever) 34 | 35 | # More Contributors 36 | * Bruno Renié, [@brutasse](https://github.com/brutasse) 37 | * Thomas Dimson, [@cosbynator](https://github.com/cosbynator) 38 | * Jesse Myers, [@jessemyers](https://github.com/jessemyers) 39 | * Mahendra M, [@mahendra](https://github.com/mahendra) 40 | * Miguel Eduardo Gil Biraud, [@mgilbir](https://github.com/mgilbir) 41 | * Marc Labbé, [@mrtheb](https://github.com/mrtheb) 42 | * Patrick Lucas, [@patricklucas](https://github.com/patricklucas) 43 | * Omar Ghishan, [@rdiomar](https://github.com/rdiomar) - RIP, Omar. 2014 44 | * Ivan Pouzyrevsky, [@sandello](https://github.com/sandello) 45 | * Lou Marvin Caraig, [@se7entyse7en](https://github.com/se7entyse7en) 46 | * waliaashish85, [@waliaashish85](https://github.com/waliaashish85) 47 | * Mark Roberts, [@wizzat](https://github.com/wizzat) 48 | * Christophe Lecointe [@christophelec](https://github.com/christophelec) 49 | * Mohamed Helmi Hichri [@hellich](https://github.com/hellich) 50 | 51 | Thanks to all who have contributed! 
52 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include kafka *.py 2 | include README.rst 3 | include LICENSE 4 | include AUTHORS.md 5 | include CHANGES.md 6 | -------------------------------------------------------------------------------- /docs/apidoc/BrokerConnection.rst: -------------------------------------------------------------------------------- 1 | BrokerConnection 2 | ================ 3 | 4 | .. autoclass:: kafka.BrokerConnection 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/apidoc/ClusterMetadata.rst: -------------------------------------------------------------------------------- 1 | ClusterMetadata 2 | =============== 3 | 4 | .. autoclass:: kafka.cluster.ClusterMetadata 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/apidoc/KafkaAdminClient.rst: -------------------------------------------------------------------------------- 1 | KafkaAdminClient 2 | ================ 3 | 4 | .. autoclass:: kafka.KafkaAdminClient 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/apidoc/KafkaClient.rst: -------------------------------------------------------------------------------- 1 | KafkaClient 2 | =========== 3 | 4 | .. autoclass:: kafka.KafkaClient 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/apidoc/KafkaConsumer.rst: -------------------------------------------------------------------------------- 1 | KafkaConsumer 2 | ============= 3 | 4 | .. autoclass:: kafka.KafkaConsumer 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/apidoc/KafkaProducer.rst: -------------------------------------------------------------------------------- 1 | KafkaProducer 2 | ============= 3 | 4 | .. autoclass:: kafka.KafkaProducer 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/apidoc/modules.rst: -------------------------------------------------------------------------------- 1 | kafka-python API 2 | **************** 3 | 4 | .. toctree:: 5 | 6 | KafkaConsumer 7 | KafkaProducer 8 | KafkaAdminClient 9 | KafkaClient 10 | BrokerConnection 11 | ClusterMetadata 12 | -------------------------------------------------------------------------------- /docs/compatibility.rst: -------------------------------------------------------------------------------- 1 | Compatibility 2 | ------------- 3 | 4 | .. image:: https://img.shields.io/badge/kafka-4.0--0.8-brightgreen.svg 5 | :target: https://kafka-python.readthedocs.io/compatibility.html 6 | .. image:: https://img.shields.io/pypi/pyversions/kafka-python.svg 7 | :target: https://pypi.python.org/pypi/kafka-python 8 | 9 | kafka-python is compatible with (and tested against) broker versions 4.0 10 | through 0.8.0. kafka-python is not compatible with the 0.8.2-beta release. 11 | 12 | Because the Kafka server protocol is backwards compatible, kafka-python is 13 | expected to work with newer broker releases as well. 14 | 15 | Although kafka-python is tested and expected to work on recent broker versions, 16 | not all features are supported. Specifically, transactional producer/consumer 17 | support is not fully implemented. PRs welcome! 18 | 19 | kafka-python is tested on python 2.7 and 3.8-3.13.
20 | 21 | Builds and tests run via GitHub Actions workflows. See https://github.com/dpkp/kafka-python/actions 22 | -------------------------------------------------------------------------------- /docs/install.rst: -------------------------------------------------------------------------------- 1 | Install 2 | ####### 3 | 4 | Install with your favorite package manager. 5 | 6 | Latest Release 7 | ************** 8 | Pip: 9 | 10 | .. code:: bash 11 | 12 | pip install kafka-python 13 | 14 | Releases are also listed at https://github.com/dpkp/kafka-python/releases 15 | 16 | 17 | Bleeding-Edge 18 | ************* 19 | 20 | .. code:: bash 21 | 22 | git clone https://github.com/dpkp/kafka-python 23 | pip install ./kafka-python 24 | 25 | 26 | Optional crc32c install 27 | *********************** 28 | Highly recommended if you are using Kafka 0.11+ brokers. For those brokers, 29 | `kafka-python` uses a newer message protocol version that requires calculating 30 | a `crc32c` checksum, which differs from the `zlib.crc32` implementation. By default 31 | `kafka-python` calculates it in pure python, which is quite slow. To speed it up, we 32 | optionally support the https://pypi.python.org/pypi/crc32c package if it's installed. 33 | 34 | .. code:: bash 35 | 36 | pip install 'kafka-python[crc32c]' 37 | 38 | 39 | Optional ZSTD install 40 | ********************* 41 | 42 | To enable ZSTD compression/decompression, install python-zstandard: 43 | 44 | >>> pip install 'kafka-python[zstd]' 45 | 46 | 47 | Optional LZ4 install 48 | ******************** 49 | 50 | To enable LZ4 compression/decompression, install python-lz4: 51 | 52 | >>> pip install 'kafka-python[lz4]' 53 | 54 | 55 | Optional Snappy install 56 | *********************** 57 | 58 | Install Development Libraries 59 | ============================= 60 | 61 | Download and build Snappy from https://google.github.io/snappy/ 62 | 63 | Ubuntu: 64 | 65 | .. code:: bash 66 | 67 | apt-get install libsnappy-dev 68 | 69 | OSX: 70 | 71 | .. code:: bash 72 | 73 | brew install snappy 74 | 75 | From Source: 76 | 77 | .. code:: bash 78 | 79 | wget https://github.com/google/snappy/releases/download/1.1.3/snappy-1.1.3.tar.gz 80 | tar xzvf snappy-1.1.3.tar.gz 81 | cd snappy-1.1.3 82 | ./configure 83 | make 84 | sudo make install 85 | 86 | Install Python Module 87 | ===================== 88 | 89 | Install the `python-snappy` module: 90 | 91 | .. code:: bash 92 | 93 | pip install 'kafka-python[snappy]' 94 | -------------------------------------------------------------------------------- /docs/license.rst: -------------------------------------------------------------------------------- 1 | License 2 | ------- 3 | 4 | .. image:: https://img.shields.io/badge/license-Apache%202-blue.svg 5 | :target: https://github.com/dpkp/kafka-python/blob/master/LICENSE 6 | 7 | Apache License, v2.0. See `LICENSE <https://github.com/dpkp/kafka-python/blob/master/LICENSE>`_. 8 | 9 | Copyright 2025, Dana Powers, David Arthur, and Contributors 10 | (See `AUTHORS <https://github.com/dpkp/kafka-python/blob/master/AUTHORS.md>`_). 11 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx==8.1.3 2 | sphinx_rtd_theme==3.0.2 3 | 4 | # Install kafka-python in editable mode 5 | # This allows the sphinx autodoc module 6 | # to load the Python modules and extract docstrings. 7 | # -e ..
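A minimal sketch of a local docs build using the pins above (assuming Sphinx's conventional ``html`` target in the ``docs/Makefile`` shipped in this repo; the output directory matches the ``docs/_build`` entry in ``.gitignore``):

.. code:: bash

    pip install -r docs/requirements.txt
    make -C docs html    # HTML output lands in docs/_build/html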
8 | -------------------------------------------------------------------------------- /docs/support.rst: -------------------------------------------------------------------------------- 1 | Support 2 | ------- 3 | 4 | For support, see GitHub issues at https://github.com/dpkp/kafka-python 5 | 6 | Limited IRC chat at #kafka-python on freenode (general chat is #apache-kafka). 7 | 8 | For information about Apache Kafka generally, see https://kafka.apache.org/ 9 | 10 | For general discussion of kafka-client design and implementation (not python 11 | specific), see https://groups.google.com/forum/m/#!forum/kafka-clients 12 | -------------------------------------------------------------------------------- /docs/tests.rst: -------------------------------------------------------------------------------- 1 | Tests 2 | ===== 3 | 4 | .. image:: https://coveralls.io/repos/dpkp/kafka-python/badge.svg?branch=master&service=github 5 | :target: https://coveralls.io/github/dpkp/kafka-python?branch=master 6 | .. image:: https://img.shields.io/github/actions/workflow/status/dpkp/kafka-python/python-package.yml 7 | :target: https://github.com/dpkp/kafka-python/actions/workflows/python-package.yml 8 | 9 | The test suite is run via pytest. 10 | 11 | Linting is run via pylint, but is currently skipped during CI/CD due to 12 | accumulated debt. We'd like to transition to ruff! 13 | 14 | For test coverage details, see https://coveralls.io/github/dpkp/kafka-python 15 | Coverage reporting is currently disabled as we have transitioned from Travis 16 | to GH Actions and have not yet re-enabled coveralls integration. 17 | 18 | The test suite includes unit tests that mock network interfaces, as well as 19 | integration tests that set up and tear down Kafka broker (and Zookeeper) 20 | fixtures for client / consumer / producer testing. 21 | 22 | 23 | Unit tests 24 | ------------------ 25 | 26 | To run the tests locally, install test dependencies: 27 | 28 | .. code:: bash 29 | 30 | pip install -r requirements-dev.txt 31 | 32 | Then simply run pytest (or make test) from your preferred python + virtualenv. 33 | 34 | .. code:: bash 35 | 36 | # run protocol tests only (via pytest) 37 | pytest test/test_protocol.py 38 | 39 | # Run conn tests only (via make) 40 | PYTESTS=test/test_conn.py make test 41 | 42 | 43 | Integration tests 44 | ----------------- 45 | 46 | .. code:: bash 47 | 48 | KAFKA_VERSION=4.0.0 make test 49 | 50 | 51 | Integration tests start Kafka and Zookeeper fixtures. Make will download 52 | kafka server binaries automatically if needed.
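As a concrete sketch, the ``KAFKA_VERSION`` and ``PYTESTS`` variables shown above can be combined to narrow an integration run to a single test module (the module path below comes from ``test/integration/`` in this repo):

.. code:: bash

    # Run only the consumer integration tests against a 4.0.0 broker fixture
    KAFKA_VERSION=4.0.0 PYTESTS=test/integration/test_consumer_integration.py make test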
53 | -------------------------------------------------------------------------------- /example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import threading, time 3 | 4 | from kafka import KafkaAdminClient, KafkaConsumer, KafkaProducer 5 | from kafka.admin import NewTopic 6 | 7 | 8 | class Producer(threading.Thread): 9 | def __init__(self): 10 | threading.Thread.__init__(self) 11 | self.stop_event = threading.Event() 12 | 13 | def stop(self): 14 | self.stop_event.set() 15 | 16 | def run(self): 17 | producer = KafkaProducer(bootstrap_servers='localhost:9092') 18 | 19 | while not self.stop_event.is_set(): 20 | producer.send('my-topic', b"test") 21 | producer.send('my-topic', b"\xc2Hola, mundo!") 22 | time.sleep(1) 23 | 24 | producer.close() 25 | 26 | 27 | class Consumer(threading.Thread): 28 | def __init__(self): 29 | threading.Thread.__init__(self) 30 | self.stop_event = threading.Event() 31 | 32 | def stop(self): 33 | self.stop_event.set() 34 | 35 | def run(self): 36 | consumer = KafkaConsumer(bootstrap_servers='localhost:9092', 37 | auto_offset_reset='earliest', 38 | consumer_timeout_ms=1000) 39 | consumer.subscribe(['my-topic']) 40 | 41 | while not self.stop_event.is_set(): 42 | for message in consumer: 43 | print(message) 44 | if self.stop_event.is_set(): 45 | break 46 | 47 | consumer.close() 48 | 49 | 50 | def main(): 51 | # Create 'my-topic' Kafka topic 52 | try: 53 | admin = KafkaAdminClient(bootstrap_servers='localhost:9092') 54 | 55 | topic = NewTopic(name='my-topic', 56 | num_partitions=1, 57 | replication_factor=1) 58 | admin.create_topics([topic]) 59 | except Exception: 60 | pass 61 | 62 | tasks = [ 63 | Producer(), 64 | Consumer() 65 | ] 66 | 67 | # Start threads of a publisher/producer and a subscriber/consumer to 'my-topic' Kafka topic 68 | for t in tasks: 69 | t.start() 70 | 71 | time.sleep(10) 72 | 73 | # Stop threads 74 | for task in tasks: 75 | task.stop() 76 | 77 | for task in tasks: 78 | task.join() 79 | 80 | 81 | if __name__ == "__main__": 82 | main() 83 | -------------------------------------------------------------------------------- /kafka/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | __title__ = 'kafka' 4 | from kafka.version import __version__ 5 | __author__ = 'Dana Powers' 6 | __license__ = 'Apache License 2.0' 7 | __copyright__ = 'Copyright 2025 Dana Powers, David Arthur, and Contributors' 8 | 9 | # Set default logging handler to avoid "No handler found" warnings. 
10 | import logging 11 | try: # Python 2.7+ 12 | from logging import NullHandler 13 | except ImportError: 14 | class NullHandler(logging.Handler): 15 | def emit(self, record): 16 | pass 17 | 18 | logging.getLogger(__name__).addHandler(NullHandler()) 19 | 20 | 21 | from kafka.admin import KafkaAdminClient 22 | from kafka.client_async import KafkaClient 23 | from kafka.consumer import KafkaConsumer 24 | from kafka.consumer.subscription_state import ConsumerRebalanceListener 25 | from kafka.producer import KafkaProducer 26 | from kafka.conn import BrokerConnection 27 | from kafka.serializer import Serializer, Deserializer 28 | from kafka.structs import TopicPartition, OffsetAndMetadata 29 | 30 | 31 | __all__ = [ 32 | 'BrokerConnection', 'ConsumerRebalanceListener', 'KafkaAdminClient', 33 | 'KafkaClient', 'KafkaConsumer', 'KafkaProducer', 34 | ] 35 | -------------------------------------------------------------------------------- /kafka/admin/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.admin.config_resource import ConfigResource, ConfigResourceType 4 | from kafka.admin.client import KafkaAdminClient 5 | from kafka.admin.acl_resource import (ACL, ACLFilter, ResourcePattern, ResourcePatternFilter, ACLOperation, 6 | ResourceType, ACLPermissionType, ACLResourcePatternType) 7 | from kafka.admin.new_topic import NewTopic 8 | from kafka.admin.new_partitions import NewPartitions 9 | 10 | __all__ = [ 11 | 'ConfigResource', 'ConfigResourceType', 'KafkaAdminClient', 'NewTopic', 'NewPartitions', 'ACL', 'ACLFilter', 12 | 'ResourcePattern', 'ResourcePatternFilter', 'ACLOperation', 'ResourceType', 'ACLPermissionType', 13 | 'ACLResourcePatternType' 14 | ] 15 | -------------------------------------------------------------------------------- /kafka/admin/config_resource.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | # enum in stdlib as of py3.4 4 | try: 5 | from enum import IntEnum # pylint: disable=import-error 6 | except ImportError: 7 | # vendored backport module 8 | from kafka.vendor.enum34 import IntEnum 9 | 10 | 11 | class ConfigResourceType(IntEnum): 12 | """An enumerated type of config resources""" 13 | 14 | BROKER = 4 15 | TOPIC = 2 16 | 17 | 18 | class ConfigResource(object): 19 | """A class for specifying config resources. 20 | Arguments: 21 | resource_type (ConfigResourceType): the type of kafka resource 22 | name (string): The name of the kafka resource 23 | configs ({key: value}): A map of config keys to values. 24 | """ 25 | 26 | def __init__( 27 | self, 28 | resource_type, 29 | name, 30 | configs=None 31 | ): 32 | if not isinstance(resource_type, ConfigResourceType): 33 | resource_type = ConfigResourceType[str(resource_type).upper()] # pylint: disable-msg=unsubscriptable-object 34 | self.resource_type = resource_type 35 | self.name = name 36 | self.configs = configs 37 | -------------------------------------------------------------------------------- /kafka/admin/new_partitions.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | 4 | class NewPartitions(object): 5 | """A class for new partition creation on existing topics. Note that the length of new_assignments, if specified, 6 | must be the difference between the new total number of partitions and the existing number of partitions.
7 | Arguments: 8 | total_count (int): the total number of partitions that should exist on the topic 9 | new_assignments ([[int]]): an array of arrays of replica assignments for new partitions. 10 | If not set, broker assigns replicas per an internal algorithm. 11 | """ 12 | 13 | def __init__( 14 | self, 15 | total_count, 16 | new_assignments=None 17 | ): 18 | self.total_count = total_count 19 | self.new_assignments = new_assignments 20 | -------------------------------------------------------------------------------- /kafka/admin/new_topic.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.errors import IllegalArgumentError 4 | 5 | 6 | class NewTopic(object): 7 | """ A class for new topic creation 8 | Arguments: 9 | name (string): name of the topic 10 | num_partitions (int): number of partitions 11 | or -1 if replica_assignments has been specified 12 | replication_factor (int): replication factor or -1 if 13 | replica assignment is specified 14 | replica_assignments (dict of int: [int]): A mapping containing 15 | partition id and replicas to assign to it. 16 | topic_configs (dict of str: str): A mapping of config key 17 | and value for the topic. 18 | """ 19 | 20 | def __init__( 21 | self, 22 | name, 23 | num_partitions, 24 | replication_factor, 25 | replica_assignments=None, 26 | topic_configs=None, 27 | ): 28 | if not (num_partitions == -1 or replication_factor == -1) ^ (replica_assignments is None): 29 | raise IllegalArgumentError('either num_partitions/replication_factor or replica_assignments must be specified') 30 | self.name = name 31 | self.num_partitions = num_partitions 32 | self.replication_factor = replication_factor 33 | self.replica_assignments = replica_assignments or {} 34 | self.topic_configs = topic_configs or {} 35 | -------------------------------------------------------------------------------- /kafka/benchmarks/README.md: -------------------------------------------------------------------------------- 1 | The `record_batch_*` benchmarks in this section are written using the 2 | ``pyperf`` library, created by Victor Stinner. For more information on 3 | how to get reliable results of test runs, please consult 4 | https://pyperf.readthedocs.io/en/latest/run_benchmark.html.
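As a sketch of such an invocation (assuming a repository checkout where the `kafka` package is importable), results can be written to JSON and two runs compared with pyperf's standard CLI:

```
# Each benchmark script embeds a pyperf.Runner, so it accepts the usual pyperf options
python kafka/benchmarks/record_batch_compose.py -o before.json
python kafka/benchmarks/record_batch_compose.py -o after.json
python -m pyperf compare_to before.json after.json
```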
5 | -------------------------------------------------------------------------------- /kafka/benchmarks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dpkp/kafka-python/e6abbbf284a1556536941fb8d99fb5ca03aa1e22/kafka/benchmarks/__init__.py -------------------------------------------------------------------------------- /kafka/benchmarks/record_batch_compose.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from __future__ import print_function 3 | import hashlib 4 | import itertools 5 | import os 6 | import random 7 | 8 | import pyperf 9 | 10 | from kafka.record.memory_records import MemoryRecordsBuilder 11 | 12 | 13 | DEFAULT_BATCH_SIZE = 1600 * 1024 14 | KEY_SIZE = 6 15 | VALUE_SIZE = 60 16 | TIMESTAMP_RANGE = [1505824130000, 1505824140000] 17 | 18 | # With the values above, a v1 record is 100 bytes, so 10 000 bytes for 100 messages 19 | MESSAGES_PER_BATCH = 100 20 | 21 | 22 | def random_bytes(length): 23 | buffer = bytearray(length) 24 | for i in range(length): 25 | buffer[i] = random.randint(0, 255) 26 | return bytes(buffer) 27 | 28 | 29 | def prepare(): 30 | return iter(itertools.cycle([ 31 | (random_bytes(KEY_SIZE), 32 | random_bytes(VALUE_SIZE), 33 | random.randint(*TIMESTAMP_RANGE) 34 | ) 35 | for _ in range(int(MESSAGES_PER_BATCH * 1.94)) 36 | ])) 37 | 38 | 39 | def finalize(results): 40 | # Just some strange code to make sure PyPy does execute the main code 41 | # properly, without optimizing it away 42 | hash_val = hashlib.md5() 43 | for buf in results: 44 | hash_val.update(buf) 45 | print(hash_val, file=open(os.devnull, "w")) 46 | 47 | 48 | def func(loops, magic): 49 | # The JIT can optimize out the whole function if the result is the same each 50 | # time, so we need some randomized input data. 51 | precomputed_samples = prepare() 52 | results = [] 53 | 54 | # Main benchmark code.
55 | t0 = pyperf.perf_counter() 56 | for _ in range(loops): 57 | batch = MemoryRecordsBuilder( 58 | magic, batch_size=DEFAULT_BATCH_SIZE, compression_type=0) 59 | for _ in range(MESSAGES_PER_BATCH): 60 | key, value, timestamp = next(precomputed_samples) 61 | size = batch.append( 62 | timestamp=timestamp, key=key, value=value) 63 | assert size 64 | batch.close() 65 | results.append(batch.buffer()) 66 | 67 | res = pyperf.perf_counter() - t0 68 | 69 | finalize(results) 70 | 71 | return res 72 | 73 | 74 | if __name__ == '__main__': 75 | runner = pyperf.Runner() 76 | runner.bench_time_func('batch_append_v0', func, 0) 77 | runner.bench_time_func('batch_append_v1', func, 1) 78 | runner.bench_time_func('batch_append_v2', func, 2) 79 | -------------------------------------------------------------------------------- /kafka/benchmarks/record_batch_read.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import print_function 3 | import hashlib 4 | import itertools 5 | import os 6 | import random 7 | 8 | import pyperf 9 | 10 | from kafka.record.memory_records import MemoryRecords, MemoryRecordsBuilder 11 | 12 | 13 | DEFAULT_BATCH_SIZE = 1600 * 1024 14 | KEY_SIZE = 6 15 | VALUE_SIZE = 60 16 | TIMESTAMP_RANGE = [1505824130000, 1505824140000] 17 | 18 | BATCH_SAMPLES = 5 19 | MESSAGES_PER_BATCH = 100 20 | 21 | 22 | def random_bytes(length): 23 | buffer = bytearray(length) 24 | for i in range(length): 25 | buffer[i] = random.randint(0, 255) 26 | return bytes(buffer) 27 | 28 | 29 | def prepare(magic): 30 | samples = [] 31 | for _ in range(BATCH_SAMPLES): 32 | batch = MemoryRecordsBuilder( 33 | magic, batch_size=DEFAULT_BATCH_SIZE, compression_type=0) 34 | for _ in range(MESSAGES_PER_BATCH): 35 | size = batch.append( 36 | random.randint(*TIMESTAMP_RANGE), 37 | random_bytes(KEY_SIZE), 38 | random_bytes(VALUE_SIZE), 39 | headers=[]) 40 | assert size 41 | batch.close() 42 | samples.append(bytes(batch.buffer())) 43 | 44 | return iter(itertools.cycle(samples)) 45 | 46 | 47 | def finalize(results): 48 | # Just some strange code to make sure PyPy does execute the code above 49 | # properly 50 | hash_val = hashlib.md5() 51 | for buf in results: 52 | hash_val.update(buf) 53 | print(hash_val, file=open(os.devnull, "w")) 54 | 55 | 56 | def func(loops, magic): 57 | # The JIT can optimize out the whole function if the result is the same each 58 | # time, so we need some randomized input data. 59 | precomputed_samples = prepare(magic) 60 | results = [] 61 | 62 | # Main benchmark code.
63 | batch_data = next(precomputed_samples) 64 | t0 = pyperf.perf_counter() 65 | for _ in range(loops): 66 | records = MemoryRecords(batch_data) 67 | while records.has_next(): 68 | batch = records.next_batch() 69 | batch.validate_crc() 70 | for record in batch: 71 | results.append(record.value) 72 | 73 | res = pyperf.perf_counter() - t0 74 | finalize(results) 75 | 76 | return res 77 | 78 | 79 | if __name__ == '__main__': 80 | runner = pyperf.Runner() 81 | runner.bench_time_func('batch_read_v0', func, 0) 82 | runner.bench_time_func('batch_read_v1', func, 1) 83 | runner.bench_time_func('batch_read_v2', func, 2) 84 | -------------------------------------------------------------------------------- /kafka/consumer/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.consumer.group import KafkaConsumer 4 | 5 | __all__ = [ 6 | 'KafkaConsumer' 7 | ] 8 | -------------------------------------------------------------------------------- /kafka/coordinator/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dpkp/kafka-python/e6abbbf284a1556536941fb8d99fb5ca03aa1e22/kafka/coordinator/__init__.py -------------------------------------------------------------------------------- /kafka/coordinator/assignors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dpkp/kafka-python/e6abbbf284a1556536941fb8d99fb5ca03aa1e22/kafka/coordinator/assignors/__init__.py -------------------------------------------------------------------------------- /kafka/coordinator/assignors/abstract.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | import logging 5 | 6 | log = logging.getLogger(__name__) 7 | 8 | 9 | class AbstractPartitionAssignor(object): 10 | """ 11 | Abstract assignor implementation which does some common grunt work (in particular collecting 12 | partition counts which are always needed in assignors). 13 | """ 14 | 15 | @abc.abstractproperty 16 | def name(self): 17 | """.name should be a string identifying the assignor""" 18 | pass 19 | 20 | @abc.abstractmethod 21 | def assign(self, cluster, members): 22 | """Perform group assignment given cluster metadata and member subscriptions 23 | 24 | Arguments: 25 | cluster (ClusterMetadata): metadata for use in assignment 26 | members (dict of {member_id: MemberMetadata}): decoded metadata for 27 | each member in the group. 28 | 29 | Returns: 30 | dict: {member_id: MemberAssignment} 31 | """ 32 | pass 33 | 34 | @abc.abstractmethod 35 | def metadata(self, topics): 36 | """Generate ProtocolMetadata to be submitted via JoinGroupRequest. 37 | 38 | Arguments: 39 | topics (set): a member's subscribed topics 40 | 41 | Returns: 42 | MemberMetadata struct 43 | """ 44 | pass 45 | 46 | @abc.abstractmethod 47 | def on_assignment(self, assignment): 48 | """Callback that runs on each assignment. 49 | 50 | This method can be used to update internal state, if any, of the 51 | partition assignor. 
52 | 53 | Arguments: 54 | assignment (MemberAssignment): the member's assignment 55 | """ 56 | pass 57 | -------------------------------------------------------------------------------- /kafka/coordinator/assignors/range.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import collections 4 | import logging 5 | 6 | from kafka.vendor import six 7 | 8 | from kafka.coordinator.assignors.abstract import AbstractPartitionAssignor 9 | from kafka.coordinator.protocol import ConsumerProtocolMemberMetadata, ConsumerProtocolMemberAssignment 10 | 11 | log = logging.getLogger(__name__) 12 | 13 | 14 | class RangePartitionAssignor(AbstractPartitionAssignor): 15 | """ 16 | The range assignor works on a per-topic basis. For each topic, we lay out 17 | the available partitions in numeric order and the consumers in 18 | lexicographic order. We then divide the number of partitions by the total 19 | number of consumers to determine the number of partitions to assign to each 20 | consumer. If it does not evenly divide, then the first few consumers will 21 | have one extra partition. 22 | 23 | For example, suppose there are two consumers C0 and C1, two topics t0 and 24 | t1, and each topic has 3 partitions, resulting in partitions t0p0, t0p1, 25 | t0p2, t1p0, t1p1, and t1p2. 26 | 27 | The assignment will be: 28 | C0: [t0p0, t0p1, t1p0, t1p1] 29 | C1: [t0p2, t1p2] 30 | """ 31 | name = 'range' 32 | version = 0 33 | 34 | @classmethod 35 | def assign(cls, cluster, member_metadata): 36 | consumers_per_topic = collections.defaultdict(list) 37 | for member, metadata in six.iteritems(member_metadata): 38 | for topic in metadata.subscription: 39 | consumers_per_topic[topic].append(member) 40 | 41 | # construct {member_id: {topic: [partition, ...]}} 42 | assignment = collections.defaultdict(dict) 43 | 44 | for topic, consumers_for_topic in six.iteritems(consumers_per_topic): 45 | partitions = cluster.partitions_for_topic(topic) 46 | if partitions is None: 47 | log.warning('No partition metadata for topic %s', topic) 48 | continue 49 | partitions = sorted(partitions) 50 | consumers_for_topic.sort() 51 | 52 | partitions_per_consumer = len(partitions) // len(consumers_for_topic) 53 | consumers_with_extra = len(partitions) % len(consumers_for_topic) 54 | 55 | for i, member in enumerate(consumers_for_topic): 56 | start = partitions_per_consumer * i 57 | start += min(i, consumers_with_extra) 58 | length = partitions_per_consumer 59 | if not i + 1 > consumers_with_extra: 60 | length += 1 61 | assignment[member][topic] = partitions[start:start+length] 62 | 63 | protocol_assignment = {} 64 | for member_id in member_metadata: 65 | protocol_assignment[member_id] = ConsumerProtocolMemberAssignment( 66 | cls.version, 67 | sorted(assignment[member_id].items()), 68 | b'') 69 | return protocol_assignment 70 | 71 | @classmethod 72 | def metadata(cls, topics): 73 | return ConsumerProtocolMemberMetadata(cls.version, list(topics), b'') 74 | 75 | @classmethod 76 | def on_assignment(cls, assignment): 77 | pass 78 | -------------------------------------------------------------------------------- /kafka/coordinator/assignors/sticky/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dpkp/kafka-python/e6abbbf284a1556536941fb8d99fb5ca03aa1e22/kafka/coordinator/assignors/sticky/__init__.py -------------------------------------------------------------------------------- 
/kafka/coordinator/assignors/sticky/sorted_set.py: -------------------------------------------------------------------------------- 1 | class SortedSet: 2 | def __init__(self, iterable=None, key=None): 3 | self._key = key if key is not None else lambda x: x 4 | self._set = set(iterable) if iterable is not None else set() 5 | 6 | self._cached_last = None 7 | self._cached_first = None 8 | 9 | def first(self): 10 | if self._cached_first is not None: 11 | return self._cached_first 12 | 13 | first = None 14 | for element in self._set: 15 | if first is None or self._key(first) > self._key(element): 16 | first = element 17 | self._cached_first = first 18 | return first 19 | 20 | def last(self): 21 | if self._cached_last is not None: 22 | return self._cached_last 23 | 24 | last = None 25 | for element in self._set: 26 | if last is None or self._key(last) < self._key(element): 27 | last = element 28 | self._cached_last = last 29 | return last 30 | 31 | def pop_last(self): 32 | value = self.last() 33 | self._set.remove(value) 34 | self._cached_last = None 35 | return value 36 | 37 | def add(self, value): 38 | if self._cached_last is not None and self._key(value) > self._key(self._cached_last): 39 | self._cached_last = value 40 | if self._cached_first is not None and self._key(value) < self._key(self._cached_first): 41 | self._cached_first = value 42 | 43 | return self._set.add(value) 44 | 45 | def remove(self, value): 46 | if self._cached_last is not None and self._cached_last == value: 47 | self._cached_last = None 48 | if self._cached_first is not None and self._cached_first == value: 49 | self._cached_first = None 50 | 51 | return self._set.remove(value) 52 | 53 | def __contains__(self, value): 54 | return value in self._set 55 | 56 | def __iter__(self): 57 | return iter(sorted(self._set, key=self._key)) 58 | 59 | def _bool(self): 60 | return len(self._set) != 0 61 | 62 | __nonzero__ = _bool 63 | __bool__ = _bool 64 | -------------------------------------------------------------------------------- /kafka/coordinator/protocol.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.protocol.struct import Struct 4 | from kafka.protocol.types import Array, Bytes, Int16, Int32, Schema, String 5 | from kafka.structs import TopicPartition 6 | 7 | 8 | class ConsumerProtocolMemberMetadata(Struct): 9 | SCHEMA = Schema( 10 | ('version', Int16), 11 | ('subscription', Array(String('utf-8'))), 12 | ('user_data', Bytes)) 13 | 14 | 15 | class ConsumerProtocolMemberAssignment(Struct): 16 | SCHEMA = Schema( 17 | ('version', Int16), 18 | ('assignment', Array( 19 | ('topic', String('utf-8')), 20 | ('partitions', Array(Int32)))), 21 | ('user_data', Bytes)) 22 | 23 | def partitions(self): 24 | return [TopicPartition(topic, partition) 25 | for topic, partitions in self.assignment # pylint: disable-msg=no-member 26 | for partition in partitions] 27 | 28 | 29 | class ConsumerProtocol(object): 30 | PROTOCOL_TYPE = 'consumer' 31 | ASSIGNMENT_STRATEGIES = ('range', 'roundrobin') 32 | METADATA = ConsumerProtocolMemberMetadata 33 | ASSIGNMENT = ConsumerProtocolMemberAssignment 34 | -------------------------------------------------------------------------------- /kafka/future.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import functools 4 | import logging 5 | import threading 6 | 7 | log = logging.getLogger(__name__) 8 | 9 | 10 | class 
Future(object): 11 | error_on_callbacks = False # and errbacks 12 | 13 | def __init__(self): 14 | self.is_done = False 15 | self.value = None 16 | self.exception = None 17 | self._callbacks = [] 18 | self._errbacks = [] 19 | self._lock = threading.Lock() 20 | 21 | def succeeded(self): 22 | return self.is_done and not bool(self.exception) 23 | 24 | def failed(self): 25 | return self.is_done and bool(self.exception) 26 | 27 | def retriable(self): 28 | try: 29 | return self.exception.retriable 30 | except AttributeError: 31 | return False 32 | 33 | def success(self, value): 34 | assert not self.is_done, 'Future is already complete' 35 | with self._lock: 36 | self.value = value 37 | self.is_done = True 38 | if self._callbacks: 39 | self._call_backs('callback', self._callbacks, self.value) 40 | return self 41 | 42 | def failure(self, e): 43 | assert not self.is_done, 'Future is already complete' 44 | exception = e if type(e) is not type else e() 45 | assert isinstance(exception, BaseException), ( 46 | 'future failed without an exception') 47 | with self._lock: 48 | self.exception = exception 49 | self.is_done = True 50 | self._call_backs('errback', self._errbacks, self.exception) 51 | return self 52 | 53 | def add_callback(self, f, *args, **kwargs): 54 | if args or kwargs: 55 | f = functools.partial(f, *args, **kwargs) 56 | with self._lock: 57 | if not self.is_done: 58 | self._callbacks.append(f) 59 | elif self.succeeded(): 60 | self._lock.release() 61 | self._call_backs('callback', [f], self.value) 62 | self._lock.acquire() 63 | return self 64 | 65 | def add_errback(self, f, *args, **kwargs): 66 | if args or kwargs: 67 | f = functools.partial(f, *args, **kwargs) 68 | with self._lock: 69 | if not self.is_done: 70 | self._errbacks.append(f) 71 | elif self.failed(): 72 | self._lock.release() 73 | self._call_backs('errback', [f], self.exception) 74 | self._lock.acquire() 75 | return self 76 | 77 | def add_both(self, f, *args, **kwargs): 78 | self.add_callback(f, *args, **kwargs) 79 | self.add_errback(f, *args, **kwargs) 80 | return self 81 | 82 | def chain(self, future): 83 | self.add_callback(future.success) 84 | self.add_errback(future.failure) 85 | return self 86 | 87 | def _call_backs(self, back_type, backs, value): 88 | for f in backs: 89 | try: 90 | f(value) 91 | except Exception as e: 92 | log.exception('Error processing %s', back_type) 93 | if self.error_on_callbacks: 94 | raise e 95 | -------------------------------------------------------------------------------- /kafka/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.metrics.compound_stat import NamedMeasurable 4 | from kafka.metrics.dict_reporter import DictReporter 5 | from kafka.metrics.kafka_metric import KafkaMetric 6 | from kafka.metrics.measurable import AnonMeasurable 7 | from kafka.metrics.metric_config import MetricConfig 8 | from kafka.metrics.metric_name import MetricName 9 | from kafka.metrics.metrics import Metrics 10 | from kafka.metrics.quota import Quota 11 | 12 | __all__ = [ 13 | 'AnonMeasurable', 'DictReporter', 'KafkaMetric', 'MetricConfig', 14 | 'MetricName', 'Metrics', 'NamedMeasurable', 'Quota' 15 | ] 16 | -------------------------------------------------------------------------------- /kafka/metrics/compound_stat.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | 5 | from kafka.metrics.stat import 
AbstractStat 6 | from kafka.vendor.six import add_metaclass 7 | 8 | 9 | @add_metaclass(abc.ABCMeta) 10 | class AbstractCompoundStat(AbstractStat): 11 | """ 12 | A compound stat is a stat where a single measurement and associated 13 | data structure feeds many metrics. An example is a 14 | histogram, which has many associated percentiles. 15 | """ 16 | def stats(self): 17 | """ 18 | Return list of NamedMeasurable 19 | """ 20 | raise NotImplementedError 21 | 22 | 23 | class NamedMeasurable(object): 24 | __slots__ = ('_name', '_stat') 25 | 26 | def __init__(self, metric_name, measurable_stat): 27 | self._name = metric_name 28 | self._stat = measurable_stat 29 | 30 | @property 31 | def name(self): 32 | return self._name 33 | 34 | @property 35 | def stat(self): 36 | return self._stat 37 | -------------------------------------------------------------------------------- /kafka/metrics/dict_reporter.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import logging 4 | import threading 5 | 6 | from kafka.metrics.metrics_reporter import AbstractMetricsReporter 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | class DictReporter(AbstractMetricsReporter): 12 | """A basic dictionary-based metrics reporter. 13 | 14 | Store all metrics in a two-level dictionary of category > name > metric. 15 | """ 16 | def __init__(self, prefix=''): 17 | self._lock = threading.Lock() 18 | self._prefix = prefix if prefix else '' # never allow None 19 | self._store = {} 20 | 21 | def snapshot(self): 22 | """ 23 | Return a nested dictionary snapshot of all metrics and their 24 | values at this time. Example: 25 | { 26 | 'category': { 27 | 'metric1_name': 42.0, 28 | 'metric2_name': 'foo' 29 | } 30 | } 31 | """ 32 | return dict((category, dict((name, metric.value()) 33 | for name, metric in list(metrics.items()))) 34 | for category, metrics in 35 | list(self._store.items())) 36 | 37 | def init(self, metrics): 38 | for metric in metrics: 39 | self.metric_change(metric) 40 | 41 | def metric_change(self, metric): 42 | with self._lock: 43 | category = self.get_category(metric) 44 | if category not in self._store: 45 | self._store[category] = {} 46 | self._store[category][metric.metric_name.name] = metric 47 | 48 | def metric_removal(self, metric): 49 | with self._lock: 50 | category = self.get_category(metric) 51 | metrics = self._store.get(category, {}) 52 | removed = metrics.pop(metric.metric_name.name, None) 53 | if not metrics: 54 | self._store.pop(category, None) 55 | return removed 56 | 57 | def get_category(self, metric): 58 | """ 59 | Return a string category for the metric. 60 | 61 | The category is made up of this reporter's prefix and the 62 | metric's group and tags.
63 | 64 | Examples: 65 | prefix = 'foo', group = 'bar', tags = {'a': 1, 'b': 2} 66 | returns: 'foo.bar.a=1,b=2' 67 | 68 | prefix = 'foo', group = 'bar', tags = None 69 | returns: 'foo.bar' 70 | 71 | prefix = None, group = 'bar', tags = None 72 | returns: 'bar' 73 | """ 74 | tags = ','.join('%s=%s' % (k, v) for k, v in 75 | sorted(metric.metric_name.tags.items())) 76 | return '.'.join(x for x in 77 | [self._prefix, metric.metric_name.group, tags] if x) 78 | 79 | def configure(self, configs): 80 | pass 81 | 82 | def close(self): 83 | pass 84 | -------------------------------------------------------------------------------- /kafka/metrics/kafka_metric.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import time 4 | 5 | 6 | class KafkaMetric(object): 7 | __slots__ = ('_metric_name', '_measurable', '_config') 8 | 9 | # NOTE java constructor takes a lock instance 10 | def __init__(self, metric_name, measurable, config): 11 | if not metric_name: 12 | raise ValueError('metric_name must be non-empty') 13 | if not measurable: 14 | raise ValueError('measurable must be non-empty') 15 | self._metric_name = metric_name 16 | self._measurable = measurable 17 | self._config = config 18 | 19 | @property 20 | def metric_name(self): 21 | return self._metric_name 22 | 23 | @property 24 | def measurable(self): 25 | return self._measurable 26 | 27 | @property 28 | def config(self): 29 | return self._config 30 | 31 | @config.setter 32 | def config(self, config): 33 | self._config = config 34 | 35 | def value(self, time_ms=None): 36 | if time_ms is None: 37 | time_ms = time.time() * 1000 38 | return self._measurable.measure(self._config, time_ms) 39 | -------------------------------------------------------------------------------- /kafka/metrics/measurable.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | 5 | 6 | class AbstractMeasurable(object): 7 | """A measurable quantity that can be registered as a metric""" 8 | @abc.abstractmethod 9 | def measure(self, config, now): 10 | """ 11 | Measure this quantity and return the result 12 | 13 | Arguments: 14 | config (MetricConfig): The configuration for this metric 15 | now (int): The POSIX time in milliseconds the measurement 16 | is being taken 17 | 18 | Returns: 19 | The measured value 20 | """ 21 | raise NotImplementedError 22 | 23 | 24 | class AnonMeasurable(AbstractMeasurable): 25 | def __init__(self, measure_fn): 26 | self._measure_fn = measure_fn 27 | 28 | def measure(self, config, now): 29 | return float(self._measure_fn(config, now)) 30 | -------------------------------------------------------------------------------- /kafka/metrics/measurable_stat.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | 5 | from kafka.metrics.measurable import AbstractMeasurable 6 | from kafka.metrics.stat import AbstractStat 7 | from kafka.vendor.six import add_metaclass 8 | 9 | 10 | @add_metaclass(abc.ABCMeta) 11 | class AbstractMeasurableStat(AbstractStat, AbstractMeasurable): 12 | """ 13 | An AbstractMeasurableStat is an AbstractStat that is also 14 | an AbstractMeasurable (i.e. can produce a single floating point value). 15 | This is the interface used for most of the simple statistics such 16 | as Avg, Max, Count, etc. 
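A hypothetical sketch of this dual interface, using the Total stat from this package (``config`` and ``now_ms`` stand in for a MetricConfig instance and a millisecond timestamp):

    total = Total()
    total.record(config, 5.0, now_ms)   # the AbstractStat side: consume an update
    total.record(config, 2.5, now_ms)
    total.measure(config, now_ms)       # the AbstractMeasurable side: returns 7.5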
17 | """ 18 | -------------------------------------------------------------------------------- /kafka/metrics/metric_config.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import sys 4 | 5 | 6 | class MetricConfig(object): 7 | """Configuration values for metrics""" 8 | __slots__ = ('quota', '_samples', 'event_window', 'time_window_ms', 'tags') 9 | 10 | def __init__(self, quota=None, samples=2, event_window=sys.maxsize, 11 | time_window_ms=30 * 1000, tags=None): 12 | """ 13 | Arguments: 14 | quota (Quota, optional): Upper or lower bound of a value. 15 | samples (int, optional): Max number of samples kept per metric. 16 | event_window (int, optional): Max number of values per sample. 17 | time_window_ms (int, optional): Max age of an individual sample. 18 | tags (dict of {str: str}, optional): Tags for each metric. 19 | """ 20 | self.quota = quota 21 | self._samples = samples 22 | self.event_window = event_window 23 | self.time_window_ms = time_window_ms 24 | # tags should be OrderedDict (not supported in py26) 25 | self.tags = tags if tags else {} 26 | 27 | @property 28 | def samples(self): 29 | return self._samples 30 | 31 | @samples.setter 32 | def samples(self, value): 33 | if value < 1: 34 | raise ValueError('The number of samples must be at least 1.') 35 | self._samples = value 36 | -------------------------------------------------------------------------------- /kafka/metrics/metrics_reporter.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | 5 | from kafka.vendor.six import add_metaclass 6 | 7 | 8 | @add_metaclass(abc.ABCMeta) 9 | class AbstractMetricsReporter(object): 10 | """ 11 | An abstract class to allow things to listen as new metrics 12 | are created so they can be reported. 
13 | """ 14 | @abc.abstractmethod 15 | def init(self, metrics): 16 | """ 17 | This is called when the reporter is first registered 18 | to initially register all existing metrics 19 | 20 | Arguments: 21 | metrics (list of KafkaMetric): All currently existing metrics 22 | """ 23 | raise NotImplementedError 24 | 25 | @abc.abstractmethod 26 | def metric_change(self, metric): 27 | """ 28 | This is called whenever a metric is updated or added 29 | 30 | Arguments: 31 | metric (KafkaMetric) 32 | """ 33 | raise NotImplementedError 34 | 35 | @abc.abstractmethod 36 | def metric_removal(self, metric): 37 | """ 38 | This is called whenever a metric is removed 39 | 40 | Arguments: 41 | metric (KafkaMetric) 42 | """ 43 | raise NotImplementedError 44 | 45 | @abc.abstractmethod 46 | def configure(self, configs): 47 | """ 48 | Configure this class with the given key-value pairs 49 | 50 | Arguments: 51 | configs (dict of {str, ?}) 52 | """ 53 | raise NotImplementedError 54 | 55 | @abc.abstractmethod 56 | def close(self): 57 | """Called when the metrics repository is closed.""" 58 | raise NotImplementedError 59 | -------------------------------------------------------------------------------- /kafka/metrics/quota.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | 4 | class Quota(object): 5 | """An upper or lower bound for metrics""" 6 | __slots__ = ('_bound', '_upper') 7 | 8 | def __init__(self, bound, is_upper): 9 | self._bound = bound 10 | self._upper = is_upper 11 | 12 | @staticmethod 13 | def upper_bound(upper_bound): 14 | return Quota(upper_bound, True) 15 | 16 | @staticmethod 17 | def lower_bound(lower_bound): 18 | return Quota(lower_bound, False) 19 | 20 | def is_upper_bound(self): 21 | return self._upper 22 | 23 | @property 24 | def bound(self): 25 | return self._bound 26 | 27 | def is_acceptable(self, value): 28 | return ((self.is_upper_bound() and value <= self.bound) or 29 | (not self.is_upper_bound() and value >= self.bound)) 30 | 31 | def __hash__(self): 32 | prime = 31 33 | result = prime + self.bound 34 | return prime * result + self.is_upper_bound() 35 | 36 | def __eq__(self, other): 37 | if self is other: 38 | return True 39 | return (isinstance(self, type(other)) and 40 | self.bound == other.bound and 41 | self.is_upper_bound() == other.is_upper_bound()) 42 | 43 | def __ne__(self, other): 44 | return not self.__eq__(other) 45 | -------------------------------------------------------------------------------- /kafka/metrics/stat.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | 5 | from kafka.vendor.six import add_metaclass 6 | 7 | 8 | @add_metaclass(abc.ABCMeta) 9 | class AbstractStat(object): 10 | """ 11 | An AbstractStat is a quantity such as average, max, etc that is computed 12 | off the stream of updates to a sensor 13 | """ 14 | @abc.abstractmethod 15 | def record(self, config, value, time_ms): 16 | """ 17 | Record the given value 18 | 19 | Arguments: 20 | config (MetricConfig): The configuration to use for this metric 21 | value (float): The value to record 22 | timeMs (int): The POSIX time in milliseconds this value occurred 23 | """ 24 | raise NotImplementedError 25 | -------------------------------------------------------------------------------- /kafka/metrics/stats/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import 
absolute_import 2 | 3 | from kafka.metrics.stats.avg import Avg 4 | from kafka.metrics.stats.count import Count 5 | from kafka.metrics.stats.histogram import Histogram 6 | from kafka.metrics.stats.max_stat import Max 7 | from kafka.metrics.stats.min_stat import Min 8 | from kafka.metrics.stats.percentile import Percentile 9 | from kafka.metrics.stats.percentiles import Percentiles 10 | from kafka.metrics.stats.rate import Rate 11 | from kafka.metrics.stats.sensor import Sensor 12 | from kafka.metrics.stats.total import Total 13 | 14 | __all__ = [ 15 | 'Avg', 'Count', 'Histogram', 'Max', 'Min', 'Percentile', 'Percentiles', 16 | 'Rate', 'Sensor', 'Total' 17 | ] 18 | -------------------------------------------------------------------------------- /kafka/metrics/stats/avg.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat 4 | 5 | 6 | class Avg(AbstractSampledStat): 7 | """ 8 | An AbstractSampledStat that maintains a simple average over its samples. 9 | """ 10 | __slots__ = ('_initial_value', '_samples', '_current') 11 | 12 | def __init__(self): 13 | super(Avg, self).__init__(0.0) 14 | 15 | def update(self, sample, config, value, now): 16 | sample.value += value 17 | 18 | def combine(self, samples, config, now): 19 | total_sum = 0 20 | total_count = 0 21 | for sample in samples: 22 | total_sum += sample.value 23 | total_count += sample.event_count 24 | if not total_count: 25 | return 0 26 | return float(total_sum) / total_count 27 | -------------------------------------------------------------------------------- /kafka/metrics/stats/count.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat 4 | 5 | 6 | class Count(AbstractSampledStat): 7 | """ 8 | An AbstractSampledStat that maintains a simple count of what it has seen. 
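A usage sketch, assuming a Metrics instance from this package (the sensor and metric names are hypothetical):

    sensor = metrics.sensor('demo-requests')
    sensor.add(metrics.metric_name('request-count', 'demo-group'), Count())
    sensor.add(metrics.metric_name('request-avg', 'demo-group'), Avg())
    sensor.record(1.0)  # every stat registered on the sensor sees this update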
9 | """ 10 | __slots__ = ('_initial_value', '_samples', '_current') 11 | 12 | def __init__(self): 13 | super(Count, self).__init__(0.0) 14 | 15 | def update(self, sample, config, value, now): 16 | sample.value += 1.0 17 | 18 | def combine(self, samples, config, now): 19 | return float(sum(sample.value for sample in samples)) 20 | -------------------------------------------------------------------------------- /kafka/metrics/stats/max_stat.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat 4 | 5 | 6 | class Max(AbstractSampledStat): 7 | """An AbstractSampledStat that gives the max over its samples.""" 8 | __slots__ = ('_initial_value', '_samples', '_current') 9 | 10 | def __init__(self): 11 | super(Max, self).__init__(float('-inf')) 12 | 13 | def update(self, sample, config, value, now): 14 | sample.value = max(sample.value, value) 15 | 16 | def combine(self, samples, config, now): 17 | if not samples: 18 | return float('-inf') 19 | return float(max(sample.value for sample in samples)) 20 | -------------------------------------------------------------------------------- /kafka/metrics/stats/min_stat.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import sys 4 | 5 | from kafka.metrics.stats.sampled_stat import AbstractSampledStat 6 | 7 | 8 | class Min(AbstractSampledStat): 9 | """An AbstractSampledStat that gives the min over its samples.""" 10 | __slots__ = ('_initial_value', '_samples', '_current') 11 | 12 | def __init__(self): 13 | super(Min, self).__init__(float(sys.maxsize)) 14 | 15 | def update(self, sample, config, value, now): 16 | sample.value = min(sample.value, value) 17 | 18 | def combine(self, samples, config, now): 19 | if not samples: 20 | return float(sys.maxsize) 21 | return float(min(sample.value for sample in samples)) 22 | -------------------------------------------------------------------------------- /kafka/metrics/stats/percentile.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | 4 | class Percentile(object): 5 | __slots__ = ('_metric_name', '_percentile') 6 | 7 | def __init__(self, metric_name, percentile): 8 | self._metric_name = metric_name 9 | self._percentile = float(percentile) 10 | 11 | @property 12 | def name(self): 13 | return self._metric_name 14 | 15 | @property 16 | def percentile(self): 17 | return self._percentile 18 | -------------------------------------------------------------------------------- /kafka/metrics/stats/total.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.metrics.measurable_stat import AbstractMeasurableStat 4 | 5 | 6 | class Total(AbstractMeasurableStat): 7 | """An un-windowed cumulative total maintained over all time.""" 8 | __slots__ = ('_total') 9 | 10 | def __init__(self, value=0.0): 11 | self._total = value 12 | 13 | def record(self, config, value, now): 14 | self._total += value 15 | 16 | def measure(self, config, now): 17 | return float(self._total) 18 | -------------------------------------------------------------------------------- /kafka/partitioner/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from 
kafka.partitioner.default import DefaultPartitioner, murmur2 4 | 5 | 6 | __all__ = [ 7 | 'DefaultPartitioner', 'murmur2' 8 | ] 9 | -------------------------------------------------------------------------------- /kafka/producer/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.producer.kafka import KafkaProducer 4 | 5 | __all__ = [ 6 | 'KafkaProducer' 7 | ] 8 | -------------------------------------------------------------------------------- /kafka/protocol/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | 4 | API_KEYS = { 5 | 0: 'Produce', 6 | 1: 'Fetch', 7 | 2: 'ListOffsets', 8 | 3: 'Metadata', 9 | 4: 'LeaderAndIsr', 10 | 5: 'StopReplica', 11 | 6: 'UpdateMetadata', 12 | 7: 'ControlledShutdown', 13 | 8: 'OffsetCommit', 14 | 9: 'OffsetFetch', 15 | 10: 'FindCoordinator', 16 | 11: 'JoinGroup', 17 | 12: 'Heartbeat', 18 | 13: 'LeaveGroup', 19 | 14: 'SyncGroup', 20 | 15: 'DescribeGroups', 21 | 16: 'ListGroups', 22 | 17: 'SaslHandshake', 23 | 18: 'ApiVersions', 24 | 19: 'CreateTopics', 25 | 20: 'DeleteTopics', 26 | 21: 'DeleteRecords', 27 | 22: 'InitProducerId', 28 | 23: 'OffsetForLeaderEpoch', 29 | 24: 'AddPartitionsToTxn', 30 | 25: 'AddOffsetsToTxn', 31 | 26: 'EndTxn', 32 | 27: 'WriteTxnMarkers', 33 | 28: 'TxnOffsetCommit', 34 | 29: 'DescribeAcls', 35 | 30: 'CreateAcls', 36 | 31: 'DeleteAcls', 37 | 32: 'DescribeConfigs', 38 | 33: 'AlterConfigs', 39 | 36: 'SaslAuthenticate', 40 | 37: 'CreatePartitions', 41 | 38: 'CreateDelegationToken', 42 | 39: 'RenewDelegationToken', 43 | 40: 'ExpireDelegationToken', 44 | 41: 'DescribeDelegationToken', 45 | 42: 'DeleteGroups', 46 | 45: 'AlterPartitionReassignments', 47 | 46: 'ListPartitionReassignments', 48 | 48: 'DescribeClientQuotas', 49 | } 50 | -------------------------------------------------------------------------------- /kafka/protocol/abstract.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | 5 | from kafka.vendor.six import add_metaclass 6 | 7 | 8 | @add_metaclass(abc.ABCMeta) 9 | class AbstractType(object): 10 | @abc.abstractmethod 11 | def encode(cls, value): # pylint: disable=no-self-argument 12 | pass 13 | 14 | @abc.abstractmethod 15 | def decode(cls, data): # pylint: disable=no-self-argument 16 | pass 17 | 18 | @classmethod 19 | def repr(cls, value): 20 | return repr(value) 21 | -------------------------------------------------------------------------------- /kafka/protocol/add_offsets_to_txn.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.protocol.api import Request, Response 4 | from kafka.protocol.types import Int16, Int32, Int64, Schema, String 5 | 6 | 7 | class AddOffsetsToTxnResponse_v0(Response): 8 | API_KEY = 25 9 | API_VERSION = 0 10 | SCHEMA = Schema( 11 | ('throttle_time_ms', Int32), 12 | ('error_code', Int16), 13 | ) 14 | 15 | 16 | class AddOffsetsToTxnResponse_v1(Response): 17 | API_KEY = 25 18 | API_VERSION = 1 19 | SCHEMA = AddOffsetsToTxnResponse_v0.SCHEMA 20 | 21 | 22 | class AddOffsetsToTxnResponse_v2(Response): 23 | API_KEY = 25 24 | API_VERSION = 2 25 | SCHEMA = AddOffsetsToTxnResponse_v1.SCHEMA 26 | 27 | 28 | class AddOffsetsToTxnRequest_v0(Request): 29 | API_KEY = 25 30 | API_VERSION = 0 31 | RESPONSE_TYPE = 
AddOffsetsToTxnResponse_v0 32 | SCHEMA = Schema( 33 | ('transactional_id', String('utf-8')), 34 | ('producer_id', Int64), 35 | ('producer_epoch', Int16), 36 | ('group_id', String('utf-8')), 37 | ) 38 | 39 | 40 | class AddOffsetsToTxnRequest_v1(Request): 41 | API_KEY = 25 42 | API_VERSION = 1 43 | RESPONSE_TYPE = AddOffsetsToTxnResponse_v1 44 | SCHEMA = AddOffsetsToTxnRequest_v0.SCHEMA 45 | 46 | 47 | class AddOffsetsToTxnRequest_v2(Request): 48 | API_KEY = 25 49 | API_VERSION = 2 50 | RESPONSE_TYPE = AddOffsetsToTxnResponse_v2 51 | SCHEMA = AddOffsetsToTxnRequest_v1.SCHEMA 52 | 53 | 54 | AddOffsetsToTxnRequest = [ 55 | AddOffsetsToTxnRequest_v0, AddOffsetsToTxnRequest_v1, AddOffsetsToTxnRequest_v2, 56 | ] 57 | AddOffsetsToTxnResponse = [ 58 | AddOffsetsToTxnResponse_v0, AddOffsetsToTxnResponse_v1, AddOffsetsToTxnResponse_v2, 59 | ] 60 | -------------------------------------------------------------------------------- /kafka/protocol/add_partitions_to_txn.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.protocol.api import Request, Response 4 | from kafka.protocol.types import Array, Int16, Int32, Int64, Schema, String 5 | 6 | 7 | class AddPartitionsToTxnResponse_v0(Response): 8 | API_KEY = 24 9 | API_VERSION = 0 10 | SCHEMA = Schema( 11 | ('throttle_time_ms', Int32), 12 | ('results', Array( 13 | ('topic', String('utf-8')), 14 | ('partitions', Array( 15 | ('partition', Int32), 16 | ('error_code', Int16)))))) 17 | 18 | 19 | class AddPartitionsToTxnResponse_v1(Response): 20 | API_KEY = 24 21 | API_VERSION = 1 22 | SCHEMA = AddPartitionsToTxnResponse_v0.SCHEMA 23 | 24 | 25 | class AddPartitionsToTxnResponse_v2(Response): 26 | API_KEY = 24 27 | API_VERSION = 2 28 | SCHEMA = AddPartitionsToTxnResponse_v1.SCHEMA 29 | 30 | 31 | class AddPartitionsToTxnRequest_v0(Request): 32 | API_KEY = 24 33 | API_VERSION = 0 34 | RESPONSE_TYPE = AddPartitionsToTxnResponse_v0 35 | SCHEMA = Schema( 36 | ('transactional_id', String('utf-8')), 37 | ('producer_id', Int64), 38 | ('producer_epoch', Int16), 39 | ('topics', Array( 40 | ('topic', String('utf-8')), 41 | ('partitions', Array(Int32))))) 42 | 43 | 44 | class AddPartitionsToTxnRequest_v1(Request): 45 | API_KEY = 24 46 | API_VERSION = 1 47 | RESPONSE_TYPE = AddPartitionsToTxnResponse_v1 48 | SCHEMA = AddPartitionsToTxnRequest_v0.SCHEMA 49 | 50 | 51 | class AddPartitionsToTxnRequest_v2(Request): 52 | API_KEY = 24 53 | API_VERSION = 2 54 | RESPONSE_TYPE = AddPartitionsToTxnResponse_v2 55 | SCHEMA = AddPartitionsToTxnRequest_v1.SCHEMA 56 | 57 | 58 | AddPartitionsToTxnRequest = [ 59 | AddPartitionsToTxnRequest_v0, AddPartitionsToTxnRequest_v1, AddPartitionsToTxnRequest_v2, 60 | ] 61 | AddPartitionsToTxnResponse = [ 62 | AddPartitionsToTxnResponse_v0, AddPartitionsToTxnResponse_v1, AddPartitionsToTxnResponse_v2, 63 | ] 64 | -------------------------------------------------------------------------------- /kafka/protocol/end_txn.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.protocol.api import Request, Response 4 | from kafka.protocol.types import Boolean, Int16, Int32, Int64, Schema, String 5 | 6 | 7 | class EndTxnResponse_v0(Response): 8 | API_KEY = 26 9 | API_VERSION = 0 10 | SCHEMA = Schema( 11 | ('throttle_time_ms', Int32), 12 | ('error_code', Int16), 13 | ) 14 | 15 | 16 | class EndTxnResponse_v1(Response): 17 | API_KEY = 26 18 | API_VERSION = 1 19 | SCHEMA = 
EndTxnResponse_v0.SCHEMA 20 | 21 | 22 | class EndTxnResponse_v2(Response): 23 | API_KEY = 26 24 | API_VERSION = 2 25 | SCHEMA = EndTxnResponse_v1.SCHEMA 26 | 27 | 28 | class EndTxnRequest_v0(Request): 29 | API_KEY = 26 30 | API_VERSION = 0 31 | RESPONSE_TYPE = EndTxnResponse_v0 32 | SCHEMA = Schema( 33 | ('transactional_id', String('utf-8')), 34 | ('producer_id', Int64), 35 | ('producer_epoch', Int16), 36 | ('committed', Boolean)) 37 | 38 | 39 | class EndTxnRequest_v1(Request): 40 | API_KEY = 26 41 | API_VERSION = 1 42 | RESPONSE_TYPE = EndTxnResponse_v1 43 | SCHEMA = EndTxnRequest_v0.SCHEMA 44 | 45 | 46 | class EndTxnRequest_v2(Request): 47 | API_KEY = 26 48 | API_VERSION = 2 49 | RESPONSE_TYPE = EndTxnResponse_v2 50 | SCHEMA = EndTxnRequest_v1.SCHEMA 51 | 52 | 53 | EndTxnRequest = [ 54 | EndTxnRequest_v0, EndTxnRequest_v1, EndTxnRequest_v2, 55 | ] 56 | EndTxnResponse = [ 57 | EndTxnResponse_v0, EndTxnResponse_v1, EndTxnResponse_v2, 58 | ] 59 | -------------------------------------------------------------------------------- /kafka/protocol/find_coordinator.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.protocol.api import Request, Response 4 | from kafka.protocol.types import Int8, Int16, Int32, Schema, String 5 | 6 | 7 | class FindCoordinatorResponse_v0(Response): 8 | API_KEY = 10 9 | API_VERSION = 0 10 | SCHEMA = Schema( 11 | ('error_code', Int16), 12 | ('coordinator_id', Int32), 13 | ('host', String('utf-8')), 14 | ('port', Int32) 15 | ) 16 | 17 | 18 | class FindCoordinatorResponse_v1(Response): 19 | API_KEY = 10 20 | API_VERSION = 1 21 | SCHEMA = Schema( 22 | ('throttle_time_ms', Int32), 23 | ('error_code', Int16), 24 | ('error_message', String('utf-8')), 25 | ('coordinator_id', Int32), 26 | ('host', String('utf-8')), 27 | ('port', Int32) 28 | ) 29 | 30 | 31 | class FindCoordinatorResponse_v2(Response): 32 | API_KEY = 10 33 | API_VERSION = 2 34 | SCHEMA = FindCoordinatorResponse_v1.SCHEMA 35 | 36 | 37 | class FindCoordinatorRequest_v0(Request): 38 | API_KEY = 10 39 | API_VERSION = 0 40 | RESPONSE_TYPE = FindCoordinatorResponse_v0 41 | SCHEMA = Schema( 42 | ('consumer_group', String('utf-8')) 43 | ) 44 | 45 | 46 | class FindCoordinatorRequest_v1(Request): 47 | API_KEY = 10 48 | API_VERSION = 1 49 | RESPONSE_TYPE = FindCoordinatorResponse_v1 50 | SCHEMA = Schema( 51 | ('coordinator_key', String('utf-8')), 52 | ('coordinator_type', Int8) # 0: consumer, 1: transaction 53 | ) 54 | 55 | 56 | class FindCoordinatorRequest_v2(Request): 57 | API_KEY = 10 58 | API_VERSION = 2 59 | RESPONSE_TYPE = FindCoordinatorResponse_v2 60 | SCHEMA = FindCoordinatorRequest_v1.SCHEMA 61 | 62 | 63 | FindCoordinatorRequest = [FindCoordinatorRequest_v0, FindCoordinatorRequest_v1, FindCoordinatorRequest_v2] 64 | FindCoordinatorResponse = [FindCoordinatorResponse_v0, FindCoordinatorResponse_v1, FindCoordinatorResponse_v2] 65 | -------------------------------------------------------------------------------- /kafka/protocol/frame.py: -------------------------------------------------------------------------------- 1 | class KafkaBytes(bytearray): 2 | def __init__(self, size): 3 | super(KafkaBytes, self).__init__(size) 4 | self._idx = 0 5 | 6 | def read(self, nbytes=None): 7 | if nbytes is None: 8 | nbytes = len(self) - self._idx 9 | start = self._idx 10 | self._idx += nbytes 11 | if self._idx > len(self): 12 | self._idx = len(self) 13 | return bytes(self[start:self._idx]) 14 | 15 | def write(self, data): 16 | start = 
self._idx 17 | self._idx += len(data) 18 | self[start:self._idx] = data 19 | 20 | def seek(self, idx): 21 | self._idx = idx 22 | 23 | def tell(self): 24 | return self._idx 25 | 26 | def __str__(self): 27 | return 'KafkaBytes(%d)' % len(self) 28 | 29 | def __repr__(self): 30 | return str(self) 31 | -------------------------------------------------------------------------------- /kafka/protocol/init_producer_id.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.protocol.api import Request, Response 4 | from kafka.protocol.types import Int16, Int32, Int64, Schema, String 5 | 6 | 7 | class InitProducerIdResponse_v0(Response): 8 | API_KEY = 22 9 | API_VERSION = 0 10 | SCHEMA = Schema( 11 | ('throttle_time_ms', Int32), 12 | ('error_code', Int16), 13 | ('producer_id', Int64), 14 | ('producer_epoch', Int16), 15 | ) 16 | 17 | 18 | class InitProducerIdResponse_v1(Response): 19 | API_KEY = 22 20 | API_VERSION = 1 21 | SCHEMA = InitProducerIdResponse_v0.SCHEMA 22 | 23 | 24 | class InitProducerIdRequest_v0(Request): 25 | API_KEY = 22 26 | API_VERSION = 0 27 | RESPONSE_TYPE = InitProducerIdResponse_v0 28 | SCHEMA = Schema( 29 | ('transactional_id', String('utf-8')), 30 | ('transaction_timeout_ms', Int32), 31 | ) 32 | 33 | 34 | class InitProducerIdRequest_v1(Request): 35 | API_KEY = 22 36 | API_VERSION = 1 37 | RESPONSE_TYPE = InitProducerIdResponse_v1 38 | SCHEMA = InitProducerIdRequest_v0.SCHEMA 39 | 40 | 41 | InitProducerIdRequest = [ 42 | InitProducerIdRequest_v0, InitProducerIdRequest_v1, 43 | ] 44 | InitProducerIdResponse = [ 45 | InitProducerIdResponse_v0, InitProducerIdResponse_v1, 46 | ] 47 | -------------------------------------------------------------------------------- /kafka/protocol/pickle.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | try: 4 | import copyreg # pylint: disable=import-error 5 | except ImportError: 6 | import copy_reg as copyreg # pylint: disable=import-error 7 | 8 | import types 9 | 10 | 11 | def _pickle_method(method): 12 | try: 13 | func_name = method.__func__.__name__ 14 | obj = method.__self__ 15 | cls = method.__self__.__class__ 16 | except AttributeError: 17 | func_name = method.im_func.__name__ 18 | obj = method.im_self 19 | cls = method.im_class 20 | 21 | return _unpickle_method, (func_name, obj, cls) 22 | 23 | 24 | def _unpickle_method(func_name, obj, cls): 25 | for cls in cls.mro(): 26 | try: 27 | func = cls.__dict__[func_name] 28 | except KeyError: 29 | pass 30 | else: 31 | break 32 | return func.__get__(obj, cls) 33 | 34 | # https://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods 35 | copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method) 36 | -------------------------------------------------------------------------------- /kafka/protocol/sasl_authenticate.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.protocol.api import Request, Response 4 | from kafka.protocol.types import Bytes, Int16, Int64, Schema, String 5 | 6 | 7 | class SaslAuthenticateResponse_v0(Response): 8 | API_KEY = 36 9 | API_VERSION = 0 10 | SCHEMA = Schema( 11 | ('error_code', Int16), 12 | ('error_message', String('utf-8')), 13 | ('auth_bytes', Bytes)) 14 | 15 | 16 | class SaslAuthenticateResponse_v1(Response): 17 | API_KEY = 36 18 | API_VERSION = 1 19 | SCHEMA = Schema( 20 | 
('error_code', Int16), 21 | ('error_message', String('utf-8')), 22 | ('auth_bytes', Bytes), 23 | ('session_lifetime_ms', Int64)) 24 | 25 | 26 | class SaslAuthenticateRequest_v0(Request): 27 | API_KEY = 36 28 | API_VERSION = 0 29 | RESPONSE_TYPE = SaslAuthenticateResponse_v0 30 | SCHEMA = Schema( 31 | ('auth_bytes', Bytes)) 32 | 33 | 34 | class SaslAuthenticateRequest_v1(Request): 35 | API_KEY = 36 36 | API_VERSION = 1 37 | RESPONSE_TYPE = SaslAuthenticateResponse_v1 38 | SCHEMA = SaslAuthenticateRequest_v0.SCHEMA 39 | 40 | 41 | SaslAuthenticateRequest = [SaslAuthenticateRequest_v0, SaslAuthenticateRequest_v1] 42 | SaslAuthenticateResponse = [SaslAuthenticateResponse_v0, SaslAuthenticateResponse_v1] 43 | -------------------------------------------------------------------------------- /kafka/protocol/sasl_handshake.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.protocol.api import Request, Response 4 | from kafka.protocol.types import Array, Int16, Schema, String 5 | 6 | 7 | class SaslHandshakeResponse_v0(Response): 8 | API_KEY = 17 9 | API_VERSION = 0 10 | SCHEMA = Schema( 11 | ('error_code', Int16), 12 | ('enabled_mechanisms', Array(String('utf-8'))) 13 | ) 14 | 15 | 16 | class SaslHandshakeResponse_v1(Response): 17 | API_KEY = 17 18 | API_VERSION = 1 19 | SCHEMA = SaslHandshakeResponse_v0.SCHEMA 20 | 21 | 22 | class SaslHandshakeRequest_v0(Request): 23 | API_KEY = 17 24 | API_VERSION = 0 25 | RESPONSE_TYPE = SaslHandshakeResponse_v0 26 | SCHEMA = Schema( 27 | ('mechanism', String('utf-8')) 28 | ) 29 | 30 | 31 | class SaslHandshakeRequest_v1(Request): 32 | API_KEY = 17 33 | API_VERSION = 1 34 | RESPONSE_TYPE = SaslHandshakeResponse_v1 35 | SCHEMA = SaslHandshakeRequest_v0.SCHEMA 36 | 37 | 38 | SaslHandshakeRequest = [SaslHandshakeRequest_v0, SaslHandshakeRequest_v1] 39 | SaslHandshakeResponse = [SaslHandshakeResponse_v0, SaslHandshakeResponse_v1] 40 | -------------------------------------------------------------------------------- /kafka/protocol/struct.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from io import BytesIO 4 | 5 | from kafka.protocol.abstract import AbstractType 6 | from kafka.protocol.types import Schema 7 | 8 | from kafka.util import WeakMethod 9 | 10 | 11 | class Struct(AbstractType): 12 | SCHEMA = Schema() 13 | 14 | def __init__(self, *args, **kwargs): 15 | if len(args) == len(self.SCHEMA.fields): 16 | for i, name in enumerate(self.SCHEMA.names): 17 | self.__dict__[name] = args[i] 18 | elif len(args) > 0: 19 | raise ValueError('Args must be empty or mirror schema') 20 | else: 21 | for name in self.SCHEMA.names: 22 | self.__dict__[name] = kwargs.pop(name, None) 23 | if kwargs: 24 | raise ValueError('Keyword(s) not in schema %s: %s' 25 | % (list(self.SCHEMA.names), 26 | ', '.join(kwargs.keys()))) 27 | 28 | # overloading encode() to support both class and instance 29 | # Without WeakMethod() this creates circular ref, which 30 | # causes instances to "leak" to garbage 31 | self.encode = WeakMethod(self._encode_self) 32 | 33 | 34 | @classmethod 35 | def encode(cls, item): # pylint: disable=E0202 36 | bits = [] 37 | for i, field in enumerate(cls.SCHEMA.fields): 38 | bits.append(field.encode(item[i])) 39 | return b''.join(bits) 40 | 41 | def _encode_self(self): 42 | return self.SCHEMA.encode( 43 | [self.__dict__[name] for name in self.SCHEMA.names] 44 | ) 45 | 46 | @classmethod 47 | def 
decode(cls, data): 48 | if isinstance(data, bytes): 49 | data = BytesIO(data) 50 | return cls(*[field.decode(data) for field in cls.SCHEMA.fields]) 51 | 52 | def get_item(self, name): 53 | if name not in self.SCHEMA.names: 54 | raise KeyError("%s is not in the schema" % name) 55 | return self.__dict__[name] 56 | 57 | def __repr__(self): 58 | key_vals = [] 59 | for name, field in zip(self.SCHEMA.names, self.SCHEMA.fields): 60 | key_vals.append('%s=%s' % (name, field.repr(self.__dict__[name]))) 61 | return self.__class__.__name__ + '(' + ', '.join(key_vals) + ')' 62 | 63 | def __hash__(self): 64 | return hash(self.encode()) 65 | 66 | def __eq__(self, other): 67 | if self.SCHEMA != other.SCHEMA: 68 | return False 69 | for attr in self.SCHEMA.names: 70 | if self.__dict__[attr] != other.__dict__[attr]: 71 | return False 72 | return True 73 | -------------------------------------------------------------------------------- /kafka/protocol/txn_offset_commit.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.protocol.api import Request, Response 4 | from kafka.protocol.types import Array, Int16, Int32, Int64, Schema, String 5 | 6 | 7 | class TxnOffsetCommitResponse_v0(Response): 8 | API_KEY = 28 9 | API_VERSION = 0 10 | SCHEMA = Schema( 11 | ('throttle_time_ms', Int32), 12 | ('topics', Array( 13 | ('topic', String('utf-8')), 14 | ('partitions', Array( 15 | ('partition', Int32), 16 | ('error_code', Int16)))))) 17 | 18 | 19 | class TxnOffsetCommitResponse_v1(Response): 20 | API_KEY = 28 21 | API_VERSION = 1 22 | SCHEMA = TxnOffsetCommitResponse_v0.SCHEMA 23 | 24 | 25 | class TxnOffsetCommitResponse_v2(Response): 26 | API_KEY = 28 27 | API_VERSION = 2 28 | SCHEMA = TxnOffsetCommitResponse_v1.SCHEMA 29 | 30 | 31 | class TxnOffsetCommitRequest_v0(Request): 32 | API_KEY = 28 33 | API_VERSION = 0 34 | RESPONSE_TYPE = TxnOffsetCommitResponse_v0 35 | SCHEMA = Schema( 36 | ('transactional_id', String('utf-8')), 37 | ('group_id', String('utf-8')), 38 | ('producer_id', Int64), 39 | ('producer_epoch', Int16), 40 | ('topics', Array( 41 | ('topic', String('utf-8')), 42 | ('partitions', Array( 43 | ('partition', Int32), 44 | ('offset', Int64), 45 | ('metadata', String('utf-8'))))))) 46 | 47 | 48 | class TxnOffsetCommitRequest_v1(Request): 49 | API_KEY = 28 50 | API_VERSION = 1 51 | RESPONSE_TYPE = TxnOffsetCommitResponse_v1 52 | SCHEMA = TxnOffsetCommitRequest_v0.SCHEMA 53 | 54 | 55 | class TxnOffsetCommitRequest_v2(Request): 56 | API_KEY = 28 57 | API_VERSION = 2 58 | RESPONSE_TYPE = TxnOffsetCommitResponse_v2 59 | SCHEMA = Schema( 60 | ('transactional_id', String('utf-8')), 61 | ('group_id', String('utf-8')), 62 | ('producer_id', Int64), 63 | ('producer_epoch', Int16), 64 | ('topics', Array( 65 | ('topic', String('utf-8')), 66 | ('partitions', Array( 67 | ('partition', Int32), 68 | ('offset', Int64), 69 | ('leader_epoch', Int32), 70 | ('metadata', String('utf-8'))))))) 71 | 72 | 73 | TxnOffsetCommitRequest = [ 74 | TxnOffsetCommitRequest_v0, TxnOffsetCommitRequest_v1, TxnOffsetCommitRequest_v2, 75 | ] 76 | TxnOffsetCommitResponse = [ 77 | TxnOffsetCommitResponse_v0, TxnOffsetCommitResponse_v1, TxnOffsetCommitResponse_v2, 78 | ] 79 | -------------------------------------------------------------------------------- /kafka/record/README: -------------------------------------------------------------------------------- 1 | Module structured mostly based on 2 | kafka/clients/src/main/java/org/apache/kafka/common/record/ 
module of the Java 3 | client. 4 | 5 | See abc.py for abstract declarations. `ABCRecords` is used as a facade to hide 6 | version differences. `ABCRecordBatch` subclasses will implement actual parsers 7 | for different versions (v0/v1 as LegacyBatch and v2 as DefaultBatch; names 8 | taken from Java). 9 | -------------------------------------------------------------------------------- /kafka/record/__init__.py: -------------------------------------------------------------------------------- 1 | from kafka.record.memory_records import MemoryRecords, MemoryRecordsBuilder 2 | 3 | __all__ = ["MemoryRecords", "MemoryRecordsBuilder"] 4 | -------------------------------------------------------------------------------- /kafka/sasl/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import platform 4 | 5 | from kafka.sasl.gssapi import SaslMechanismGSSAPI 6 | from kafka.sasl.msk import SaslMechanismAwsMskIam 7 | from kafka.sasl.oauth import SaslMechanismOAuth 8 | from kafka.sasl.plain import SaslMechanismPlain 9 | from kafka.sasl.scram import SaslMechanismScram 10 | from kafka.sasl.sspi import SaslMechanismSSPI 11 | 12 | 13 | SASL_MECHANISMS = {} 14 | 15 | 16 | def register_sasl_mechanism(name, klass, overwrite=False): 17 | if not overwrite and name in SASL_MECHANISMS: 18 | raise ValueError('Sasl mechanism %s already defined!' % name) 19 | SASL_MECHANISMS[name] = klass 20 | 21 | 22 | def get_sasl_mechanism(name): 23 | return SASL_MECHANISMS[name] 24 | 25 | 26 | register_sasl_mechanism('AWS_MSK_IAM', SaslMechanismAwsMskIam) 27 | if platform.system() == 'Windows': 28 | register_sasl_mechanism('GSSAPI', SaslMechanismSSPI) 29 | else: 30 | register_sasl_mechanism('GSSAPI', SaslMechanismGSSAPI) 31 | register_sasl_mechanism('OAUTHBEARER', SaslMechanismOAuth) 32 | register_sasl_mechanism('PLAIN', SaslMechanismPlain) 33 | register_sasl_mechanism('SCRAM-SHA-256', SaslMechanismScram) 34 | register_sasl_mechanism('SCRAM-SHA-512', SaslMechanismScram) 35 | -------------------------------------------------------------------------------- /kafka/sasl/abc.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | 5 | from kafka.vendor.six import add_metaclass 6 | 7 | 8 | @add_metaclass(abc.ABCMeta) 9 | class SaslMechanism(object): 10 | @abc.abstractmethod 11 | def __init__(self, **config): 12 | pass 13 | 14 | @abc.abstractmethod 15 | def auth_bytes(self): 16 | pass 17 | 18 | @abc.abstractmethod 19 | def receive(self, auth_bytes): 20 | pass 21 | 22 | @abc.abstractmethod 23 | def is_done(self): 24 | pass 25 | 26 | @abc.abstractmethod 27 | def is_authenticated(self): 28 | pass 29 | 30 | def auth_details(self): 31 | if not self.is_authenticated():  # is_authenticated is a method, not a flag 32 | raise RuntimeError('Not authenticated yet!') 33 | return 'Authenticated via SASL' 34 | -------------------------------------------------------------------------------- /kafka/sasl/plain.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import logging 4 | 5 | from kafka.sasl.abc import SaslMechanism 6 | 7 | 8 | log = logging.getLogger(__name__) 9 | 10 | 11 | class SaslMechanismPlain(SaslMechanism): 12 | 13 | def __init__(self, **config): 14 | if config.get('security_protocol', '') == 'SASL_PLAINTEXT': 15 | log.warning('Sending username and password in the clear') 16 | assert 'sasl_plain_username' in config,
'sasl_plain_username required for PLAIN sasl' 17 | assert 'sasl_plain_password' in config, 'sasl_plain_password required for PLAIN sasl' 18 | 19 | self.username = config['sasl_plain_username'] 20 | self.password = config['sasl_plain_password'] 21 | self._is_done = False 22 | self._is_authenticated = False 23 | 24 | def auth_bytes(self): 25 | # Send PLAIN credentials per RFC-4616 26 | return bytes('\0'.join([self.username, self.username, self.password]).encode('utf-8')) 27 | 28 | def receive(self, auth_bytes): 29 | self._is_done = True 30 | self._is_authenticated = auth_bytes == b'' 31 | 32 | def is_done(self): 33 | return self._is_done 34 | 35 | def is_authenticated(self): 36 | return self._is_authenticated 37 | 38 | def auth_details(self): 39 | if not self.is_authenticated():  # call the method; a bound method is always truthy 40 | raise RuntimeError('Not authenticated yet!') 41 | return 'Authenticated as %s via SASL / Plain' % self.username 42 | -------------------------------------------------------------------------------- /kafka/serializer/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from kafka.serializer.abstract import Serializer, Deserializer 4 | -------------------------------------------------------------------------------- /kafka/serializer/abstract.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import abc 4 | 5 | from kafka.vendor.six import add_metaclass 6 | 7 | 8 | @add_metaclass(abc.ABCMeta) 9 | class Serializer(object): 10 | def __init__(self, **config): 11 | pass 12 | 13 | @abc.abstractmethod 14 | def serialize(self, topic, value): 15 | pass 16 | 17 | def close(self): 18 | pass 19 | 20 | 21 | @add_metaclass(abc.ABCMeta) 22 | class Deserializer(object): 23 | def __init__(self, **config): 24 | pass 25 | 26 | @abc.abstractmethod 27 | def deserialize(self, topic, bytes_): 28 | pass 29 | 30 | def close(self): 31 | pass 32 | -------------------------------------------------------------------------------- /kafka/vendor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dpkp/kafka-python/e6abbbf284a1556536941fb8d99fb5ca03aa1e22/kafka/vendor/__init__.py -------------------------------------------------------------------------------- /kafka/vendor/socketpair.py: -------------------------------------------------------------------------------- 1 | # pylint: skip-file 2 | # vendored from https://github.com/mhils/backports.socketpair 3 | from __future__ import absolute_import 4 | 5 | import sys 6 | import socket 7 | import errno 8 | 9 | _LOCALHOST = '127.0.0.1' 10 | _LOCALHOST_V6 = '::1' 11 | 12 | if not hasattr(socket, "socketpair"): 13 | # Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain. 14 | def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): 15 | if family == socket.AF_INET: 16 | host = _LOCALHOST 17 | elif family == socket.AF_INET6: 18 | host = _LOCALHOST_V6 19 | else: 20 | raise ValueError("Only AF_INET and AF_INET6 socket address families " 21 | "are supported") 22 | if type != socket.SOCK_STREAM: 23 | raise ValueError("Only SOCK_STREAM socket type is supported") 24 | if proto != 0: 25 | raise ValueError("Only protocol zero is supported") 26 | 27 | # We create a connected TCP socket. Note the trick with 28 | # setblocking(False) that prevents us from having to create a thread.
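# The steps below: bind and listen on an ephemeral localhost port, start a
# non-blocking connect() toward it, then accept() the in-flight connection --
# yielding two already-connected stream sockets without a helper thread.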
29 | lsock = socket.socket(family, type, proto) 30 | try: 31 | lsock.bind((host, 0)) 32 | lsock.listen(min(socket.SOMAXCONN, 128)) 33 | # On IPv6, ignore flow_info and scope_id 34 | addr, port = lsock.getsockname()[:2] 35 | csock = socket.socket(family, type, proto) 36 | try: 37 | csock.setblocking(False) 38 | if sys.version_info >= (3, 0): 39 | try: 40 | csock.connect((addr, port)) 41 | except (BlockingIOError, InterruptedError): 42 | pass 43 | else: 44 | try: 45 | csock.connect((addr, port)) 46 | except socket.error as e: 47 | if e.errno != errno.WSAEWOULDBLOCK: 48 | raise 49 | csock.setblocking(True) 50 | ssock, _ = lsock.accept() 51 | except Exception: 52 | csock.close() 53 | raise 54 | finally: 55 | lsock.close() 56 | 57 | # Authenticating avoids using a connection from something else 58 | # able to connect to {host}:{port} instead of us. 59 | # We expect only AF_INET and AF_INET6 families. 60 | try: 61 | if ( 62 | ssock.getsockname() != csock.getpeername() 63 | or csock.getsockname() != ssock.getpeername() 64 | ): 65 | raise ConnectionError("Unexpected peer connection") 66 | except: 67 | # getsockname() and getpeername() can fail 68 | # if either socket isn't connected. 69 | ssock.close() 70 | csock.close() 71 | raise 72 | 73 | return (ssock, csock) 74 | 75 | socket.socketpair = socketpair 76 | -------------------------------------------------------------------------------- /kafka/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '2.2.10' 2 | -------------------------------------------------------------------------------- /pylint.rc: -------------------------------------------------------------------------------- 1 | [TYPECHECK] 2 | ignored-classes=SyncManager,_socketobject 3 | ignored-modules=kafka.vendor.six.moves 4 | generated-members=py.* 5 | 6 | [MESSAGES CONTROL] 7 | disable=E1129 8 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.2"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "kafka-python" 7 | dynamic = ["version"] 8 | authors = [{name = "Dana Powers", email = "dana.powers@gmail.com"}] 9 | description = "Pure Python client for Apache Kafka" 10 | keywords = ["apache kafka", "kafka"] 11 | readme = "README.rst" 12 | classifiers = [ 13 | "Development Status :: 5 - Production/Stable", 14 | "Intended Audience :: Developers", 15 | "License :: OSI Approved :: Apache Software License", 16 | "Programming Language :: Python", 17 | "Programming Language :: Python :: 2", 18 | "Programming Language :: Python :: 2.7", 19 | "Programming Language :: Python :: 3", 20 | "Programming Language :: Python :: 3.4", 21 | "Programming Language :: Python :: 3.5", 22 | "Programming Language :: Python :: 3.6", 23 | "Programming Language :: Python :: 3.7", 24 | "Programming Language :: Python :: 3.8", 25 | "Programming Language :: Python :: 3.9", 26 | "Programming Language :: Python :: 3.10", 27 | "Programming Language :: Python :: 3.11", 28 | "Programming Language :: Python :: 3.12", 29 | "Programming Language :: Python :: 3.13", 30 | "Programming Language :: Python :: Implementation :: CPython", 31 | "Programming Language :: Python :: Implementation :: PyPy", 32 | "Topic :: Software Development :: Libraries :: Python Modules", 33 | ] 34 | urls = {Homepage = "https://github.com/dpkp/kafka-python"} 35 | 36 | 
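# The extras declared below are optional; any subset can be combined at
# install time, e.g. (hypothetical invocation):
#   pip install "kafka-python[crc32c,lz4,snappy,zstd]"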
[project.optional-dependencies] 37 | crc32c = ["crc32c"] 38 | lz4 = ["lz4"] 39 | snappy = ["python-snappy"] 40 | zstd = ["zstandard"] 41 | testing = ["pytest", "mock; python_version < '3.3'", "pytest-mock", "pytest-timeout"] 42 | benchmarks = ["pyperf"] 43 | 44 | [tool.setuptools] 45 | include-package-data = false 46 | license-files = [] # workaround for https://github.com/pypa/setuptools/issues/4759 47 | 48 | [tool.setuptools.packages.find] 49 | exclude = ["test"] 50 | namespaces = false 51 | 52 | [tool.distutils.bdist_wheel] 53 | universal = 1 54 | 55 | [tool.setuptools.dynamic] 56 | version = {attr = "kafka.__version__"} 57 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | log_format = %(asctime)s.%(msecs)03d %(levelname)-8s %(thread)d:%(threadName)s %(name)-23s %(message)s 3 | log_level = DEBUG 4 | addopts = --durations=10 --timeout=300 5 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | coveralls 2 | crc32c 3 | docker-py 4 | flake8 5 | lz4 6 | mock; python_version < '3.3' 7 | py 8 | pylint 9 | pyperf 10 | pytest 11 | pytest-cov 12 | pytest-mock 13 | pytest-pylint 14 | pytest-timeout 15 | python-snappy 16 | Sphinx 17 | sphinx-rtd-theme 18 | xxhash 19 | zstandard 20 | -------------------------------------------------------------------------------- /servers/0.10.0.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.10.0.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.10.1.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.1.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.1.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.10.2.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.2.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.2.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.10.2.2/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.10.2.2/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.10.2.2/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.11.0.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.11.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.11.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.11.0.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.11.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.11.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.11.0.2/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.11.0.2/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.11.0.2/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.11.0.3/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/0.11.0.3/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
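The kafka_server_jaas.conf templates repeated above define two JAAS login contexts: KafkaServer, which receives the per-test {jaas_config} login module stanza for the broker's SASL listeners, and Client, left empty so the broker's ZooKeeper connection stays unauthenticated. The doubled braces are str.format escapes that collapse to literal single braces, as this illustrative snippet shows (the PlainLoginModule stanza is just an example value):

    template = 'KafkaServer {{\n{jaas_config}\n}};\nClient {{}};'
    stanza = ('org.apache.kafka.common.security.plain.PlainLoginModule required\n'
              '  username="admin" password="admin-secret";')
    print(template.format(jaas_config=stanza))
    # KafkaServer {
    # org.apache.kafka.common.security.plain.PlainLoginModule required
    #   username="admin" password="admin-secret";
    # };
    # Client {};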
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.11.0.3/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.8.0/resources/kafka.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | ############################# Server Basics ############################# 17 | 18 | broker.id={broker_id} 19 | 20 | ############################# Socket Server Settings ############################# 21 | 22 | port={port} 23 | host.name={host} 24 | 25 | num.network.threads=2 26 | num.io.threads=2 27 | 28 | socket.send.buffer.bytes=1048576 29 | socket.receive.buffer.bytes=1048576 30 | socket.request.max.bytes=104857600 31 | 32 | ############################# Log Basics ############################# 33 | 34 | log.dirs={tmp_dir}/data 35 | num.partitions={partitions} 36 | default.replication.factor={replicas} 37 | 38 | ## Short Replica Lag -- Drops failed brokers out of ISR 39 | replica.lag.time.max.ms=1000 40 | replica.socket.timeout.ms=1000 41 | 42 | ############################# Log Flush Policy ############################# 43 | 44 | log.flush.interval.messages=10000 45 | log.flush.interval.ms=1000 46 | 47 | ############################# Log Retention Policy ############################# 48 | 49 | log.retention.hours=168 50 | log.segment.bytes=536870912 51 | log.cleanup.interval.mins=1 52 | 53 | ############################# Zookeeper ############################# 54 | 55 | zookeeper.connect={zk_host}:{zk_port}/{zk_chroot} 56 | 57 | # Timeout in ms for connecting to zookeeper 58 | zookeeper.connection.timeout.ms=1000000 59 | # We want to expire kafka broker sessions quickly when brokers die because we restart them quickly 60 | zookeeper.session.timeout.ms=500 61 | 62 | kafka.metrics.polling.interval.secs=5 63 | kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter 64 | kafka.csv.metrics.dir={tmp_dir} 65 | kafka.csv.metrics.reporter.enabled=false 66 | 67 | log.cleanup.policy=delete 68 | -------------------------------------------------------------------------------- /servers/0.8.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License.
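Note the deliberately aggressive timeouts in the broker template above (replica.lag.time.max.ms=1000, zookeeper.session.timeout.ms=500): they make a killed broker fall out of the ISR and lose its ZooKeeper session quickly, which keeps failover tests fast. One plausible way the remaining placeholders get filled for a multi-broker test cluster, sketched with illustrative values (the harness's real parameter plumbing may differ):

    template = open("servers/0.8.0/resources/kafka.properties").read()
    for broker_id in range(3):
        rendered = template.format(
            broker_id=broker_id,
            host="127.0.0.1",
            port=9092 + broker_id,          # one listener port per broker
            tmp_dir="/tmp/kafka-{0}".format(broker_id),
            partitions=4,
            replicas=3,
            zk_host="127.0.0.1",
            zk_port=2181,
            zk_chroot="kafka-python-test",  # shared chroot for the cluster
        )
        with open("/tmp/kafka-{0}.properties".format(broker_id), "w") as f:
            f.write(rendered)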
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | dataDir={tmp_dir} 17 | clientPortAddress={host} 18 | clientPort={port} 19 | maxClientCnxns=0 20 | -------------------------------------------------------------------------------- /servers/0.8.1.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.1.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. 
See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.8.1/resources/kafka.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | ############################# Server Basics ############################# 17 | 18 | broker.id={broker_id} 19 | 20 | ############################# Socket Server Settings ############################# 21 | 22 | port={port} 23 | host.name={host} 24 | 25 | num.network.threads=2 26 | num.io.threads=2 27 | 28 | socket.send.buffer.bytes=1048576 29 | socket.receive.buffer.bytes=1048576 30 | socket.request.max.bytes=104857600 31 | 32 | ############################# Log Basics ############################# 33 | 34 | log.dirs={tmp_dir}/data 35 | num.partitions={partitions} 36 | default.replication.factor={replicas} 37 | 38 | ## Short Replica Lag -- Drops failed brokers out of ISR 39 | replica.lag.time.max.ms=1000 40 | replica.socket.timeout.ms=1000 41 | 42 | ############################# Log Flush Policy ############################# 43 | 44 | log.flush.interval.messages=10000 45 | log.flush.interval.ms=1000 46 | 47 | ############################# Log Retention Policy ############################# 48 | 49 | log.retention.hours=168 50 | log.segment.bytes=536870912 51 | log.retention.check.interval.ms=60000 52 | log.cleanup.interval.mins=1 53 | log.cleaner.enable=false 54 | 55 | ############################# Zookeeper ############################# 56 | 57 | # Zookeeper connection string (see zookeeper docs for details). 58 | # This is a comma-separated list of host:port pairs, each corresponding to a zk 59 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
60 | # You can also append an optional chroot string to the urls to specify the 61 | # root directory for all kafka znodes. 62 | zookeeper.connect={zk_host}:{zk_port}/{zk_chroot} 63 | 64 | # Timeout in ms for connecting to zookeeper 65 | zookeeper.connection.timeout.ms=1000000 66 | # We want to expire kafka broker sessions quickly when brokers die because we restart them quickly 67 | zookeeper.session.timeout.ms=500 68 | -------------------------------------------------------------------------------- /servers/0.8.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | dataDir={tmp_dir} 17 | clientPortAddress={host} 18 | clientPort={port} 19 | maxClientCnxns=0 20 | -------------------------------------------------------------------------------- /servers/0.8.2.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements.
See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.2.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.8.2.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.2.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.8.2.2/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.8.2.2/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.9.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.9.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/0.9.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/0.9.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.0.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
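Note that the 0.8.x and 0.9.x fixture directories carry no kafka_server_jaas.conf; the JAAS template appears only for 0.10 and later brokers (as with 1.0.0 just above), which lines up with SASL/PLAIN support arriving in Kafka 0.10.0.0. The older brokers are exercised without authentication.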
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.0.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.0.2/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.0.2/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.0.2/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.1.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.1.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.1.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/1.1.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/1.1.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/1.1.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.0.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.0.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.0.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.0.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.0.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.0.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.1.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.1.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.1.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.1.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.1.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.1.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.2.1/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.2.1/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.2.1/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.3.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.3.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.3.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /servers/2.4.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.4.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.4.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | admin.enableServer=false 23 | -------------------------------------------------------------------------------- /servers/2.5.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/2.5.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.5.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | admin.enableServer=false 23 | -------------------------------------------------------------------------------- /servers/2.6.0/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; 5 | -------------------------------------------------------------------------------- /servers/2.6.0/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/2.6.0/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | admin.enableServer=false 23 | -------------------------------------------------------------------------------- /servers/resources/default/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/resources/default/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, logfile 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.appender.logfile=org.apache.log4j.FileAppender 23 | log4j.appender.logfile.File=${kafka.logs.dir}/server.log 24 | log4j.appender.logfile.layout=org.apache.log4j.PatternLayout 25 | log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n 26 | -------------------------------------------------------------------------------- /servers/resources/default/sasl_command.conf: -------------------------------------------------------------------------------- 1 | security.protocol={transport} 2 | sasl.mechanism={sasl_mechanism} 3 | sasl.jaas.config={jaas_config} 4 | -------------------------------------------------------------------------------- /servers/resources/default/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | admin.enableServer=false 23 | -------------------------------------------------------------------------------- /servers/trunk/resources/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer {{ 2 | {jaas_config} 3 | }}; 4 | Client {{}}; -------------------------------------------------------------------------------- /servers/trunk/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 21 | 22 | log4j.logger.kafka=DEBUG, stdout 23 | log4j.logger.org.I0Itec.zkclient.ZkClient=INFO, stdout 24 | log4j.logger.org.apache.zookeeper=INFO, stdout 25 | -------------------------------------------------------------------------------- /servers/trunk/resources/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir={tmp_dir} 17 | # the port at which the clients will connect 18 | clientPort={port} 19 | clientPortAddress={host} 20 | # disable the per-ip limit on the number of connections since this is a non-production config 21 | maxClientCnxns=0 22 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # See pyproject.toml for project / build configuration 2 | from setuptools import setup 3 | 4 | setup() 5 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | # Set default logging handler to avoid "No handler found" warnings. 
4 | import logging 5 | logging.basicConfig(level=logging.INFO) 6 | 7 | from kafka.future import Future 8 | Future.error_on_callbacks = True # always fail during testing 9 | -------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import pytest 4 | 5 | 6 | @pytest.fixture 7 | def metrics(): 8 | from kafka.metrics import Metrics 9 | 10 | metrics = Metrics() 11 | try: 12 | yield metrics 13 | finally: 14 | metrics.close() 15 | 16 | 17 | @pytest.fixture 18 | def conn(mocker): 19 | """Return a connection mocker fixture""" 20 | from kafka.conn import ConnectionStates 21 | from kafka.future import Future 22 | from kafka.protocol.metadata import MetadataResponse 23 | conn = mocker.patch('kafka.client_async.BrokerConnection') 24 | conn.return_value = conn 25 | conn.state = ConnectionStates.CONNECTED 26 | conn.send.return_value = Future().success( 27 | MetadataResponse[0]( 28 | [(0, 'foo', 12), (1, 'bar', 34)], # brokers 29 | [])) # topics 30 | conn.connection_delay.return_value = 0 31 | conn.blacked_out.return_value = False 32 | conn.next_ifr_request_timeout_ms.return_value = float('inf') 33 | def _set_conn_state(state): 34 | conn.state = state 35 | return state 36 | conn._set_conn_state = _set_conn_state 37 | conn.connect.side_effect = lambda: conn.state 38 | conn.connect_blocking.return_value = True 39 | conn.connecting = lambda: conn.state in (ConnectionStates.CONNECTING, 40 | ConnectionStates.HANDSHAKE) 41 | conn.connected = lambda: conn.state is ConnectionStates.CONNECTED 42 | conn.disconnected = lambda: conn.state is ConnectionStates.DISCONNECTED 43 | return conn 44 | 45 | 46 | @pytest.fixture 47 | def client(conn, mocker): 48 | from kafka import KafkaClient 49 | 50 | cli = KafkaClient(api_version=(0, 9)) 51 | mocker.patch.object(cli, '_init_connect', return_value=True) 52 | try: 53 | yield cli 54 | finally: 55 | cli._close() 56 | -------------------------------------------------------------------------------- /test/integration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dpkp/kafka-python/e6abbbf284a1556536941fb8d99fb5ca03aa1e22/test/integration/__init__.py -------------------------------------------------------------------------------- /test/sasl/test_gssapi.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | try: 4 | from unittest import mock 5 | except ImportError: 6 | import mock 7 | 8 | from kafka.sasl import get_sasl_mechanism 9 | import kafka.sasl.gssapi 10 | 11 | 12 | def test_gssapi(): 13 | config = { 14 | 'sasl_kerberos_domain_name': 'foo', 15 | 'sasl_kerberos_service_name': 'bar', 16 | } 17 | client_ctx = mock.Mock() 18 | client_ctx.step.side_effect = [b'init', b'exchange', b'complete', b'xxxx'] 19 | client_ctx.complete = False 20 | def mocked_message_wrapper(msg, *args): 21 | wrapped = mock.Mock() 22 | type(wrapped).message = mock.PropertyMock(return_value=msg) 23 | return wrapped 24 | client_ctx.unwrap.side_effect = mocked_message_wrapper 25 | client_ctx.wrap.side_effect = mocked_message_wrapper 26 | kafka.sasl.gssapi.gssapi = mock.Mock() 27 | kafka.sasl.gssapi.gssapi.SecurityContext.return_value = client_ctx 28 | gssapi = get_sasl_mechanism('GSSAPI')(**config) 29 | assert isinstance(gssapi, kafka.sasl.gssapi.SaslMechanismGSSAPI) 30 | 
client_ctx.step.assert_called_with(None) 31 | 32 | while not gssapi.is_done(): 33 | send_token = gssapi.auth_bytes() 34 | receive_token = send_token # not realistic, but enough for testing 35 | if send_token == b'\x00cbar@foo': # final wrapped message 36 | receive_token = b'' # final message gets an empty response 37 | gssapi.receive(receive_token) 38 | if client_ctx.step.call_count == 3: 39 | client_ctx.complete = True 40 | 41 | assert gssapi.is_done() 42 | assert gssapi.is_authenticated() 43 | -------------------------------------------------------------------------------- /test/sasl/test_msk.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import json 3 | import sys 4 | 5 | from kafka.sasl.msk import AwsMskIamClient 6 | 7 | try: 8 | from unittest import mock 9 | except ImportError: 10 | import mock 11 | 12 | 13 | def client_factory(token=None): 14 | if sys.version_info >= (3, 3): 15 | now = datetime.datetime.fromtimestamp(1629321911, datetime.timezone.utc) 16 | else: 17 | now = datetime.datetime.utcfromtimestamp(1629321911) 18 | with mock.patch('kafka.sasl.msk.datetime') as mock_dt: 19 | mock_dt.datetime.utcnow = mock.Mock(return_value=now) 20 | return AwsMskIamClient( 21 | host='localhost', 22 | access_key='XXXXXXXXXXXXXXXXXXXX', 23 | secret_key='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 24 | region='us-east-1', 25 | token=token, 26 | ) 27 | 28 | 29 | def test_aws_msk_iam_client_permanent_credentials(): 30 | client = client_factory(token=None) 31 | msg = client.first_message() 32 | assert msg 33 | assert isinstance(msg, bytes) 34 | actual = json.loads(msg) 35 | 36 | expected = { 37 | 'version': '2020_10_22', 38 | 'host': 'localhost', 39 | 'user-agent': 'kafka-python', 40 | 'action': 'kafka-cluster:Connect', 41 | 'x-amz-algorithm': 'AWS4-HMAC-SHA256', 42 | 'x-amz-credential': 'XXXXXXXXXXXXXXXXXXXX/20210818/us-east-1/kafka-cluster/aws4_request', 43 | 'x-amz-date': '20210818T212511Z', 44 | 'x-amz-signedheaders': 'host', 45 | 'x-amz-expires': '900', 46 | 'x-amz-signature': '0fa42ae3d5693777942a7a4028b564f0b372bafa2f71c1a19ad60680e6cb994b', 47 | } 48 | assert actual == expected 49 | 50 | 51 | def test_aws_msk_iam_client_temporary_credentials(): 52 | client = client_factory(token='XXXXX') 53 | msg = client.first_message() 54 | assert msg 55 | assert isinstance(msg, bytes) 56 | actual = json.loads(msg) 57 | 58 | expected = { 59 | 'version': '2020_10_22', 60 | 'host': 'localhost', 61 | 'user-agent': 'kafka-python', 62 | 'action': 'kafka-cluster:Connect', 63 | 'x-amz-algorithm': 'AWS4-HMAC-SHA256', 64 | 'x-amz-credential': 'XXXXXXXXXXXXXXXXXXXX/20210818/us-east-1/kafka-cluster/aws4_request', 65 | 'x-amz-date': '20210818T212511Z', 66 | 'x-amz-signedheaders': 'host', 67 | 'x-amz-expires': '900', 68 | 'x-amz-signature': 'b0619c50b7ecb4a7f6f92bd5f733770df5710e97b25146f97015c0b1db783b05', 69 | 'x-amz-security-token': 'XXXXX', 70 | } 71 | assert actual == expected 72 | -------------------------------------------------------------------------------- /test/test_acl_comparisons.py: -------------------------------------------------------------------------------- 1 | from kafka.admin.acl_resource import ACL 2 | from kafka.admin.acl_resource import ACLOperation 3 | from kafka.admin.acl_resource import ACLPermissionType 4 | from kafka.admin.acl_resource import ResourcePattern 5 | from kafka.admin.acl_resource import ResourceType 6 | from kafka.admin.acl_resource import ACLResourcePatternType 7 | 8 | 9 | def test_different_acls_are_different(): 10 
| one = ACL( 11 | principal='User:A', 12 | host='*', 13 | operation=ACLOperation.ALL, 14 | permission_type=ACLPermissionType.ALLOW, 15 | resource_pattern=ResourcePattern( 16 | resource_type=ResourceType.TOPIC, 17 | resource_name='some-topic', 18 | pattern_type=ACLResourcePatternType.LITERAL 19 | ) 20 | ) 21 | 22 | two = ACL( 23 | principal='User:B', # Different principal 24 | host='*', 25 | operation=ACLOperation.ALL, 26 | permission_type=ACLPermissionType.ALLOW, 27 | resource_pattern=ResourcePattern( 28 | resource_type=ResourceType.TOPIC, 29 | resource_name='some-topic', 30 | pattern_type=ACLResourcePatternType.LITERAL 31 | ) 32 | ) 33 | 34 | assert one != two 35 | assert hash(one) != hash(two) 36 | 37 | def test_different_acls_are_different_with_glob_topics(): 38 | one = ACL( 39 | principal='User:A', 40 | host='*', 41 | operation=ACLOperation.ALL, 42 | permission_type=ACLPermissionType.ALLOW, 43 | resource_pattern=ResourcePattern( 44 | resource_type=ResourceType.TOPIC, 45 | resource_name='*', 46 | pattern_type=ACLResourcePatternType.LITERAL 47 | ) 48 | ) 49 | 50 | two = ACL( 51 | principal='User:B', # Different principal 52 | host='*', 53 | operation=ACLOperation.ALL, 54 | permission_type=ACLPermissionType.ALLOW, 55 | resource_pattern=ResourcePattern( 56 | resource_type=ResourceType.TOPIC, 57 | resource_name='*', 58 | pattern_type=ACLResourcePatternType.LITERAL 59 | ) 60 | ) 61 | 62 | assert one != two 63 | assert hash(one) != hash(two) 64 | 65 | def test_same_acls_are_same(): 66 | one = ACL( 67 | principal='User:A', 68 | host='*', 69 | operation=ACLOperation.ALL, 70 | permission_type=ACLPermissionType.ALLOW, 71 | resource_pattern=ResourcePattern( 72 | resource_type=ResourceType.TOPIC, 73 | resource_name='some-topic', 74 | pattern_type=ACLResourcePatternType.LITERAL 75 | ) 76 | ) 77 | 78 | two = ACL( 79 | principal='User:A', 80 | host='*', 81 | operation=ACLOperation.ALL, 82 | permission_type=ACLPermissionType.ALLOW, 83 | resource_pattern=ResourcePattern( 84 | resource_type=ResourceType.TOPIC, 85 | resource_name='some-topic', 86 | pattern_type=ACLResourcePatternType.LITERAL 87 | ) 88 | ) 89 | 90 | assert one == two 91 | assert hash(one) == hash(two) 92 | assert len(set((one, two))) == 1 93 | -------------------------------------------------------------------------------- /test/test_api_object_implementation.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import pytest 3 | 4 | from kafka.protocol.api import Request 5 | from kafka.protocol.api import Response 6 | 7 | 8 | attr_names = [n for n in dir(Request) if isinstance(getattr(Request, n), abc.abstractproperty)] 9 | @pytest.mark.parametrize('klass', Request.__subclasses__()) 10 | @pytest.mark.parametrize('attr_name', attr_names) 11 | def test_request_type_conformance(klass, attr_name): 12 | assert hasattr(klass, attr_name) 13 | 14 | attr_names = [n for n in dir(Response) if isinstance(getattr(Response, n), abc.abstractproperty)] 15 | @pytest.mark.parametrize('klass', Response.__subclasses__()) 16 | @pytest.mark.parametrize('attr_name', attr_names) 17 | def test_response_type_conformance(klass, attr_name): 18 | assert hasattr(klass, attr_name) 19 | -------------------------------------------------------------------------------- /test/test_consumer.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import pytest 4 | 5 | from kafka import KafkaConsumer, TopicPartition 6 | from kafka.errors import 
KafkaConfigurationError, IllegalStateError 7 | 8 | 9 | def test_session_timeout_larger_than_request_timeout_raises(): 10 | with pytest.raises(KafkaConfigurationError): 11 | KafkaConsumer(bootstrap_servers='localhost:9092', api_version=(0, 9), group_id='foo', session_timeout_ms=50000, request_timeout_ms=40000) 12 | 13 | 14 | def test_fetch_max_wait_larger_than_request_timeout_raises(): 15 | with pytest.raises(KafkaConfigurationError): 16 | KafkaConsumer(bootstrap_servers='localhost:9092', fetch_max_wait_ms=50000, request_timeout_ms=40000) 17 | 18 | 19 | def test_request_timeout_larger_than_connections_max_idle_ms_raises(): 20 | with pytest.raises(KafkaConfigurationError): 21 | KafkaConsumer(bootstrap_servers='localhost:9092', api_version=(0, 9), request_timeout_ms=50000, connections_max_idle_ms=40000) 22 | 23 | 24 | def test_subscription_copy(): 25 | consumer = KafkaConsumer('foo', api_version=(0, 10, 0)) 26 | sub = consumer.subscription() 27 | assert sub is not consumer.subscription() 28 | assert sub == set(['foo']) 29 | sub.add('fizz') 30 | assert consumer.subscription() == set(['foo']) 31 | 32 | 33 | def test_assign(): 34 | # Consumer w/ subscription to topic 'foo' 35 | consumer = KafkaConsumer('foo', api_version=(0, 10, 0)) 36 | assert consumer.assignment() == set() 37 | # Cannot assign manually 38 | with pytest.raises(IllegalStateError): 39 | consumer.assign([TopicPartition('foo', 0)]) 40 | 41 | assert 'foo' in consumer._client._topics 42 | 43 | consumer = KafkaConsumer(api_version=(0, 10, 0)) 44 | assert consumer.assignment() == set() 45 | consumer.assign([TopicPartition('foo', 0)]) 46 | assert consumer.assignment() == set([TopicPartition('foo', 0)]) 47 | assert 'foo' in consumer._client._topics 48 | # Cannot subscribe 49 | with pytest.raises(IllegalStateError): 50 | consumer.subscribe(topics=['foo']) 51 | consumer.assign([]) 52 | assert consumer.assignment() == set() 53 | -------------------------------------------------------------------------------- /test/test_package.py: -------------------------------------------------------------------------------- 1 | class TestPackage: 2 | def test_top_level_namespace(self): 3 | import kafka as kafka1 4 | assert kafka1.KafkaConsumer.__name__ == "KafkaConsumer" 5 | assert kafka1.consumer.__name__ == "kafka.consumer" 6 | assert kafka1.codec.__name__ == "kafka.codec" 7 | 8 | def test_submodule_namespace(self): 9 | import kafka.client_async as client1 10 | assert client1.__name__ == "kafka.client_async" 11 | 12 | from kafka import client_async as client2 13 | assert client2.__name__ == "kafka.client_async" 14 | 15 | from kafka.client_async import KafkaClient as KafkaClient1 16 | assert KafkaClient1.__name__ == "KafkaClient" 17 | 18 | from kafka import KafkaClient as KafkaClient2 19 | assert KafkaClient2.__name__ == "KafkaClient" 20 | 21 | from kafka.codec import gzip_encode as gzip_encode1 22 | assert gzip_encode1.__name__ == "gzip_encode" 23 | 24 | from kafka.codec import snappy_encode 25 | assert snappy_encode.__name__ == "snappy_encode" 26 | -------------------------------------------------------------------------------- /test/test_partition_movements.py: -------------------------------------------------------------------------------- 1 | from kafka.structs import TopicPartition 2 | 3 | from kafka.coordinator.assignors.sticky.partition_movements import PartitionMovements 4 | 5 | 6 | def test_empty_movements_are_sticky(): 7 | partition_movements = PartitionMovements() 8 | assert partition_movements.are_sticky() 9 | 10 | 11 | def 
test_sticky_movements(): 12 | partition_movements = PartitionMovements() 13 | partition_movements.move_partition(TopicPartition('t', 1), 'C1', 'C2') 14 | partition_movements.move_partition(TopicPartition('t', 1), 'C2', 'C3') 15 | partition_movements.move_partition(TopicPartition('t', 1), 'C3', 'C1') 16 | assert partition_movements.are_sticky() 17 | 18 | 19 | def test_should_detect_non_sticky_assignment(): 20 | partition_movements = PartitionMovements() 21 | partition_movements.move_partition(TopicPartition('t', 1), 'C1', 'C2') 22 | partition_movements.move_partition(TopicPartition('t', 2), 'C2', 'C1') 23 | assert not partition_movements.are_sticky() 24 | -------------------------------------------------------------------------------- /test/test_partitioner.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import pytest 4 | 5 | from kafka.partitioner import DefaultPartitioner, murmur2 6 | 7 | 8 | def test_default_partitioner(): 9 | partitioner = DefaultPartitioner() 10 | all_partitions = available = list(range(100)) 11 | # partitioner should return the same partition for the same key 12 | p1 = partitioner(b'foo', all_partitions, available) 13 | p2 = partitioner(b'foo', all_partitions, available) 14 | assert p1 == p2 15 | assert p1 in all_partitions 16 | 17 | # when key is None, choose one of available partitions 18 | assert partitioner(None, all_partitions, [123]) == 123 19 | 20 | # with fallback to all_partitions 21 | assert partitioner(None, all_partitions, []) in all_partitions 22 | 23 | 24 | @pytest.mark.parametrize("bytes_payload,partition_number", [ 25 | (b'', 681), (b'a', 524), (b'ab', 434), (b'abc', 107), (b'123456789', 566), 26 | (b'\x00 ', 742) 27 | ]) 28 | def test_murmur2_java_compatibility(bytes_payload, partition_number): 29 | partitioner = DefaultPartitioner() 30 | all_partitions = available = list(range(1000)) 31 | # compare with output from Kafka's org.apache.kafka.clients.producer.Partitioner 32 | assert partitioner(bytes_payload, all_partitions, available) == partition_number 33 | 34 | 35 | def test_murmur2_not_ascii(): 36 | # Verify no regression of murmur2() bug encoding py2 bytes that don't ascii encode 37 | murmur2(b'\xa4') 38 | murmur2(b'\x81' * 1000) 39 | -------------------------------------------------------------------------------- /test/test_producer.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import gc 4 | import platform 5 | import threading 6 | 7 | import pytest 8 | 9 | from kafka import KafkaProducer 10 | from kafka.cluster import ClusterMetadata 11 | from kafka.producer.transaction_manager import TransactionManager, ProducerIdAndEpoch 12 | 13 | 14 | def test_kafka_producer_thread_close(): 15 | threads = threading.active_count() 16 | producer = KafkaProducer(api_version=(2, 1)) # set api_version explicitly to avoid auto-detection 17 | assert threading.active_count() == threads + 1 18 | producer.close() 19 | assert threading.active_count() == threads 20 | 21 | 22 | def test_idempotent_producer_reset_producer_id(): 23 | transaction_manager = TransactionManager( 24 | transactional_id=None, 25 | transaction_timeout_ms=1000, 26 | retry_backoff_ms=100, 27 | api_version=(0, 11), 28 | metadata=ClusterMetadata(), 29 | ) 30 | 31 | test_producer_id_and_epoch = ProducerIdAndEpoch(123, 456) 32 | transaction_manager.set_producer_id_and_epoch(test_producer_id_and_epoch) 33 | assert 
--------------------------------------------------------------------------------
/test/test_producer.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import

import gc
import platform
import threading

import pytest

from kafka import KafkaProducer
from kafka.cluster import ClusterMetadata
from kafka.producer.transaction_manager import TransactionManager, ProducerIdAndEpoch


def test_kafka_producer_thread_close():
    threads = threading.active_count()
    producer = KafkaProducer(api_version=(2, 1))  # set api_version explicitly to avoid auto-detection
    assert threading.active_count() == threads + 1
    producer.close()
    assert threading.active_count() == threads


def test_idempotent_producer_reset_producer_id():
    transaction_manager = TransactionManager(
        transactional_id=None,
        transaction_timeout_ms=1000,
        retry_backoff_ms=100,
        api_version=(0, 11),
        metadata=ClusterMetadata(),
    )

    test_producer_id_and_epoch = ProducerIdAndEpoch(123, 456)
    transaction_manager.set_producer_id_and_epoch(test_producer_id_and_epoch)
    assert transaction_manager.producer_id_and_epoch == test_producer_id_and_epoch
    transaction_manager.reset_producer_id()
    assert transaction_manager.producer_id_and_epoch == ProducerIdAndEpoch(-1, -1)

--------------------------------------------------------------------------------
/test/test_subscription_state.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import

import pytest

from kafka import TopicPartition
from kafka.consumer.subscription_state import SubscriptionState, TopicPartitionState
from kafka.vendor import six


def test_type_error():
    s = SubscriptionState()
    with pytest.raises(TypeError):
        s.subscribe(topics='foo')

    s.subscribe(topics=['foo'])


def test_change_subscription():
    s = SubscriptionState()
    s.subscribe(topics=['foo'])
    assert s.subscription == set(['foo'])
    s.change_subscription(['bar'])
    assert s.subscription == set(['bar'])


def test_group_subscribe():
    s = SubscriptionState()
    s.subscribe(topics=['foo'])
    assert s.subscription == set(['foo'])
    s.group_subscribe(['bar'])
    assert s.subscription == set(['foo'])
    assert s._group_subscription == set(['foo', 'bar'])

    s.reset_group_subscription()
    assert s.subscription == set(['foo'])
    assert s._group_subscription == set(['foo'])


def test_assign_from_subscribed():
    s = SubscriptionState()
    s.subscribe(topics=['foo'])
    with pytest.raises(ValueError):
        s.assign_from_subscribed([TopicPartition('bar', 0)])

    s.assign_from_subscribed([TopicPartition('foo', 0), TopicPartition('foo', 1)])
    assert set(s.assignment.keys()) == set([TopicPartition('foo', 0), TopicPartition('foo', 1)])
    assert all([isinstance(tps, TopicPartitionState) for tps in six.itervalues(s.assignment)])
    assert all([not tps.has_valid_position for tps in six.itervalues(s.assignment)])


def test_change_subscription_after_assignment():
    s = SubscriptionState()
    s.subscribe(topics=['foo'])
    s.assign_from_subscribed([TopicPartition('foo', 0), TopicPartition('foo', 1)])
    # Changing subscription retains existing assignment until next rebalance
    s.change_subscription(['bar'])
    assert set(s.assignment.keys()) == set([TopicPartition('foo', 0), TopicPartition('foo', 1)])

--------------------------------------------------------------------------------
/test/test_util.py:
--------------------------------------------------------------------------------
# pylint: skip-file
from __future__ import absolute_import

import pytest

from kafka.util import ensure_valid_topic_name


@pytest.mark.parametrize(('topic_name', 'expectation'), [
    (0, pytest.raises(TypeError)),
    (None, pytest.raises(TypeError)),
    ('', pytest.raises(ValueError)),
    ('.', pytest.raises(ValueError)),
    ('..', pytest.raises(ValueError)),
    ('a' * 250, pytest.raises(ValueError)),
    ('abc/123', pytest.raises(ValueError)),
    ('/abc/123', pytest.raises(ValueError)),
    ('/abc123', pytest.raises(ValueError)),
    ('name with space', pytest.raises(ValueError)),
    ('name*with*stars', pytest.raises(ValueError)),
    ('name+with+plus', pytest.raises(ValueError)),
])
def test_topic_name_validation(topic_name, expectation):
    with expectation:
        ensure_valid_topic_name(topic_name)
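
The parametrized cases above trace the broker's topic-naming rules that ensure_valid_topic_name enforces client-side: a name must be a non-empty string, must not be '.' or '..', may be at most 249 characters, and is limited to ASCII alphanumerics plus '.', '_', and '-'. A standalone sketch of an equivalent check, offered as a reading aid rather than the library's code (which lives in kafka/util.py):

import re

MAX_TOPIC_LENGTH = 249  # one case above shows 'a' * 250 is rejected
LEGAL_TOPIC_CHARS = re.compile(r'^[a-zA-Z0-9._-]+$')

def check_topic_name(name):
    # Mirrors the failure modes parametrized above; hypothetical helper.
    if not isinstance(name, str):
        raise TypeError('topic name must be a string, got %r' % type(name))
    if name in ('', '.', '..'):
        raise ValueError('illegal topic name: %r' % name)
    if len(name) > MAX_TOPIC_LENGTH:
        raise ValueError('topic name longer than %d characters' % MAX_TOPIC_LENGTH)
    if not LEGAL_TOPIC_CHARS.match(name):
        raise ValueError('topic name contains illegal characters: %r' % name)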
--------------------------------------------------------------------------------
/test/testutil.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import

import os
import random
import re
import string
import time

import pytest

import kafka.codec


def special_to_underscore(string, _matcher=re.compile(r'[^a-zA-Z0-9_]+')):
    return _matcher.sub('_', string)


def random_string(length):
    return "".join(random.choice(string.ascii_letters) for i in range(length))


def env_kafka_version():
    """Return the Kafka version set in the OS environment as a tuple.

    Example: '0.8.1.1' --> (0, 8, 1, 1)
    """
    if 'KAFKA_VERSION' not in os.environ:
        return ()
    return tuple(map(int, os.environ['KAFKA_VERSION'].split('.')))


def assert_message_count(messages, num_messages):
    """Check that we received the expected number of messages with no duplicates."""
    # Make sure we got them all
    assert len(messages) == num_messages, 'Expected %d messages, got %d' % (num_messages, len(messages))
    # Make sure there are no duplicates
    # Note: Currently duplicates are identified only using key/value. Other attributes like topic, partition, headers,
    # timestamp, etc are ignored... this could be changed if necessary, but will be more tolerant of dupes.
    unique_messages = {(m.key, m.value) for m in messages}
    assert len(unique_messages) == num_messages, 'Expected %d unique messages, got %d' % (num_messages, len(unique_messages))


def maybe_skip_unsupported_compression(compression_type):
    # Map record-batch compression attribute codes to codec names
    codecs = {1: 'gzip', 2: 'snappy', 3: 'lz4', 4: 'zstd'}
    if not compression_type:
        return
    elif compression_type in codecs:
        compression_type = codecs[compression_type]

    checker = getattr(kafka.codec, 'has_' + compression_type, None)
    if checker and not checker():
        pytest.skip("Compression libraries not installed for %s" % (compression_type,))


class Timer(object):
    """Context manager that records elapsed wall-clock time in .interval."""

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.interval = self.end - self.start

--------------------------------------------------------------------------------
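
These helpers are meant to be composed by the integration tests elsewhere in the suite: env_kafka_version() gates broker-dependent tests on the KAFKA_VERSION environment variable, random_string() keeps topic names from colliding across runs, and Timer records wall-clock intervals. A hypothetical usage sketch; the test body, the version floor, and the 30-second budget are illustrative, and the test.testutil import path assumes the suite's usual layout:

import pytest

from test.testutil import Timer, env_kafka_version, random_string


# env_kafka_version() returns () when KAFKA_VERSION is unset, so the
# comparison below also skips the test when no broker is configured.
@pytest.mark.skipif(env_kafka_version() < (0, 11, 0), reason='requires KAFKA_VERSION >= 0.11')
def test_roundtrip_timing():
    topic = 'test-' + random_string(10)  # unique-ish name per run
    with Timer() as t:
        pass  # produce to and consume from `topic` here
    assert t.interval < 30  # hypothetical time budget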