├── .editorconfig
├── .github
│   ├── semantic.yml
│   └── workflows
│       └── ci.yml
├── .gitignore
├── Cargo.lock
├── Cargo.toml
├── LICENSE
├── README.md
├── kafka-api
│   ├── Cargo.toml
│   └── src
│       ├── apikey.rs
│       ├── bytebuffer
│       │   ├── format.rs
│       │   ├── impl_traits.rs
│       │   └── mod.rs
│       ├── codec
│       │   ├── mod.rs
│       │   ├── readable.rs
│       │   └── writable.rs
│       ├── error.rs
│       ├── lib.rs
│       ├── records
│       │   ├── consts.rs
│       │   ├── mod.rs
│       │   ├── mutable_records.rs
│       │   ├── readonly_records.rs
│       │   ├── record.rs
│       │   └── record_batch.rs
│       ├── schemata
│       │   ├── api_versions_request.rs
│       │   ├── api_versions_response.rs
│       │   ├── create_topic_request.rs
│       │   ├── create_topic_response.rs
│       │   ├── fetch_request.rs
│       │   ├── fetch_response.rs
│       │   ├── find_coordinator_request.rs
│       │   ├── find_coordinator_response.rs
│       │   ├── init_producer_id_request.rs
│       │   ├── init_producer_id_response.rs
│       │   ├── join_group_request.rs
│       │   ├── join_group_response.rs
│       │   ├── metadata_request.rs
│       │   ├── metadata_response.rs
│       │   ├── mod.rs
│       │   ├── offset_fetch_request.rs
│       │   ├── offset_fetch_response.rs
│       │   ├── produce_request.rs
│       │   ├── produce_response.rs
│       │   ├── request_header.rs
│       │   ├── response_header.rs
│       │   ├── sync_group_request.rs
│       │   └── sync_group_response.rs
│       └── sendable
│           └── mod.rs
├── licenserc.toml
├── rust-toolchain.toml
├── rustfmt.toml
└── simplesrv
    ├── Cargo.toml
    └── src
        ├── lib.rs
        └── main.rs

/.editorconfig:
--------------------------------------------------------------------------------
# Copyright 2023 tison
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

root = true

[*]
end_of_line = lf
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true

--------------------------------------------------------------------------------
/.github/semantic.yml:
--------------------------------------------------------------------------------
# Copyright 2023 tison
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The pull request's title should fulfill the following pattern:
#
#     <type>[optional scope]: <description>
#
# ... where valid types and scopes can be found below; for example:
#
#     build(maven): One level down for native profile
#
# More about configurations on https://github.com/Ezard/semantic-prs#configuration

enabled: true

titleOnly: true

types:
  - feat
  - fix
  - docs
  - style
  - refactor
  - perf
  - test
  - build
  - ci
  - chore
  - revert

targetUrl: https://github.com/tisonkun/kafka-api/blob/main/.github/semantic.yml

--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
# Copyright 2023 tison
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: CI
on:
  pull_request:
    branches: [ main ]
  push:
    branches: [ main ]

# Concurrency strategy:
#   github.workflow: distinguish this workflow from others
#   github.event_name: distinguish `push` event from `pull_request` event
#   github.event.number: set to the number of the pull request if `pull_request` event
#   github.run_id: otherwise, it's a `push` event, only cancel if we rerun the workflow
#
# Reference:
#   https://docs.github.com/en/actions/using-jobs/using-concurrency
#   https://docs.github.com/en/actions/learn-github-actions/contexts#github-context
concurrency:
  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event.number || github.run_id }}
  cancel-in-progress: true

jobs:
  check:
    name: Check
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: korandoru/hawkeye@v3
      - uses: Swatinem/rust-cache@v2
      - name: Check Clippy
        run: cargo clippy --tests --all-features --all-targets --workspace -- -D warnings
      - name: Check format
        run: cargo fmt --all --check

  test:
    name: Build and test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: Swatinem/rust-cache@v2
      - run: cargo build --workspace --all-features --tests --examples --benches
      - name: Run tests
        run: cargo test --workspace -- --nocapture

  required:
    name: Required
    runs-on: ubuntu-latest
    if: ${{ always() }}
    needs:
      - check
      - test
    steps:
      - name: Guardian
        run: |
          if [[ ! ( \
            "${{ needs.check.result }}" == "success" \
            && "${{ needs.test.result }}" == "success" \
          ) ]]; then
            echo "Required jobs haven't been completed successfully."
            exit -1
          fi

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Cargo
target

# Editor and IDE
.idea
.vscode

--------------------------------------------------------------------------------
/Cargo.lock:
--------------------------------------------------------------------------------
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "bytes"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be"

[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "getrandom"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
dependencies = [
 "cfg-if",
 "libc",
 "wasi",
]

[[package]]
name = "kafka-api"
version = "0.2.3"
dependencies = [
 "bytes",
 "tracing",
 "uuid",
]

[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
version = "0.2.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"

[[package]]
name = "log"
version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"

[[package]]
name = "nu-ansi-term"
version = "0.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
dependencies = [
 "overload",
 "winapi",
]

[[package]]
name = "once_cell"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"

[[package]]
name = "overload"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"

[[package]]
name = "pin-project-lite"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57"

[[package]]
name = "proc-macro2"
version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
dependencies = [
 "unicode-ident",
]

[[package]]
name = "quote"
version = "1.0.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5fe8a65d69dd0808184ebb5f836ab526bb259db23c657efa38711b1072ee47f0"
dependencies = [
 "proc-macro2",
]

[[package]]
name = "sharded-slab"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31"
dependencies = [
 "lazy_static",
]

[[package]]
name = "simplesrv"
version = "0.2.3"
dependencies = [
 "bytes",
 "kafka-api",
 "tracing",
 "tracing-subscriber",
 "uuid",
]

[[package]]
name = "smallvec"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"

[[package]]
name = "syn"
version = "2.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45c3457aacde3c65315de5031ec191ce46604304d2446e803d71ade03308d970"
dependencies = [
 "proc-macro2",
 "quote",
 "unicode-ident",
]

[[package]]
name = "thread_local"
version = "1.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
dependencies = [
 "cfg-if",
 "once_cell",
]

[[package]]
name = "tracing"
version = "0.1.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8"
dependencies = [
 "cfg-if",
 "pin-project-lite",
 "tracing-attributes",
 "tracing-core",
]

[[package]]
name = "tracing-attributes"
version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "tracing-core"
version = "0.1.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a"
dependencies = [
 "once_cell",
 "valuable",
]

[[package]]
name = "tracing-log"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922"
dependencies = [
 "lazy_static",
 "log",
 "tracing-core",
]

[[package]]
name = "tracing-subscriber"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77"
dependencies = [
 "nu-ansi-term",
 "sharded-slab",
 "smallvec",
 "thread_local",
 "tracing-core",
 "tracing-log",
]

[[package]]
name = "unicode-ident"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"

[[package]]
name = "uuid"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d"
dependencies = [
 "getrandom",
]

[[package]]
name = "valuable"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"

[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"

[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
 "winapi-i686-pc-windows-gnu",
 "winapi-x86_64-pc-windows-gnu",
]

[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"

[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
# Copyright 2023 tison
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[workspace]
members = [
    'kafka-api',
    'simplesrv',
]
resolver = "2"

[workspace.package]
edition = "2021"
version = "0.2.3"
license = "Apache-2.0"

authors = ["tison <wander4096@gmail.com>"]
repository = "https://github.com/tisonkun/kafka-api"
readme = "README.md"
homepage = "https://github.com/tisonkun/kafka-api"
documentation = "https://docs.rs/kafka-api"

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Archived

Development moved to [Morax](https://github.com/tisonkun/morax/) under [`api/kafka-api`](https://github.com/tisonkun/morax/tree/main/api/kafka-api).

--------------------------------------------------------------------------------
/kafka-api/Cargo.toml:
--------------------------------------------------------------------------------
# Copyright 2023 tison
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[package]
name = "kafka-api"

authors.workspace = true
edition.workspace = true
version.workspace = true
license.workspace = true
readme.workspace = true
repository.workspace = true
homepage.workspace = true

description = "The rust language implementation of generic Kafka APIs."

[dependencies]
bytes = "1.4.0"
uuid = { version = "1.3.4", features = ["v4"] }
tracing = "0.1.37"

--------------------------------------------------------------------------------
/kafka-api/src/bytebuffer/format.rs:
--------------------------------------------------------------------------------
// Copyright 2023 tison
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::{Debug, Formatter};

use crate::bytebuffer::ByteBuffer;

struct ByteBufferRef<'a>(&'a [u8]);

impl Debug for ByteBufferRef<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "b\"")?;
        for &b in self.0 {
            // https://doc.rust-lang.org/reference/tokens.html#byte-escapes
            if b == b'\n' {
                write!(f, "\\n")?;
            } else if b == b'\r' {
                write!(f, "\\r")?;
            } else if b == b'\t' {
                write!(f, "\\t")?;
            } else if b == b'\\' || b == b'"' {
                write!(f, "\\{}", b as char)?;
            } else if b == b'\0' {
                write!(f, "\\0")?;
            } else if (0x20..0x7f).contains(&b) {
                // ASCII printable
                write!(f, "{}", b as char)?;
            } else {
                write!(f, "\\x{:02x}", b)?;
            }
        }
        write!(f, "\"")?;
        Ok(())
    }
}

impl Debug for ByteBuffer {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        Debug::fmt(&ByteBufferRef(self.as_ref()), f)
    }
}
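The Debug implementations above render a buffer the way Rust prints byte-string literals: printable ASCII verbatim, named escapes for common control bytes, and \xNN for everything else. A minimal sketch of the resulting output — illustration only, assuming this crate is importable as `kafka_api`:

use kafka_api::bytebuffer::ByteBuffer;

fn main() {
    // tab and NUL use named escapes, 0xFF falls back to \xNN, quotes are escaped
    let buf = ByteBuffer::new(b"key\t\x00\xff\"quoted\"".to_vec());
    println!("{buf:?}"); // prints: b"key\t\0\xff\"quoted\""
}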

--------------------------------------------------------------------------------
/kafka-api/src/bytebuffer/impl_traits.rs:
--------------------------------------------------------------------------------
// Copyright 2023 tison
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{borrow::Borrow, cmp, hash, ops::Deref};

use crate::bytebuffer::ByteBuffer;

// impl Hash

impl hash::Hash for ByteBuffer {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        self.as_bytes().hash(state);
    }
}

// impl Refs

impl AsRef<[u8]> for ByteBuffer {
    fn as_ref(&self) -> &[u8] {
        self.as_bytes()
    }
}

impl Borrow<[u8]> for ByteBuffer {
    fn borrow(&self) -> &[u8] {
        self.as_bytes()
    }
}

impl Deref for ByteBuffer {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.as_bytes()
    }
}

// impl Eq

impl PartialEq for ByteBuffer {
    fn eq(&self, other: &ByteBuffer) -> bool {
        self.as_ref() == other.as_ref()
    }
}

impl PartialOrd for ByteBuffer {
    fn partial_cmp(&self, other: &ByteBuffer) -> Option<cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for ByteBuffer {
    fn cmp(&self, other: &ByteBuffer) -> cmp::Ordering {
        self.as_bytes().cmp(other.as_bytes())
    }
}

impl Eq for ByteBuffer {}

impl PartialEq<[u8]> for ByteBuffer {
    fn eq(&self, other: &[u8]) -> bool {
        self.as_bytes() == other
    }
}

impl PartialOrd<[u8]> for ByteBuffer {
    fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
        self.as_bytes().partial_cmp(other)
    }
}

impl PartialEq<ByteBuffer> for [u8] {
    fn eq(&self, other: &ByteBuffer) -> bool {
        *other == *self
    }
}

impl PartialOrd<ByteBuffer> for [u8] {
    fn partial_cmp(&self, other: &ByteBuffer) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<str> for ByteBuffer {
    fn eq(&self, other: &str) -> bool {
        self.as_bytes() == other.as_bytes()
    }
}

impl PartialOrd<str> for ByteBuffer {
    fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
        self.as_bytes().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<ByteBuffer> for str {
    fn eq(&self, other: &ByteBuffer) -> bool {
        *other == *self
    }
}

impl PartialOrd<ByteBuffer> for str {
    fn partial_cmp(&self, other: &ByteBuffer) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<Vec<u8>> for ByteBuffer {
    fn eq(&self, other: &Vec<u8>) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<Vec<u8>> for ByteBuffer {
    fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
        self.as_bytes().partial_cmp(&other[..])
    }
}

impl PartialEq<ByteBuffer> for Vec<u8> {
    fn eq(&self, other: &ByteBuffer) -> bool {
        *other == *self
    }
}

impl PartialOrd<ByteBuffer> for Vec<u8> {
    fn partial_cmp(&self, other: &ByteBuffer) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<String> for ByteBuffer {
    fn eq(&self, other: &String) -> bool {
        *self == other[..]
    }
}

impl PartialOrd<String> for ByteBuffer {
    fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
        self.as_bytes().partial_cmp(other.as_bytes())
    }
}

impl PartialEq<ByteBuffer> for String {
    fn eq(&self, other: &ByteBuffer) -> bool {
        *other == *self
    }
}

impl PartialOrd<ByteBuffer> for String {
    fn partial_cmp(&self, other: &ByteBuffer) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl PartialEq<ByteBuffer> for &[u8] {
    fn eq(&self, other: &ByteBuffer) -> bool {
        *other == *self
    }
}

impl PartialOrd<ByteBuffer> for &[u8] {
    fn partial_cmp(&self, other: &ByteBuffer) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
    }
}

impl PartialEq<ByteBuffer> for &str {
    fn eq(&self, other: &ByteBuffer) -> bool {
        *other == *self
    }
}

impl PartialOrd<ByteBuffer> for &str {
    fn partial_cmp(&self, other: &ByteBuffer) -> Option<cmp::Ordering> {
        <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
    }
}

impl<'a, T: ?Sized> PartialEq<&'a T> for ByteBuffer
where
    ByteBuffer: PartialEq<T>,
{
    fn eq(&self, other: &&'a T) -> bool {
        *self == **other
    }
}

impl<'a, T: ?Sized> PartialOrd<&'a T> for ByteBuffer
where
    ByteBuffer: PartialOrd<T>,
{
    fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
        self.partial_cmp(&**other)
    }
}
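Taken together, these impls let a ByteBuffer compare against byte slices, strs, Strings, and Vec<u8> from either side, with the blanket impls at the end covering references. A small sketch, assuming the crate is importable as `kafka_api` (not part of the repository):

use kafka_api::bytebuffer::ByteBuffer;

fn main() {
    let buf = ByteBuffer::new(b"hello".to_vec());
    assert!(buf == "hello"); // ByteBuffer vs &str, via the blanket &'a T impl
    assert!("hello" == buf); // and the reversed direction
    assert!(buf == b"hello"[..]); // vs [u8]
    assert!(buf == vec![b'h', b'e', b'l', b'l', b'o']); // vs Vec<u8>
    assert!(buf < "hellp".to_string()); // lexicographic order vs String
}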

--------------------------------------------------------------------------------
/kafka-api/src/bytebuffer/mod.rs:
--------------------------------------------------------------------------------
// Copyright 2023 tison
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use core::slice;
use std::{fmt::Debug, mem::ManuallyDrop, ops::RangeBounds, sync::Arc};

use bytes::Buf;

mod format;
mod impl_traits;

// Shared represents a deallocate guard
#[derive(Debug, Clone)]
struct Shared {
    ptr: *mut u8,
    len: usize,
    capacity: usize,
}

// shared ptr never changed + no alloc + in place mutations are managed
unsafe impl Send for Shared {}
unsafe impl Sync for Shared {}

impl Drop for Shared {
    fn drop(&mut self) {
        unsafe { drop(Vec::from_raw_parts(self.ptr, self.len, self.capacity)) }
    }
}

#[derive(Clone)]
pub struct ByteBuffer {
    start: usize,
    end: usize,
    shared: Arc<Shared>,
}

impl Default for ByteBuffer {
    fn default() -> Self {
        ByteBuffer::new(vec![])
    }
}

impl Buf for ByteBuffer {
    fn remaining(&self) -> usize {
        self.len()
    }

    fn chunk(&self) -> &[u8] {
        self.as_bytes()
    }

    fn advance(&mut self, cnt: usize) {
        self.start += cnt
    }
}

impl ByteBuffer {
    pub fn new(v: Vec<u8>) -> Self {
        let mut me = ManuallyDrop::new(v);
        let (ptr, len, capacity) = (me.as_mut_ptr(), me.len(), me.capacity());
        let (start, end) = (0, len);
        let shared = Arc::new(Shared { ptr, len, capacity });
        ByteBuffer { start, end, shared }
    }

    pub fn len(&self) -> usize {
        self.end - self.start
    }

    pub fn is_empty(&self) -> bool {
        self.end <= self.start
    }

    #[must_use = "consider ByteBuffer::advance if you don't need the other half"]
    pub fn split_to(&mut self, at: usize) -> ByteBuffer {
        assert!(
            at <= self.len(),
            "split_to out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let start = self.start;
        let end = self.start + at;
        self.start = end;

        ByteBuffer {
            start,
            end,
            shared: self.shared.clone(),
        }
    }

    #[must_use = "consider ByteBuffer::truncate if you don't need the other half"]
    pub fn split_off(&mut self, at: usize) -> ByteBuffer {
        assert!(
            at <= self.len(),
            "split_off out of bounds: {:?} <= {:?}",
            at,
            self.len(),
        );

        let start = self.start + at;
        let end = self.end;
        self.end = start;

        ByteBuffer {
            start,
            end,
            shared: self.shared.clone(),
        }
    }

    pub fn truncate(&mut self, len: usize) {
        if len <= self.len() {
            self.end = self.start + len;
        }
    }

    pub fn slice(&self, range: impl RangeBounds<usize>) -> ByteBuffer {
        let (begin, end) = self.check_range(range);
        ByteBuffer {
            start: self.start + begin,
            end: self.start + end,
            shared: self.shared.clone(),
        }
    }

    pub fn as_bytes(&self) -> &[u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr(), self.len()) }
    }

    // SAFETY - modifications are nonoverlapping
    //
    // We cannot implement AsMut / DerefMut for this convention, because a trait impl would be
    // publicly visible, while we need to keep mutations within this crate (for in-place
    // mutation of memory batches).
    pub(crate) fn mut_slice_in(&mut self, range: impl RangeBounds<usize>) -> &mut [u8] {
        let (begin, end) = self.check_range(range);
        &mut (unsafe { slice::from_raw_parts_mut(self.ptr(), self.len()) }[begin..end])
    }

    fn check_range(&self, range: impl RangeBounds<usize>) -> (usize, usize) {
        use core::ops::Bound;

        let len = self.len();

        let begin = match range.start_bound() {
            Bound::Included(&n) => n,
            Bound::Excluded(&n) => n + 1,
            Bound::Unbounded => 0,
        };

        let end = match range.end_bound() {
            Bound::Included(&n) => n.checked_add(1).expect("out of range"),
            Bound::Excluded(&n) => n,
            Bound::Unbounded => len,
        };

        assert!(
            begin <= end,
            "range start must not be greater than end: {:?} <= {:?}",
            begin,
            end,
        );
        assert!(
            end <= len,
            "range end out of bounds: {:?} <= {:?}",
            end,
            len,
        );

        (begin, end)
    }

    // SAFETY - always in bound
    unsafe fn ptr(&self) -> *mut u8 {
        self.shared.ptr.add(self.start)
    }
}
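ByteBuffer is the crate's zero-copy view type: split_to, split_off, and slice only move the start/end cursors over one shared allocation, and the Arc'd Shared guard frees the Vec once the last view drops. A usage sketch, assuming the crate is importable as `kafka_api`:

use bytes::Buf;
use kafka_api::bytebuffer::ByteBuffer;

fn main() {
    let mut buf = ByteBuffer::new((0u8..10).collect());

    let head = buf.split_to(4); // head covers bytes 0..4, buf now covers 4..10
    assert_eq!(head.as_bytes(), &[0, 1, 2, 3]);
    assert_eq!(buf.len(), 6);

    let tail = buf.slice(2..); // another view over the same allocation, no copy
    assert_eq!(tail.as_bytes(), &[6, 7, 8, 9]);

    buf.advance(1); // the Buf impl just bumps `start`
    assert_eq!(buf.as_bytes(), &[5, 6, 7, 8, 9]);
}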

--------------------------------------------------------------------------------
/kafka-api/src/codec/readable.rs:
--------------------------------------------------------------------------------
// Copyright 2023 tison
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{io, mem::size_of};

use bytes::Buf;

use crate::{bytebuffer::ByteBuffer, err_codec_message, records::MutableRecords, RawTaggedField};

fn varint_zigzag(i: i32) -> i32 {
    (((i as u32) >> 1) as i32) ^ -(i & 1)
}

fn varlong_zigzag(i: i64) -> i64 {
    (((i as u64) >> 1) as i64) ^ -(i & 1)
}

pub trait Readable {
    fn remaining(&self) -> usize;
    fn read_i8(&mut self) -> i8;
    fn read_i16(&mut self) -> i16;
    fn read_i32(&mut self) -> i32;
    fn read_i64(&mut self) -> i64;
    fn read_u8(&mut self) -> u8;
    fn read_u16(&mut self) -> u16;
    fn read_u32(&mut self) -> u32;
    fn read_u64(&mut self) -> u64;
    fn read_f32(&mut self) -> f32;
    fn read_f64(&mut self) -> f64;

    fn read_bytes(&mut self, len: usize) -> ByteBuffer;

    fn read_string(&mut self, len: usize) -> String {
        let bs = self.read_bytes(len);
        String::from_utf8_lossy(bs.as_bytes()).into_owned()
    }

    fn read_unknown_tagged_field(&mut self, tag: i32, size: usize) -> RawTaggedField {
        let data = self.read_bytes(size);
        RawTaggedField { tag, data }
    }

    fn read_records(&mut self, len: usize) -> MutableRecords {
        MutableRecords::new(self.read_bytes(len))
    }

    fn read_uuid(&mut self) -> uuid::Uuid {
        let msb = self.read_u64();
        let lsb = self.read_u64();
        uuid::Uuid::from_u64_pair(msb, lsb)
    }

    fn read_unsigned_varint(&mut self) -> io::Result<i32> {
        let mut res = 0;
        for i in 0.. {
            debug_assert!(i < 5); // no larger than i32
            if self.remaining() >= size_of::<u8>() {
                let next = self.read_u8() as i32;
                res |= (next & 0x7F) << (i * 7);
                if next < 0x80 {
                    break;
                }
            } else {
                return Err(err_codec_message(format!(
                    "not enough bytes when decoding varint (res: {res}, remaining: {})",
                    self.remaining()
                )));
            }
        }
        Ok(res)
    }

    fn read_unsigned_varlong(&mut self) -> io::Result<i64> {
        let mut res = 0;
        for i in 0.. {
            debug_assert!(i < 10); // no larger than i64
            if self.remaining() >= size_of::<u8>() {
                let next = self.read_u8() as i64;
                res |= (next & 0x7F) << (i * 7);
                if next < 0x80 {
                    break;
                }
            } else {
                return Err(err_codec_message(format!(
                    "not enough bytes when decoding varlong (res: {res}, remaining: {})",
                    self.remaining()
                )));
            }
        }
        Ok(res)
    }

    fn read_varint(&mut self) -> io::Result<i32> {
        self.read_unsigned_varint().map(varint_zigzag)
    }

    fn read_varlong(&mut self) -> io::Result<i64> {
        self.read_unsigned_varlong().map(varlong_zigzag)
    }
}

macro_rules! delegate_forward_buf {
    () => {
        fn remaining(&self) -> usize {
            Buf::remaining(self)
        }

        fn read_i8(&mut self) -> i8 {
            self.get_i8()
        }

        fn read_i16(&mut self) -> i16 {
            self.get_i16()
        }

        fn read_i32(&mut self) -> i32 {
            self.get_i32()
        }

        fn read_i64(&mut self) -> i64 {
            self.get_i64()
        }

        fn read_u8(&mut self) -> u8 {
            self.get_u8()
        }

        fn read_u16(&mut self) -> u16 {
            self.get_u16()
        }

        fn read_u32(&mut self) -> u32 {
            self.get_u32()
        }

        fn read_u64(&mut self) -> u64 {
            self.get_u64()
        }

        fn read_f32(&mut self) -> f32 {
            self.get_f32()
        }

        fn read_f64(&mut self) -> f64 {
            self.get_f64()
        }
    };
}

impl Readable for &[u8] {
    delegate_forward_buf!();

    fn read_bytes(&mut self, _: usize) -> ByteBuffer {
        unreachable!("this implementation is only for peeking size")
    }
}

impl Readable for ByteBuffer {
    delegate_forward_buf!();

    fn read_bytes(&mut self, len: usize) -> ByteBuffer {
        self.split_to(len)
    }
}
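read_unsigned_varint above is LEB128 decoding — seven payload bits per byte, with the high bit flagging continuation — and read_varint undoes the zigzag mapping (0 → 0, -1 → 1, 1 → 2, ...) so small magnitudes encode short. A standalone sketch of the same arithmetic, independent of the trait:

fn decode_varint(bytes: &[u8]) -> (i32, usize) {
    // LEB128: accumulate 7 bits per byte until the continuation bit is clear
    let (mut raw, mut read) = (0i32, 0usize);
    for (i, &b) in bytes.iter().enumerate() {
        raw |= ((b as i32) & 0x7F) << (i * 7);
        read += 1;
        if b < 0x80 {
            break;
        }
    }
    // zigzag: even raw values map to non-negative, odd to negative
    ((((raw as u32) >> 1) as i32) ^ -(raw & 1), read)
}

fn main() {
    assert_eq!(decode_varint(&[0x00]), (0, 1));
    assert_eq!(decode_varint(&[0x01]), (-1, 1));
    assert_eq!(decode_varint(&[0x02]), (1, 1));
    assert_eq!(decode_varint(&[0xAC, 0x02]), (150, 2)); // raw 300, zigzag 150
}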

--------------------------------------------------------------------------------
/kafka-api/src/codec/writable.rs:
--------------------------------------------------------------------------------
// Copyright 2023 tison
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::io;

use crate::{bytebuffer::ByteBuffer, records::ReadOnlyRecords};

pub trait Writable {
    fn write_i8(&mut self, n: i8) -> io::Result<()>;
    fn write_i16(&mut self, n: i16) -> io::Result<()>;
    fn write_i32(&mut self, n: i32) -> io::Result<()>;
    fn write_i64(&mut self, n: i64) -> io::Result<()>;
    fn write_u8(&mut self, n: u8) -> io::Result<()>;
    fn write_u16(&mut self, n: u16) -> io::Result<()>;
    fn write_u32(&mut self, n: u32) -> io::Result<()>;
    fn write_u64(&mut self, n: u64) -> io::Result<()>;
    fn write_f32(&mut self, n: f32) -> io::Result<()>;
    fn write_f64(&mut self, n: f64) -> io::Result<()>;
    fn write_slice(&mut self, src: &[u8]) -> io::Result<()>;
    fn write_bytes(&mut self, buf: &ByteBuffer) -> io::Result<()>;
    fn write_records(&mut self, r: &ReadOnlyRecords) -> io::Result<()>;

    fn write_uuid(&mut self, n: uuid::Uuid) -> io::Result<()> {
        self.write_slice(n.as_ref())
    }

    fn write_unsigned_varint(&mut self, n: i32) -> io::Result<()> {
        let mut v = n;
        while v >= 0x80 {
            self.write_u8((v as u8) | 0x80)?;
            v >>= 7;
        }
        self.write_u8(v as u8)
    }

    fn write_unsigned_varlong(&mut self, n: i64) -> io::Result<()> {
        let mut v = n;
        while v >= 0x80 {
            self.write_u8((v as u8) | 0x80)?;
            v >>= 7;
        }
        self.write_u8(v as u8)
    }
}
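write_unsigned_varint is the matching encoder: emit the low seven bits with the continuation bit set while higher bits remain. A standalone sketch (using an unsigned parameter, which sidesteps the sign questions the i32 signature leaves open for negative inputs):

fn encode_varint(mut v: u32, out: &mut Vec<u8>) {
    // every byte except the last carries the 0x80 continuation bit
    while v >= 0x80 {
        out.push((v as u8) | 0x80);
        v >>= 7;
    }
    out.push(v as u8);
}

fn main() {
    let mut out = Vec::new();
    encode_varint(300, &mut out);
    assert_eq!(out, [0xAC, 0x02]); // round-trips with the decoder sketched above
}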

--------------------------------------------------------------------------------
/kafka-api/src/lib.rs:
--------------------------------------------------------------------------------
// Copyright 2023 tison
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

extern crate core;

use std::{fmt::Display, io};

pub use codec::{Deserializable, RawTaggedField, Readable, Serializable, Writable};
pub use schemata::*;

pub mod apikey;
pub mod bytebuffer;
pub(crate) mod codec;
pub mod error;
pub mod records;
mod schemata;
pub mod sendable;

fn err_codec_message(message: String) -> io::Error {
    io::Error::new(io::ErrorKind::InvalidData, message)
}

fn err_decode_message_unsupported(version: i16, schemata: &str) -> io::Error {
    err_codec_message(format!("Cannot read version {version} of {schemata}"))
}

fn err_encode_message_unsupported(version: i16, schemata: &str) -> io::Error {
    err_codec_message(format!("Cannot write version {version} of {schemata}"))
}

fn err_decode_message_null(field: impl Display) -> io::Error {
    err_codec_message(format!("non-nullable field {field} was serialized as null"))
}

fn err_encode_message_null(field: impl Display) -> io::Error {
    err_codec_message(format!(
        "non-nullable field {field} to be serialized as null"
    ))
}

--------------------------------------------------------------------------------
/kafka-api/src/records/consts.rs:
--------------------------------------------------------------------------------
// Copyright 2023 tison
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub const NO_SEQUENCE: i32 = -1;

// The current attributes are given below:
// ---------------------------------------------------------------------------------------------------------------------------
// | Unused (7-15) | Delete Horizon Flag (6) | Control (5) | Transactional (4) | Timestamp Type (3) | Compression Type (0-2) |
// ---------------------------------------------------------------------------------------------------------------------------
pub const COMPRESSION_CODEC_MASK: u8 = 0x07;
pub const TIMESTAMP_TYPE_MASK: u8 = 0x08;
pub const TRANSACTIONAL_FLAG_MASK: u8 = 0x10;
pub const CONTROL_FLAG_MASK: u8 = 0x20;
pub const DELETE_HORIZON_FLAG_MASK: u8 = 0x40;

// offset table
pub const BASE_OFFSET_OFFSET: usize = 0;
pub const BASE_OFFSET_LENGTH: usize = 8;
pub const LENGTH_OFFSET: usize = BASE_OFFSET_OFFSET + BASE_OFFSET_LENGTH;
pub const LENGTH_LENGTH: usize = 4;
pub const PARTITION_LEADER_EPOCH_OFFSET: usize = LENGTH_OFFSET + LENGTH_LENGTH;
pub const PARTITION_LEADER_EPOCH_LENGTH: usize = 4;
pub const MAGIC_OFFSET: usize = PARTITION_LEADER_EPOCH_OFFSET + PARTITION_LEADER_EPOCH_LENGTH;
pub const MAGIC_LENGTH: usize = 1;
pub const CRC_OFFSET: usize = MAGIC_OFFSET + MAGIC_LENGTH;
pub const CRC_LENGTH: usize = 4;
pub const ATTRIBUTES_OFFSET: usize = CRC_OFFSET + CRC_LENGTH;
pub const ATTRIBUTE_LENGTH: usize = 2;
pub const LAST_OFFSET_DELTA_OFFSET: usize = ATTRIBUTES_OFFSET + ATTRIBUTE_LENGTH;
pub const LAST_OFFSET_DELTA_LENGTH: usize = 4;
pub const BASE_TIMESTAMP_OFFSET: usize = LAST_OFFSET_DELTA_OFFSET + LAST_OFFSET_DELTA_LENGTH;
pub const BASE_TIMESTAMP_LENGTH: usize = 8;
pub const MAX_TIMESTAMP_OFFSET: usize = BASE_TIMESTAMP_OFFSET + BASE_TIMESTAMP_LENGTH;
pub const MAX_TIMESTAMP_LENGTH: usize = 8;
pub const PRODUCER_ID_OFFSET: usize = MAX_TIMESTAMP_OFFSET + MAX_TIMESTAMP_LENGTH;
pub const PRODUCER_ID_LENGTH: usize = 8;
pub const PRODUCER_EPOCH_OFFSET: usize = PRODUCER_ID_OFFSET + PRODUCER_ID_LENGTH;
pub const PRODUCER_EPOCH_LENGTH: usize = 2;
pub const BASE_SEQUENCE_OFFSET: usize = PRODUCER_EPOCH_OFFSET + PRODUCER_EPOCH_LENGTH;
pub const BASE_SEQUENCE_LENGTH: usize = 4;
pub const RECORDS_COUNT_OFFSET: usize = BASE_SEQUENCE_OFFSET + BASE_SEQUENCE_LENGTH;
pub const RECORDS_COUNT_LENGTH: usize = 4;
pub const RECORDS_OFFSET: usize = RECORDS_COUNT_OFFSET + RECORDS_COUNT_LENGTH;
pub const RECORD_BATCH_OVERHEAD: usize = RECORDS_OFFSET;

pub const HEADER_SIZE_UP_TO_MAGIC: usize = MAGIC_OFFSET + MAGIC_LENGTH;
pub const LOG_OVERHEAD: usize = LENGTH_OFFSET + LENGTH_LENGTH;
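The masks select bit fields out of the low byte of the attributes word diagrammed above, and the offset constants are a running sum that pins down each v2 batch header field. A sketch of picking an attributes byte apart with these masks (treating codec id 1 as gzip follows Kafka's numbering and is an assumption not stated in this file):

const COMPRESSION_CODEC_MASK: u8 = 0x07;
const TIMESTAMP_TYPE_MASK: u8 = 0x08;
const TRANSACTIONAL_FLAG_MASK: u8 = 0x10;
const CONTROL_FLAG_MASK: u8 = 0x20;

fn main() {
    // a transactional, log-append-time batch compressed with codec 1
    let attrs: u8 = TRANSACTIONAL_FLAG_MASK | TIMESTAMP_TYPE_MASK | 0x01;
    assert_eq!(attrs & COMPRESSION_CODEC_MASK, 0x01); // compression codec id
    assert_ne!(attrs & TIMESTAMP_TYPE_MASK, 0); // log-append timestamps
    assert_ne!(attrs & TRANSACTIONAL_FLAG_MASK, 0); // transactional
    assert_eq!(attrs & CONTROL_FLAG_MASK, 0); // not a control batch
}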

--------------------------------------------------------------------------------
/kafka-api/src/records/mod.rs:
--------------------------------------------------------------------------------
// Copyright 2023 tison
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{
    cell::OnceCell,
    fmt::{Debug, Formatter},
};

use bytes::Buf;
pub use consts::*;
pub use mutable_records::*;
pub use readonly_records::*;
pub use record::*;
pub use record_batch::*;

use crate::bytebuffer::ByteBuffer;

mod consts;
mod mutable_records;
mod readonly_records;
mod record;
mod record_batch;

fn load_batches(buf: &ByteBuffer) -> Vec<RecordBatch> {
    let mut batches = vec![];

    let mut offset = 0;
    let mut remaining = buf.len() - offset;
    while remaining > 0 {
        assert!(
            remaining >= HEADER_SIZE_UP_TO_MAGIC,
            "not enough bytes when decoding records (remaining: {})",
            remaining
        );

        let record_size = (&buf[offset + LENGTH_OFFSET..]).get_i32();
        let batch_size = record_size as usize + LOG_OVERHEAD;

        assert!(
            remaining >= batch_size,
            "not enough bytes when decoding records (remaining: {})",
            remaining
        );

        let record = match (&buf[offset + MAGIC_OFFSET..]).get_i8() {
            2 => {
                let buf = buf.slice(offset..offset + batch_size);
                offset += batch_size;
                remaining -= batch_size;
                RecordBatch { buf }
            }
            v => unimplemented!("record batch version {}", v),
        };

        batches.push(record);
    }

    batches
}

--------------------------------------------------------------------------------
/kafka-api/src/records/mutable_records.rs:
--------------------------------------------------------------------------------
// Copyright 2023 tison
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{
    cell::OnceCell,
    fmt::{Debug, Formatter},
};

use tracing::warn;

use crate::{bytebuffer::ByteBuffer, records::*};

#[derive(Default)]
pub struct MutableRecords {
    buf: ByteBuffer,
    batches: OnceCell<Vec<RecordBatch>>,
}

impl Clone for MutableRecords {
    /// ATTENTION - Cloning Records is a heavy operation.
    ///
    /// MutableRecords is a public struct and it has a [MutableRecords::mut_batches] method that
    /// modifies the underlying [ByteBuffer]. If we only did a shallow clone, two MutableRecords
    /// without any overlapping ownership could modify the same underlying bytes.
    ///
    /// Generally, MutableRecords users iterate over batches with [MutableRecords::batches] or
    /// [MutableRecords::mut_batches], and pass ownership instead of cloning. This clone behavior
    /// is similar to cloning a [Vec].
    ///
    /// To produce a read-only view without copying, use [MutableRecords::freeze] instead.
42 | fn clone(&self) -> Self { 43 | warn!("Cloning mutable records will copy bytes and is not encouraged; try MutableRecords::freeze."); 44 | MutableRecords { 45 | buf: ByteBuffer::new(self.buf.to_vec()), 46 | batches: OnceCell::new(), 47 | } 48 | } 49 | } 50 | 51 | impl Debug for MutableRecords { 52 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 53 | Debug::fmt(self.batches(), f) 54 | } 55 | } 56 | 57 | impl MutableRecords { 58 | pub fn new(buf: ByteBuffer) -> Self { 59 | let batches = OnceCell::new(); 60 | MutableRecords { buf, batches } 61 | } 62 | 63 | pub fn freeze(self) -> ReadOnlyRecords { 64 | ReadOnlyRecords::ByteBuffer(ByteBufferRecords::new(self.buf)) 65 | } 66 | 67 | pub fn as_bytes(&self) -> &[u8] { 68 | self.buf.as_bytes() 69 | } 70 | 71 | pub fn mut_batches(&mut self) -> &mut [RecordBatch] { 72 | self.batches.get_or_init(|| load_batches(&self.buf)); 73 | // SAFETY - init above 74 | unsafe { self.batches.get_mut().unwrap_unchecked() } 75 | } 76 | 77 | pub fn batches(&self) -> &[RecordBatch] { 78 | self.batches.get_or_init(|| load_batches(&self.buf)) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /kafka-api/src/records/readonly_records.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
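
An aside on the framing that load_batches (above) relies on: every batch begins with an 8-byte base offset followed by a 4-byte length, so LOG_OVERHEAD is 12 and the length field counts everything after itself. For the 94-byte test batch in record_batch.rs below, the length field reads 0x52 (82), and 82 + 12 = 94. A minimal sketch of that computation, assuming `buf` holds at least one complete batch header:

    use bytes::Buf;

    fn batch_total_size(buf: &[u8]) -> usize {
        // the length field sits right after the 8-byte base offset
        let record_size = (&buf[LENGTH_OFFSET..]).get_i32();
        record_size as usize + LOG_OVERHEAD // whole batch, framing included
    }
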
14 | 
15 | use std::{cell::OnceCell, fmt::{Debug, Formatter}, io};
16 | 
17 | use crate::{bytebuffer::ByteBuffer, records::*};
18 | 
19 | #[derive(Debug, Default, Clone)]
20 | pub enum ReadOnlyRecords {
21 |     #[default]
22 |     None,
23 |     ByteBuffer(ByteBufferRecords),
24 | }
25 | 
26 | impl ReadOnlyRecords {
27 |     pub fn size(&self) -> usize {
28 |         match self {
29 |             ReadOnlyRecords::None => 0,
30 |             ReadOnlyRecords::ByteBuffer(r) => r.buf.len(),
31 |         }
32 |     }
33 | 
34 |     pub fn batches(&self) -> &[RecordBatch] {
35 |         match self {
36 |             ReadOnlyRecords::None => &[],
37 |             ReadOnlyRecords::ByteBuffer(r) => r.batches(),
38 |         }
39 |     }
40 | 
41 |     pub fn write_to<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
42 |         match self {
43 |             ReadOnlyRecords::None => writer.write_all(&[]),
44 |             ReadOnlyRecords::ByteBuffer(r) => writer.write_all(r.buf.as_bytes()),
45 |         }
46 |     }
47 | }
48 | 
49 | #[derive(Default)]
50 | pub struct ByteBufferRecords {
51 |     buf: ByteBuffer,
52 |     batches: OnceCell<Vec<RecordBatch>>,
53 | }
54 | 
55 | impl Clone for ByteBufferRecords {
56 |     fn clone(&self) -> Self {
57 |         ByteBufferRecords {
58 |             buf: self.buf.clone(),
59 |             batches: OnceCell::new(),
60 |         }
61 |     }
62 | }
63 | 
64 | impl Debug for ByteBufferRecords {
65 |     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
66 |         Debug::fmt(self.batches(), f)
67 |     }
68 | }
69 | 
70 | impl ByteBufferRecords {
71 |     pub(super) fn new(buf: ByteBuffer) -> ByteBufferRecords {
72 |         let batches = OnceCell::new();
73 |         ByteBufferRecords { buf, batches }
74 |     }
75 | 
76 |     fn batches(&self) -> &[RecordBatch] {
77 |         self.batches.get_or_init(|| load_batches(&self.buf))
78 |     }
79 | }
80 | 
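
Both MutableRecords and ByteBufferRecords defer parsing with the same OnceCell pattern: the raw bytes are stored untouched, load_batches runs at most once on first access, and clone() hands back a fresh, unpopulated cell so the copy re-parses lazily. A condensed, self-contained sketch of the pattern (the names `Lazy`, `raw`, and `parsed` are illustrative stand-ins, not names from this crate):

    use std::cell::OnceCell;

    struct Lazy {
        raw: Vec<u8>,
        parsed: OnceCell<Vec<u32>>, // stands in for Vec<RecordBatch>
    }

    impl Lazy {
        fn parsed(&self) -> &[u32] {
            // the closure runs only on the first call; later calls hit the cache
            self.parsed.get_or_init(|| self.raw.iter().map(|b| *b as u32).collect())
        }
    }
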
-------------------------------------------------------------------------------- /kafka-api/src/records/record.rs: --------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use crate::bytebuffer::ByteBuffer;
16 | 
17 | #[derive(Debug, Default, Clone)]
18 | pub struct Record {
19 |     pub len: i32, // varint
20 |     /// bit 0~7: unused
21 |     pub attributes: i8,
22 |     pub timestamp_delta: i64, // varlong
23 |     pub offset_delta: i32, // varint
24 |     pub key_len: i32, // varint
25 |     pub key: Option<ByteBuffer>,
26 |     pub value_len: i32, // varint
27 |     pub value: Option<ByteBuffer>,
28 |     pub headers: Vec<Header>,
29 | }
30 | 
31 | #[derive(Debug, Default, Clone)]
32 | pub struct Header {
33 |     pub key_len: i32, // varint
34 |     pub key: Option<ByteBuffer>,
35 |     pub value_len: i32, // varint
36 |     pub value: Option<ByteBuffer>,
37 | }
38 | 
39 | #[derive(Debug, Clone, Copy)]
40 | pub enum TimestampType {
41 |     CreateTime,
42 |     LogAppendTime,
43 | }
44 | 
45 | #[derive(Debug, Default, Clone, Copy)]
46 | pub enum CompressionType {
47 |     #[default]
48 |     None,
49 |     Gzip,
50 |     Snappy,
51 |     Lz4,
52 |     Zstd,
53 | }
54 | 
55 | impl From<u8> for CompressionType {
56 |     fn from(ty: u8) -> Self {
57 |         match ty {
58 |             0 => CompressionType::None,
59 |             1 => CompressionType::Gzip,
60 |             2 => CompressionType::Snappy,
61 |             3 => CompressionType::Lz4,
62 |             4 => CompressionType::Zstd,
63 |             _ => unreachable!("Unknown compression type id: {}", ty),
64 |         }
65 |     }
66 | }
67 | 
-------------------------------------------------------------------------------- /kafka-api/src/records/record_batch.rs: --------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use std::fmt::{Debug, Formatter};
16 | 
17 | use bytes::{Buf, BufMut};
18 | 
19 | use crate::{
20 |     bytebuffer::ByteBuffer,
21 |     codec::{Decoder, RecordList},
22 |     records::*,
23 | };
24 | 
25 | #[derive(Default)]
26 | pub struct RecordBatch {
27 |     pub(super) buf: ByteBuffer,
28 | }
29 | 
30 | impl Debug for RecordBatch {
31 |     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
32 |         let mut de = f.debug_struct("RecordBatch");
33 |         de.field("magic", &self.magic());
34 |         de.field("offset", &(self.base_offset()..=self.last_offset()));
35 |         de.field("sequence", &(self.base_sequence()..=self.last_sequence()));
36 |         de.field("is_transactional", &self.is_transactional());
37 |         de.field("is_control_batch", &self.is_control_batch());
38 |         de.field("compression_type", &self.compression_type());
39 |         de.field("timestamp_type", &self.timestamp_type());
40 |         de.field("crc", &self.checksum());
41 |         de.field("records_count", &self.records_count());
42 |         de.field("records", &self.records());
43 |         de.finish()
44 |     }
45 | }
46 | 
47 | /// Similar to [i32::wrapping_add], but wrap to `0` instead of [i32::MIN].
48 | pub fn increment_sequence(sequence: i32, increment: i32) -> i32 {
49 |     if sequence > i32::MAX - increment {
50 |         increment - (i32::MAX - sequence) - 1
51 |     } else {
52 |         sequence + increment
53 |     }
54 | }
55 | 
56 | /// Similar to [i32::wrapping_sub], but wrap at `0` instead of [i32::MIN].
57 | pub fn decrement_sequence(sequence: i32, decrement: i32) -> i32 {
58 |     if sequence < decrement {
59 |         i32::MAX - (decrement - sequence) + 1
60 |     } else {
61 |         sequence - decrement
62 |     }
63 | }
64 | 
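
Producer sequence numbers wrap within [0, i32::MAX] rather than across the full i32 range, which is exactly what the two helpers above implement. A few concrete values, added here as a sanity check consistent with the definitions above:

    #[test]
    fn sequence_wrapping_examples() {
        assert_eq!(increment_sequence(i32::MAX, 1), 0);     // MAX steps forward to 0
        assert_eq!(increment_sequence(i32::MAX - 1, 3), 1); // MAX-1 -> MAX -> 0 -> 1
        assert_eq!(decrement_sequence(0, 1), i32::MAX);     // 0 wraps back to MAX
        assert_eq!(decrement_sequence(1, 3), i32::MAX - 1); // 1 -> 0 -> MAX -> MAX-1
    }
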
65 | impl RecordBatch {
66 |     pub fn set_last_offset(&mut self, offset: i64) {
67 |         let base_offset = offset - self.last_offset_delta() as i64;
68 |         self.buf
69 |             .mut_slice_in(BASE_OFFSET_OFFSET..)
70 |             .put_i64(base_offset);
71 |     }
72 | 
73 |     pub fn set_partition_leader_epoch(&mut self, epoch: i32) {
74 |         self.buf
75 |             .mut_slice_in(PARTITION_LEADER_EPOCH_OFFSET..)
76 |             .put_i32(epoch);
77 |     }
78 | 
79 |     pub fn magic(&self) -> i8 {
80 |         (&self.buf[MAGIC_OFFSET..]).get_i8()
81 |     }
82 | 
83 |     pub fn base_offset(&self) -> i64 {
84 |         (&self.buf[BASE_OFFSET_OFFSET..]).get_i64()
85 |     }
86 | 
87 |     pub fn last_offset(&self) -> i64 {
88 |         self.base_offset() + self.last_offset_delta() as i64
89 |     }
90 | 
91 |     pub fn base_sequence(&self) -> i32 {
92 |         (&self.buf[BASE_SEQUENCE_OFFSET..]).get_i32()
93 |     }
94 | 
95 |     pub fn last_sequence(&self) -> i32 {
96 |         match self.base_sequence() {
97 |             NO_SEQUENCE => NO_SEQUENCE,
98 |             seq => increment_sequence(seq, self.last_offset_delta()),
99 |         }
100 |     }
101 | 
102 |     fn last_offset_delta(&self) -> i32 {
103 |         (&self.buf[LAST_OFFSET_DELTA_OFFSET..]).get_i32()
104 |     }
105 | 
106 |     pub fn max_timestamp(&self) -> i64 {
107 |         (&self.buf[MAX_TIMESTAMP_OFFSET..]).get_i64()
108 |     }
109 | 
110 |     pub fn records_count(&self) -> i32 {
111 |         (&self.buf[RECORDS_COUNT_OFFSET..]).get_i32()
112 |     }
113 | 
114 |     pub fn records(&self) -> Vec<Record> {
115 |         let mut records = self.buf.slice(RECORDS_COUNT_OFFSET..);
116 |         RecordList.decode(&mut records).expect("malformed records")
117 |     }
118 | 
119 |     pub fn checksum(&self) -> u32 {
120 |         (&self.buf[CRC_OFFSET..]).get_u32()
121 |     }
122 | 
123 |     pub fn is_transactional(&self) -> bool {
124 |         self.attributes() & TRANSACTIONAL_FLAG_MASK > 0
125 |     }
126 | 
127 |     pub fn is_control_batch(&self) -> bool {
128 |         self.attributes() & CONTROL_FLAG_MASK > 0
129 |     }
130 | 
131 |     pub fn timestamp_type(&self) -> TimestampType {
132 |         if self.attributes() & TIMESTAMP_TYPE_MASK != 0 {
133 |             TimestampType::LogAppendTime
134 |         } else {
135 |             TimestampType::CreateTime
136 |         }
137 |     }
138 | 
139 |     pub fn compression_type(&self) -> CompressionType {
140 |         (self.attributes() & COMPRESSION_CODEC_MASK).into()
141 |     }
142 | 
143 |     pub fn delete_horizon_ms(&self) -> Option<i64> {
144 |         if self.has_delete_horizon_ms() {
145 |             Some((&self.buf[BASE_TIMESTAMP_OFFSET..]).get_i64())
146 |         } else {
147 |             None
148 |         }
149 |     }
150 | 
151 |     fn has_delete_horizon_ms(&self) -> bool {
152 |         self.attributes() & DELETE_HORIZON_FLAG_MASK > 0
153 |     }
154 | 
155 |     // note we're not using the second byte of attributes
156 |     fn attributes(&self) -> u8 {
157 |         (&self.buf[ATTRIBUTES_OFFSET..]).get_u16() as u8
158 |     }
159 | }
160 | 
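
The flag accessors above all read the low attribute byte against the masks from consts.rs. One worked example (the value 0x1C is chosen purely for illustration):

    #[test]
    fn attribute_mask_examples() {
        let attributes: u8 = 0x1C; // 0b0001_1100
        assert_eq!(attributes & COMPRESSION_CODEC_MASK, 4);  // CompressionType::Zstd
        assert_ne!(attributes & TIMESTAMP_TYPE_MASK, 0);     // TimestampType::LogAppendTime
        assert_ne!(attributes & TRANSACTIONAL_FLAG_MASK, 0); // transactional batch
        assert_eq!(attributes & CONTROL_FLAG_MASK, 0);       // not a control batch
    }
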
161 | #[cfg(test)]
162 | mod tests {
163 |     use std::io;
164 | 
165 |     use super::*;
166 |     use crate::records::MutableRecords;
167 | 
168 |     const RECORD: &[u8] = &[
169 |         0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // first offset
170 |         0x0, 0x0, 0x0, 0x52, // record batch size
171 |         0xFF, 0xFF, 0xFF, 0xFF, // partition leader epoch
172 |         0x2, // magic byte
173 |         0xE2, 0x3F, 0xC9, 0x74, // crc
174 |         0x0, 0x0, // attributes
175 |         0x0, 0x0, 0x0, 0x0, // last offset delta
176 |         0x0, 0x0, 0x1, 0x89, 0xAF, 0x78, 0x40, 0x72, // base timestamp
177 |         0x0, 0x0, 0x1, 0x89, 0xAF, 0x78, 0x40, 0x72, // max timestamp
178 |         0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, // producer ID
179 |         0x0, 0x0, // producer epoch
180 |         0x0, 0x0, 0x0, 0x0, // base sequence
181 |         0x0, 0x0, 0x0, 0x1, // record counts
182 |         0x40, // first record size
183 |         0x0, // attribute
184 |         0x0, // timestamp delta
185 |         0x0, // offset delta
186 |         0x1, // key length (zigzag : -1)
187 |         // empty key payload
188 |         0x34, // value length (zigzag : 26)
189 |         0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x69, 0x72,
190 |         0x73, 0x74, 0x20, 0x6D, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2E, // value payload
191 |         0x0, // header counts
192 |     ];
193 | 
194 |     #[test]
195 |     fn test_codec_records() -> io::Result<()> {
196 |         let records = MutableRecords::new(ByteBuffer::new(RECORD.to_vec()));
197 |         let record_batches = records.batches();
198 |         assert_eq!(record_batches.len(), 1);
199 |         let record_batch = &record_batches[0];
200 |         assert_eq!(record_batch.records_count(), 1);
201 |         let record_vec = record_batch.records();
202 |         assert_eq!(record_vec.len(), 1);
203 |         let record = &record_vec[0];
204 |         assert_eq!(record.key_len, -1);
205 |         assert_eq!(record.key, None);
206 |         assert_eq!(record.value_len, 26);
207 |         assert_eq!(
208 |             record.value.as_deref().map(String::from_utf8_lossy),
209 |             Some("This is the first message.".into())
210 |         );
211 |         Ok(())
212 |     }
213 | }
214 | 
-------------------------------------------------------------------------------- /kafka-api/src/schemata/api_versions_request.rs: --------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use std::io;
16 | 
17 | use crate::{codec::*, err_decode_message_null};
18 | 
19 | // Versions 0 through 2 of ApiVersionsRequest are the same.
20 | //
21 | // Version 3 is the first flexible version and adds ClientSoftwareName and ClientSoftwareVersion.
22 | 
23 | #[derive(Debug, Default, Clone)]
24 | pub struct ApiVersionsRequest {
25 |     /// The name of the client.
26 |     pub client_software_name: String,
27 |     /// The version of the client.
28 |     pub client_software_version: String,
29 |     /// Unknown tagged fields.
30 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
31 | }
32 | 
33 | impl Deserializable for ApiVersionsRequest {
34 |     fn read<B: Readable>(buf: &mut B, version: i16) -> io::Result<Self> {
35 |         let mut this = ApiVersionsRequest::default();
36 |         if version >= 3 {
37 |             this.client_software_name = NullableString(true)
38 |                 .decode(buf)?
39 |                 .ok_or_else(|| err_decode_message_null("client_software_name"))?;
40 |         }
41 |         if version >= 3 {
42 |             this.client_software_version = NullableString(true)
43 |                 .decode(buf)?
44 |                 .ok_or_else(|| err_decode_message_null("client_software_version"))?;
45 |         }
46 |         if version >= 3 {
47 |             this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?;
48 |         }
49 |         Ok(this)
50 |     }
51 | }
52 | 
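
The reader above shows the version-gating pattern every schema in this crate follows: versions 0 through 2 of this request carry no body fields at all, so read consumes nothing and returns the defaults, while version 3+ decodes two compact nullable strings plus the tagged-field section. A hypothetical call, assuming `Readable` is the codec-side reader trait and the buffers hold well-formed payloads:

    fn decode_demo<B: Readable>(old_buf: &mut B, new_buf: &mut B) -> io::Result<()> {
        // v0..=2: nothing to decode, all fields keep their defaults
        let v0 = ApiVersionsRequest::read(old_buf, 0)?;
        assert_eq!(v0.client_software_name, "");
        // v3+: flexible encoding with tagged fields
        let _v3 = ApiVersionsRequest::read(new_buf, 3)?;
        Ok(())
    }
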
-------------------------------------------------------------------------------- /kafka-api/src/schemata/api_versions_response.rs: --------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use std::io;
16 | 
17 | use crate::{codec::*, err_encode_message_unsupported};
18 | 
19 | // Version 1 adds throttle time to the response.
20 | //
21 | // Starting in version 2, on quota violation, brokers send out responses before throttling.
22 | //
23 | // Version 3 is the first flexible version. Tagged fields are only supported in the body but
24 | // not in the header. The length of the header must not change in order to guarantee
25 | // backward compatibility.
26 | //
27 | // Starting from Apache Kafka 2.4 (KIP-511), ApiKeys field is populated with the supported
28 | // versions of the ApiVersionsRequest when an UNSUPPORTED_VERSION error is returned.
29 | 
30 | #[derive(Debug, Default, Clone)]
31 | pub struct ApiVersionsResponse {
32 |     /// The top-level error code.
33 |     pub error_code: i16,
34 |     /// The APIs supported by the broker.
35 |     pub api_keys: Vec<ApiVersion>,
36 |     /// The duration in milliseconds for which the request was throttled due to a quota violation,
37 |     /// or zero if the request did not violate any quota.
38 |     pub throttle_time_ms: i32,
39 |     /// Features supported by the broker.
40 |     pub supported_features: Vec<SupportedFeatureKey>,
41 |     /// The monotonically increasing epoch for the finalized features information. Valid values are
42 |     /// >= 0. A value of -1 is special and represents unknown epoch.
43 |     pub finalized_features_epoch: i64,
44 |     /// List of cluster-wide finalized features. The information is valid only if
45 |     /// FinalizedFeaturesEpoch >= 0.
46 |     pub finalized_features: Vec<FinalizedFeatureKey>,
47 |     /// Set by a KRaft controller if the required configurations for ZK migration are present.
48 |     pub zk_migration_ready: bool,
49 |     /// Unknown tagged fields.
50 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
51 | }
52 | 
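
Note that the three feature-related fields travel only as tagged fields (tags 0, 1, and 2) of flexible versions, as the Serializable impl below spells out; on versions 1 and 2 they are silently dropped. A minimal construction with illustrative values (API key 18 is ApiVersions itself):

    let response = ApiVersionsResponse {
        error_code: 0,
        api_keys: vec![ApiVersion {
            api_key: 18,
            min_version: 0,
            max_version: 3,
            ..Default::default()
        }],
        finalized_features_epoch: -1, // unknown epoch
        ..Default::default()
    };
    // calculate_size(version) must agree with what write(.., version) emits
    let wire_size = response.calculate_size(3);
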
53 | impl Serializable for ApiVersionsResponse {
54 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
55 |         Int16.encode(buf, self.error_code)?;
56 |         NullableArray(Struct(version), version >= 3).encode(buf, self.api_keys.as_slice())?;
57 |         if version >= 1 {
58 |             Int32.encode(buf, self.throttle_time_ms)?;
59 |         }
60 |         if version >= 3 {
61 |             RawTaggedFieldList.encode_with(buf, 3, &self.unknown_tagged_fields, |buf| {
62 |                 RawTaggedFieldWriter.write_field(
63 |                     buf,
64 |                     0,
65 |                     NullableArray(Struct(version), version >= 3),
66 |                     self.supported_features.as_slice(),
67 |                 )?;
68 |                 RawTaggedFieldWriter.write_field(buf, 1, Int64, self.finalized_features_epoch)?;
69 |                 RawTaggedFieldWriter.write_field(
70 |                     buf,
71 |                     2,
72 |                     NullableArray(Struct(version), version >= 3),
73 |                     self.finalized_features.as_slice(),
74 |                 )?;
75 |                 Ok(())
76 |             })?;
77 |         }
78 |         Ok(())
79 |     }
80 | 
81 |     fn calculate_size(&self, version: i16) -> usize {
82 |         let mut res = 0;
83 |         res += Int16::SIZE; // self.error_code
84 |         res +=
85 |             NullableArray(Struct(version), version >= 3).calculate_size(self.api_keys.as_slice());
86 |         if version >= 1 {
87 |             res += Int32::SIZE; // self.throttle_time_ms
88 |         }
89 |         if version >= 3 {
90 |             res += RawTaggedFieldList.calculate_size_with(
91 |                 3,
92 |                 RawTaggedFieldWriter.calculate_field_size(
93 |                     0,
94 |                     NullableArray(Struct(version), version >= 3),
95 |                     self.supported_features.as_slice(),
96 |                 ) + RawTaggedFieldWriter.calculate_field_size(
97 |                     1,
98 |                     Int64,
99 |                     &self.finalized_features_epoch,
100 |                 ) + RawTaggedFieldWriter.calculate_field_size(
101 |                     2,
102 |                     NullableArray(Struct(version), version >= 3),
103 |                     self.finalized_features.as_slice(),
104 |                 ),
105 |                 &self.unknown_tagged_fields,
106 |             );
107 |         }
108 |         res
109 |     }
110 | }
111 | 
112 | #[derive(Debug, Default, Clone)]
113 | pub struct ApiVersion {
114 |     /// The API index.
115 |     pub api_key: i16,
116 |     /// The minimum supported version, inclusive.
117 |     pub min_version: i16,
118 |     /// The maximum supported version, inclusive.
119 |     pub max_version: i16,
120 |     /// Unknown tagged fields.
121 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
122 | }
123 | 
124 | impl Serializable for ApiVersion {
125 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
126 |         Int16.encode(buf, self.api_key)?;
127 |         Int16.encode(buf, self.min_version)?;
128 |         Int16.encode(buf, self.max_version)?;
129 |         if version >= 3 {
130 |             RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?;
131 |         }
132 |         Ok(())
133 |     }
134 | 
135 |     fn calculate_size(&self, version: i16) -> usize {
136 |         let mut res = 0;
137 |         res += Int16::SIZE; // self.api_key
138 |         res += Int16::SIZE; // self.min_version
139 |         res += Int16::SIZE; // self.max_version
140 |         if version >= 3 {
141 |             res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields);
142 |         }
143 |         res
144 |     }
145 | }
146 | 
147 | #[derive(Debug, Default, Clone)]
148 | pub struct SupportedFeatureKey {
149 |     /// The name of the feature.
150 |     pub name: String,
151 |     /// The minimum supported version for the feature.
152 |     pub min_version: i16,
153 |     /// The maximum supported version for the feature.
154 |     pub max_version: i16,
155 |     /// Unknown tagged fields.
156 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
157 | }
158 | 
159 | impl Serializable for SupportedFeatureKey {
160 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
161 |         if version > 3 {
162 |             Err(err_encode_message_unsupported(
163 |                 version,
164 |                 "SupportedFeatureKey",
165 |             ))?
166 | } 167 | NullableString(true).encode(buf, self.name.as_ref())?; 168 | Int16.encode(buf, self.min_version)?; 169 | Int16.encode(buf, self.max_version)?; 170 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 171 | Ok(()) 172 | } 173 | 174 | fn calculate_size(&self, _version: i16) -> usize { 175 | let mut res = 0; 176 | res += NullableString(true).calculate_size(self.name.as_ref()); 177 | res += Int16::SIZE; // self.min_version 178 | res += Int16::SIZE; // self.max_version 179 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 180 | res 181 | } 182 | } 183 | 184 | #[derive(Debug, Default, Clone)] 185 | pub struct FinalizedFeatureKey { 186 | /// The name of the feature. 187 | pub name: String, 188 | /// The cluster-wide finalized max version level for the feature. 189 | pub max_version_level: i16, 190 | /// The cluster-wide finalized min version level for the feature. 191 | pub min_version_level: i16, 192 | /// Unknown tagged fields. 193 | pub unknown_tagged_fields: Vec, 194 | } 195 | 196 | impl Serializable for FinalizedFeatureKey { 197 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 198 | if version > 3 { 199 | Err(err_encode_message_unsupported( 200 | version, 201 | "FinalizedFeatureKey", 202 | ))? 203 | } 204 | NullableString(true).encode(buf, self.name.as_ref())?; 205 | Int16.encode(buf, self.max_version_level)?; 206 | Int16.encode(buf, self.min_version_level)?; 207 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 208 | Ok(()) 209 | } 210 | 211 | fn calculate_size(&self, _version: i16) -> usize { 212 | let mut res = 0; 213 | res += NullableString(true).calculate_size(self.name.as_ref()); 214 | res += Int16::SIZE; // self.max_version_level 215 | res += Int16::SIZE; // self.min_version_level 216 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 217 | res 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /kafka-api/src/schemata/create_topic_request.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::io; 16 | 17 | use crate::{codec::*, err_decode_message_null}; 18 | 19 | // Version 1 adds validateOnly. 20 | // 21 | // Version 4 makes partitions/replicationFactor optional even when assignments are not present 22 | // (KIP-464) 23 | // 24 | // Version 5 is the first flexible version. 25 | // Version 5 also returns topic configs in the response (KIP-525). 26 | // 27 | // Version 6 is identical to version 5 but may return a THROTTLING_QUOTA_EXCEEDED error 28 | // in the response if the topics creation is throttled (KIP-599). 29 | // 30 | // Version 7 is the same as version 6. 31 | 32 | #[derive(Debug, Default, Clone)] 33 | pub struct CreateTopicsRequest { 34 | /// The topics to create. 
35 | pub topics: Vec, 36 | /// How long to wait in milliseconds before timing out the request. 37 | pub timeout_ms: i32, 38 | /// If true, check that the topics can be created as specified, but don't create anything. 39 | pub validate_only: bool, 40 | /// Unknown tagged fields. 41 | pub unknown_tagged_fields: Vec, 42 | } 43 | 44 | impl Deserializable for CreateTopicsRequest { 45 | fn read(buf: &mut B, version: i16) -> io::Result { 46 | let mut res = CreateTopicsRequest { 47 | topics: NullableArray(Struct(version), version >= 5) 48 | .decode(buf)? 49 | .ok_or_else(|| err_decode_message_null("name"))?, 50 | timeout_ms: Int32.decode(buf)?, 51 | ..Default::default() 52 | }; 53 | if version >= 1 { 54 | res.validate_only = Bool.decode(buf)?; 55 | } 56 | if version >= 5 { 57 | res.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 58 | } 59 | Ok(res) 60 | } 61 | } 62 | 63 | #[derive(Debug, Default, Clone)] 64 | pub struct CreatableTopic { 65 | /// The topic name. 66 | pub name: String, 67 | /// The number of partitions to create in the topic, or -1 if we are either specifying a manual 68 | /// partition assignment or using the default partitions. 69 | pub num_partitions: i32, 70 | /// The number of replicas to create for each partition in the topic, or -1 if we are either 71 | /// specifying a manual partition assignment or using the default replication factor. 72 | pub replication_factor: i16, 73 | /// The manual partition assignment, or the empty array if we are using automatic assignment. 74 | pub assignments: Vec, 75 | /// The custom topic configurations to set. 76 | pub configs: Vec, 77 | /// Unknown tagged fields. 78 | pub unknown_tagged_fields: Vec, 79 | } 80 | 81 | impl Deserializable for CreatableTopic { 82 | fn read(buf: &mut B, version: i16) -> io::Result { 83 | let mut res = CreatableTopic { 84 | name: NullableString(version >= 5) 85 | .decode(buf)? 86 | .ok_or_else(|| err_decode_message_null("name"))?, 87 | num_partitions: Int32.decode(buf)?, 88 | replication_factor: Int16.decode(buf)?, 89 | assignments: NullableArray(Struct(version), version >= 5) 90 | .decode(buf)? 91 | .ok_or_else(|| err_decode_message_null("assignments"))?, 92 | configs: NullableArray(Struct(version), version >= 5) 93 | .decode(buf)? 94 | .ok_or_else(|| err_decode_message_null("assignments"))?, 95 | ..Default::default() 96 | }; 97 | if version >= 5 { 98 | res.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 99 | } 100 | Ok(res) 101 | } 102 | } 103 | 104 | #[derive(Debug, Default, Clone)] 105 | pub struct CreatableTopicConfig { 106 | /// The configuration name. 107 | pub name: String, 108 | /// The configuration value. 109 | pub value: Option, 110 | /// Unknown tagged fields. 111 | pub unknown_tagged_fields: Vec, 112 | } 113 | 114 | impl Deserializable for CreatableTopicConfig { 115 | fn read(buf: &mut B, version: i16) -> io::Result { 116 | let mut res = CreatableTopicConfig { 117 | name: NullableString(version >= 5) 118 | .decode(buf)? 119 | .ok_or_else(|| err_decode_message_null("name"))?, 120 | value: NullableString(version >= 5).decode(buf)?, 121 | ..Default::default() 122 | }; 123 | if version >= 5 { 124 | res.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 125 | } 126 | Ok(res) 127 | } 128 | } 129 | 130 | #[derive(Debug, Default, Clone)] 131 | pub struct CreatableReplicaAssignment { 132 | /// The partition index. 133 | pub partition_index: i32, 134 | /// The brokers to place the partition on. 135 | pub broker_ids: Vec, 136 | /// Unknown tagged fields. 
137 | pub unknown_tagged_fields: Vec, 138 | } 139 | 140 | impl Deserializable for CreatableReplicaAssignment { 141 | fn read(buf: &mut B, version: i16) -> io::Result { 142 | let mut res = CreatableReplicaAssignment { 143 | partition_index: Int32.decode(buf)?, 144 | ..Default::default() 145 | }; 146 | res.broker_ids = NullableArray(Int32, version >= 5) 147 | .decode(buf)? 148 | .ok_or_else(|| err_decode_message_null("broker_ids"))?; 149 | if version >= 5 { 150 | res.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 151 | } 152 | Ok(res) 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /kafka-api/src/schemata/create_topic_response.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::io; 16 | 17 | use crate::{codec::*, err_encode_message_unsupported}; 18 | 19 | // Version 1 adds a per-topic error message string. 20 | // 21 | // Version 2 adds the throttle time. 22 | // 23 | // Starting in version 3, on quota violation, brokers send out responses before throttling. 24 | // 25 | // Version 4 makes partitions/replicationFactor optional even when assignments are not present 26 | // (KIP-464). 27 | // 28 | // Version 5 is the first flexible version. 29 | // Version 5 also returns topic configs in the response (KIP-525). 30 | // 31 | // Version 6 is identical to version 5 but may return a THROTTLING_QUOTA_EXCEEDED error 32 | // in the response if the topics creation is throttled (KIP-599). 33 | // 34 | // Version 7 returns the topic ID of the newly created topic if creation is successful. 35 | 36 | #[derive(Debug, Default, Clone)] 37 | pub struct CreateTopicsResponse { 38 | /// The duration in milliseconds for which the request was throttled due to a quota violation, 39 | /// or zero if the request did not violate any quota. 40 | pub throttle_time_ms: i32, 41 | /// Results for each topic we tried to create. 42 | pub topics: Vec, 43 | /// Unknown tagged fields. 
44 | pub unknown_tagged_fields: Vec, 45 | } 46 | 47 | impl Serializable for CreateTopicsResponse { 48 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 49 | if version >= 2 { 50 | Int32.encode(buf, self.throttle_time_ms)?; 51 | } 52 | NullableArray(Struct(version), version >= 5).encode(buf, self.topics.as_slice())?; 53 | if version >= 5 { 54 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 55 | } 56 | Ok(()) 57 | } 58 | 59 | fn calculate_size(&self, version: i16) -> usize { 60 | let mut res = 0; 61 | if version >= 2 { 62 | res += Int32::SIZE; // self.throttle_time_ms 63 | } 64 | res += NullableArray(Struct(version), version >= 5).calculate_size(self.topics.as_slice()); 65 | if version >= 5 { 66 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 67 | } 68 | res 69 | } 70 | } 71 | 72 | #[derive(Debug, Default, Clone)] 73 | pub struct CreatableTopicResult { 74 | /// The topic name. 75 | pub name: String, 76 | /// The unique topic ID 77 | pub topic_id: uuid::Uuid, 78 | /// The error code, or 0 if there was no error. 79 | pub error_code: i16, 80 | /// The error message, or null if there was no error. 81 | pub error_message: Option, 82 | /// Optional topic config error returned if configs are not returned in the response. 83 | pub topic_config_error_code: i16, 84 | /// Number of partitions of the topic. 85 | pub num_partitions: i32, 86 | /// Replication factor of the topic. 87 | pub replication_factor: i16, 88 | /// Configuration of the topic. 89 | pub configs: Vec, 90 | /// Unknown tagged fields. 91 | pub unknown_tagged_fields: Vec, 92 | } 93 | 94 | impl Serializable for CreatableTopicResult { 95 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 96 | NullableString(version >= 5).encode(buf, self.name.as_str())?; 97 | if version >= 7 { 98 | Uuid.encode(buf, self.topic_id)?; 99 | } 100 | Int16.encode(buf, self.error_code)?; 101 | if version >= 1 { 102 | NullableString(version >= 5).encode(buf, self.error_message.as_deref())?; 103 | } 104 | if version >= 5 { 105 | Int32.encode(buf, self.num_partitions)?; 106 | Int16.encode(buf, self.replication_factor)?; 107 | NullableArray(Struct(version), true).encode(buf, self.configs.as_slice())?; 108 | } 109 | if version >= 5 { 110 | RawTaggedFieldList.encode_with(buf, 1, &self.unknown_tagged_fields, |buf| { 111 | RawTaggedFieldWriter.write_field(buf, 0, Int16, self.topic_config_error_code)?; 112 | Ok(()) 113 | })?; 114 | } 115 | Ok(()) 116 | } 117 | 118 | fn calculate_size(&self, version: i16) -> usize { 119 | let mut res = 0; 120 | res += NullableString(version >= 5).calculate_size(self.name.as_str()); 121 | if version >= 7 { 122 | res += Uuid::SIZE; // self.topic_id 123 | } 124 | res += Int16::SIZE; // self.error_code 125 | if version >= 1 { 126 | res += NullableString(version >= 5).calculate_size(self.error_message.as_deref()); 127 | } 128 | if version >= 5 { 129 | res += Int32::SIZE; // self.num_partitions 130 | res += Int16::SIZE; // self.replication_factor 131 | res += NullableArray(Struct(version), true).calculate_size(self.configs.as_slice()); 132 | } 133 | if version >= 5 { 134 | res += RawTaggedFieldList.calculate_size_with( 135 | 1, 136 | RawTaggedFieldWriter.calculate_field_size(0, Int16, &self.topic_config_error_code), 137 | &self.unknown_tagged_fields, 138 | ); 139 | } 140 | res 141 | } 142 | } 143 | 144 | #[derive(Debug, Default, Clone)] 145 | pub struct CreatableTopicConfigs { 146 | /// The configuration name. 147 | pub name: String, 148 | /// The configuration value. 
149 | pub value: Option, 150 | /// True if the configuration is read-only. 151 | pub read_only: bool, 152 | /// The configuration source. 153 | pub config_source: i8, 154 | /// True if this configuration is sensitive. 155 | pub is_sensitive: bool, 156 | /// Unknown tagged fields. 157 | pub unknown_tagged_fields: Vec, 158 | } 159 | 160 | impl Serializable for CreatableTopicConfigs { 161 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 162 | if version < 5 { 163 | Err(err_encode_message_unsupported( 164 | version, 165 | "CreatableTopicConfigs", 166 | ))? 167 | } 168 | NullableString(true).encode(buf, self.name.as_str())?; 169 | NullableString(true).encode(buf, self.value.as_deref())?; 170 | Bool.encode(buf, self.read_only)?; 171 | Int8.encode(buf, self.config_source)?; 172 | Bool.encode(buf, self.is_sensitive)?; 173 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 174 | Ok(()) 175 | } 176 | 177 | fn calculate_size(&self, _version: i16) -> usize { 178 | let mut res = 0; 179 | res += NullableString(true).calculate_size(self.name.as_str()); 180 | res += NullableString(true).calculate_size(self.value.as_deref()); 181 | res += Bool::SIZE; // self.read_only 182 | res += Int8::SIZE; // self.config_source 183 | res += Bool::SIZE; // self.is_sensitive 184 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 185 | res 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /kafka-api/src/schemata/fetch_request.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::io; 16 | 17 | use crate::{codec::*, err_decode_message_null, err_decode_message_unsupported}; 18 | 19 | // Version 1 is the same as version 0. 20 | // 21 | // Starting in Version 2, the requester must be able to handle Kafka Log 22 | // Message format version 1. 23 | // 24 | // Version 3 adds MaxBytes. Starting in version 3, the partition ordering in 25 | // the request is now relevant. Partitions will be processed in the order 26 | // they appear in the request. 27 | // 28 | // Version 4 adds IsolationLevel. Starting in version 4, the requester must be 29 | // able to handle Kafka log message format version 2. 30 | // 31 | // Version 5 adds LogStartOffset to indicate the earliest available offset of 32 | // partition data that can be consumed. 33 | // 34 | // Version 6 is the same as version 5. 35 | // 36 | // Version 7 adds incremental fetch request support. 37 | // 38 | // Version 8 is the same as version 7. 39 | // 40 | // Version 9 adds CurrentLeaderEpoch, as described in KIP-320. 41 | // 42 | // Version 10 indicates that we can use the ZStd compression algorithm, as 43 | // described in KIP-110. 
44 | // Version 12 adds flexible versions support as well as epoch validation through 45 | // the `LastFetchedEpoch` field 46 | // 47 | // Version 13 replaces topic names with topic IDs (KIP-516). May return UNKNOWN_TOPIC_ID error code. 48 | // 49 | // Version 14 is the same as version 13 but it also receives a new error called 50 | // OffsetMovedToTieredStorageException(KIP-405) 51 | // 52 | // Version 15 adds the ReplicaState which includes new field ReplicaEpoch and the ReplicaId. Also, 53 | // deprecate the old ReplicaId field and set its default value to -1. (KIP-903) 54 | 55 | #[derive(Debug, Default, Clone)] 56 | pub struct FetchRequest { 57 | /// The clusterId if known. This is used to validate metadata fetches prior to broker 58 | /// registration. 59 | pub cluster_id: Option, 60 | /// The broker ID of the follower, of -1 if this request is from a consumer. 61 | pub replica_id: i32, 62 | pub replica_state: ReplicaState, 63 | /// The maximum time in milliseconds to wait for the response. 64 | pub max_wait_ms: i32, 65 | /// The minimum bytes to accumulate in the response. 66 | pub min_bytes: i32, 67 | /// The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored. 68 | pub max_bytes: i32, 69 | /// This setting controls the visibility of transactional records. Using READ_UNCOMMITTED 70 | /// (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), 71 | /// non-transactional and COMMITTED transactional records are visible. To be more concrete, 72 | /// READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable 73 | /// offset), and enables the inclusion of the list of aborted transactions in the result, which 74 | /// allows consumers to discard ABORTED transactional records 75 | pub isolation_level: i8, 76 | /// The fetch session ID. 77 | pub session_id: i32, 78 | /// The fetch session epoch, which is used for ordering requests in a session. 79 | pub session_epoch: i32, 80 | /// The topics to fetch. 81 | pub topics: Vec, 82 | /// In an incremental fetch request, the partitions to remove. 83 | pub forgotten_topics_data: Vec, 84 | /// Rack ID of the consumer making this request. 85 | pub rack_id: String, 86 | /// Unknown tagged fields. 87 | pub unknown_tagged_fields: Vec, 88 | } 89 | 90 | impl Deserializable for FetchRequest { 91 | fn read(buf: &mut B, version: i16) -> io::Result { 92 | let mut this = FetchRequest { 93 | replica_id: -1, 94 | max_bytes: i32::MAX, 95 | session_epoch: -1, 96 | ..Default::default() 97 | }; 98 | if version <= 14 { 99 | this.replica_id = Int32.decode(buf)? 100 | } 101 | this.max_wait_ms = Int32.decode(buf)?; 102 | this.min_bytes = Int32.decode(buf)?; 103 | if version >= 3 { 104 | this.max_bytes = Int32.decode(buf)?; 105 | } 106 | if version >= 4 { 107 | this.isolation_level = Int8.decode(buf)?; 108 | } 109 | if version >= 7 { 110 | this.session_id = Int32.decode(buf)?; 111 | } 112 | if version >= 7 { 113 | this.session_epoch = Int32.decode(buf)?; 114 | } 115 | this.topics = NullableArray(Struct(version), version >= 12) 116 | .decode(buf)? 117 | .ok_or_else(|| err_decode_message_null("topics"))?; 118 | if version >= 7 { 119 | this.forgotten_topics_data = NullableArray(Struct(version), version >= 12) 120 | .decode(buf)? 121 | .ok_or_else(|| err_decode_message_null("forgotten_topics_data"))?; 122 | } 123 | if version >= 11 { 124 | this.rack_id = NullableString(version >= 12) 125 | .decode(buf)? 
126 | .ok_or_else(|| err_decode_message_null("rack_id"))?; 127 | } 128 | if version >= 12 { 129 | this.unknown_tagged_fields = 130 | RawTaggedFieldList.decode_with(buf, |buf, tag, _| match tag { 131 | 0 => { 132 | this.cluster_id = NullableString(true).decode(buf)?; 133 | Ok(true) 134 | } 135 | 1 => { 136 | if version >= 15 { 137 | this.replica_state = ReplicaState::read(buf, version)?; 138 | } 139 | Ok(true) 140 | } 141 | _ => Ok(false), 142 | })?; 143 | } 144 | Ok(this) 145 | } 146 | } 147 | 148 | #[derive(Debug, Default, Clone)] 149 | pub struct ReplicaState { 150 | /// The replica ID of the follower, or -1 if this request is from a consumer. 151 | pub replica_id: i32, 152 | /// The epoch of this follower, or -1 if not available. 153 | pub replica_epoch: i64, 154 | /// Unknown tagged fields. 155 | pub unknown_tagged_fields: Vec, 156 | } 157 | 158 | impl Deserializable for ReplicaState { 159 | fn read(buf: &mut B, version: i16) -> io::Result { 160 | if version > 15 { 161 | Err(err_decode_message_unsupported(version, "ReplicaState"))? 162 | } 163 | 164 | Ok(ReplicaState { 165 | replica_id: Int32.decode(buf)?, 166 | replica_epoch: Int64.decode(buf)?, 167 | unknown_tagged_fields: RawTaggedFieldList.decode(buf)?, 168 | }) 169 | } 170 | } 171 | 172 | #[derive(Debug, Default, Clone)] 173 | pub struct FetchTopic { 174 | /// The name of the topic to fetch. 175 | pub topic: String, 176 | /// The unique topic ID 177 | pub topic_id: uuid::Uuid, 178 | /// The partitions to fetch. 179 | pub partitions: Vec, 180 | /// Unknown tagged fields. 181 | pub unknown_tagged_fields: Vec, 182 | } 183 | 184 | impl Deserializable for FetchTopic { 185 | fn read(buf: &mut B, version: i16) -> io::Result { 186 | if version > 15 { 187 | Err(err_decode_message_unsupported(version, "FetchTopic"))? 188 | } 189 | let mut this = FetchTopic::default(); 190 | if version <= 12 { 191 | this.topic = NullableString(version >= 12) 192 | .decode(buf)? 193 | .ok_or_else(|| err_decode_message_null("topic"))?; 194 | } 195 | if version >= 13 { 196 | this.topic_id = Uuid.decode(buf)?; 197 | } 198 | this.partitions = NullableArray(Struct(version), version >= 12) 199 | .decode(buf)? 200 | .ok_or_else(|| err_decode_message_null("partitions"))?; 201 | if version >= 12 { 202 | this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 203 | } 204 | Ok(this) 205 | } 206 | } 207 | 208 | #[derive(Debug, Default, Clone)] 209 | pub struct FetchPartition { 210 | /// The partition index. 211 | pub partition: i32, 212 | /// The current leader epoch of the partition. 213 | pub current_leader_epoch: i32, 214 | /// The message offset. 215 | pub fetch_offset: i64, 216 | /// The epoch of the last fetched record or -1 if there is none 217 | pub last_fetched_epoch: i32, 218 | /// The earliest available offset of the follower replica. 219 | /// 220 | /// The field is only used when the request is sent by the follower. 221 | pub log_start_offset: i64, 222 | /// The maximum bytes to fetch from this partition. 223 | /// 224 | /// See KIP-74 for cases where this limit may not be honored. 225 | pub partition_max_bytes: i32, 226 | /// Unknown tagged fields. 227 | pub unknown_tagged_fields: Vec, 228 | } 229 | 230 | impl Deserializable for FetchPartition { 231 | fn read(buf: &mut B, version: i16) -> io::Result { 232 | if version > 15 { 233 | Err(err_decode_message_unsupported(version, "FetchPartition"))? 
234 | } 235 | let mut this = FetchPartition { 236 | partition: Int32.decode(buf)?, 237 | ..Default::default() 238 | }; 239 | this.current_leader_epoch = if version >= 9 { Int32.decode(buf)? } else { -1 }; 240 | this.fetch_offset = Int64.decode(buf)?; 241 | this.last_fetched_epoch = if version >= 12 { 242 | Int32.decode(buf)? 243 | } else { 244 | -1 245 | }; 246 | this.log_start_offset = if version >= 5 { Int64.decode(buf)? } else { -1 }; 247 | this.partition_max_bytes = Int32.decode(buf)?; 248 | if version >= 12 { 249 | this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 250 | } 251 | Ok(this) 252 | } 253 | } 254 | 255 | #[derive(Debug, Default, Clone)] 256 | pub struct ForgottenTopic { 257 | /// The topic name. 258 | pub topic: String, 259 | /// The unique topic ID 260 | pub topic_id: uuid::Uuid, 261 | /// The partitions indexes to forget. 262 | pub partitions: Vec, 263 | /// Unknown tagged fields. 264 | pub unknown_tagged_fields: Vec, 265 | } 266 | 267 | impl Deserializable for ForgottenTopic { 268 | fn read(buf: &mut B, version: i16) -> io::Result { 269 | if version > 15 { 270 | Err(err_decode_message_unsupported(version, "ForgottenTopic"))? 271 | } 272 | let mut this = ForgottenTopic::default(); 273 | if version <= 12 { 274 | this.topic = NullableString(version >= 12) 275 | .decode(buf)? 276 | .ok_or_else(|| err_decode_message_null("topic"))?; 277 | } 278 | if version >= 13 { 279 | this.topic_id = Uuid.decode(buf)?; 280 | } 281 | this.partitions = NullableArray(Int32, version >= 12) 282 | .decode(buf)? 283 | .ok_or_else(|| err_decode_message_null("partitions"))?; 284 | if version >= 12 { 285 | this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 286 | } 287 | Ok(this) 288 | } 289 | } 290 | -------------------------------------------------------------------------------- /kafka-api/src/schemata/fetch_response.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::io; 16 | 17 | use crate::{codec::*, err_encode_message_unsupported, records::ReadOnlyRecords}; 18 | 19 | // Version 1 adds throttle time. 20 | // 21 | // Version 2 and 3 are the same as version 1. 22 | // 23 | // Version 4 adds features for transactional consumption. 24 | // 25 | // Version 5 adds LogStartOffset to indicate the earliest available offset of 26 | // partition data that can be consumed. 27 | // 28 | // Starting in version 6, we may return KAFKA_STORAGE_ERROR as an error code. 29 | // 30 | // Version 7 adds incremental fetch request support. 31 | // 32 | // Starting in version 8, on quota violation, brokers send out responses before throttling. 33 | // 34 | // Version 9 is the same as version 8. 35 | // 36 | // Version 10 indicates that the response data can use the ZStd compression 37 | // algorithm, as described in KIP-110. 
38 | // Version 12 adds support for flexible versions, epoch detection through the `TruncationOffset` 39 | // field, and leader discovery through the `CurrentLeader` field 40 | // 41 | // Version 13 replaces the topic name field with topic ID (KIP-516). 42 | // 43 | // Version 14 is the same as version 13 but it also receives a new error called 44 | // OffsetMovedToTieredStorageException (KIP-405) 45 | // 46 | // Version 15 is the same as version 14 (KIP-903). 47 | 48 | #[derive(Debug, Default, Clone)] 49 | pub struct FetchResponse { 50 | /// The duration in milliseconds for which the request was throttled due to a quota violation, 51 | /// or zero if the request did not violate any quota. 52 | pub throttle_time_ms: i32, 53 | /// The top level response error code. 54 | pub error_code: i16, 55 | /// The fetch session ID, or 0 if this is not part of a fetch session. 56 | pub session_id: i32, 57 | /// The response topics. 58 | pub responses: Vec, 59 | /// Unknown tagged fields. 60 | pub unknown_tagged_fields: Vec, 61 | } 62 | 63 | impl Serializable for FetchResponse { 64 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 65 | if version >= 1 { 66 | Int32.encode(buf, self.throttle_time_ms)?; 67 | } 68 | if version >= 7 { 69 | Int16.encode(buf, self.error_code)?; 70 | Int32.encode(buf, self.session_id)?; 71 | } 72 | NullableArray(Struct(version), version >= 12).encode(buf, self.responses.as_slice())?; 73 | if version >= 12 { 74 | RawTaggedFieldList.encode(buf, self.unknown_tagged_fields.as_slice())?; 75 | } 76 | Ok(()) 77 | } 78 | 79 | fn calculate_size(&self, version: i16) -> usize { 80 | let mut res = 0; 81 | if version >= 1 { 82 | res += Int32::SIZE; // self.throttle_time_ms 83 | } 84 | if version >= 7 { 85 | res += Int16::SIZE; // self.error_code 86 | res += Int32::SIZE; // self.session_id 87 | } 88 | res += 89 | NullableArray(Struct(version), version >= 12).calculate_size(self.responses.as_slice()); 90 | if version >= 12 { 91 | res += RawTaggedFieldList.calculate_size(self.unknown_tagged_fields.as_slice()); 92 | } 93 | res 94 | } 95 | } 96 | 97 | #[derive(Debug, Default, Clone)] 98 | pub struct FetchableTopicResponse { 99 | /// The topic name. 100 | pub topic: String, 101 | /// The unique topic ID 102 | pub topic_id: uuid::Uuid, 103 | /// The topic partitions. 104 | pub partitions: Vec, 105 | /// Unknown tagged fields. 
106 | pub unknown_tagged_fields: Vec, 107 | } 108 | 109 | impl Serializable for FetchableTopicResponse { 110 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 111 | if version <= 12 { 112 | NullableString(version >= 12).encode(buf, self.topic.as_str())?; 113 | } 114 | if version >= 13 { 115 | Uuid.encode(buf, self.topic_id)?; 116 | } 117 | NullableArray(Struct(version), version >= 12).encode(buf, self.partitions.as_slice())?; 118 | if version >= 12 { 119 | RawTaggedFieldList.encode(buf, self.unknown_tagged_fields.as_slice())?; 120 | } 121 | Ok(()) 122 | } 123 | 124 | fn calculate_size(&self, version: i16) -> usize { 125 | let mut res = 0; 126 | if version <= 12 { 127 | res += NullableString(version >= 12).calculate_size(self.topic.as_str()); 128 | } 129 | if version >= 13 { 130 | res += Uuid::SIZE; // self.topic_id 131 | } 132 | res += NullableArray(Struct(version), version >= 12) 133 | .calculate_size(self.partitions.as_slice()); 134 | if version >= 12 { 135 | res += RawTaggedFieldList.calculate_size(self.unknown_tagged_fields.as_slice()); 136 | } 137 | res 138 | } 139 | } 140 | 141 | #[derive(Debug, Clone)] 142 | pub struct PartitionData { 143 | /// The topic name. 144 | pub partition_index: i32, 145 | /// The error code, or 0 if there was no fetch error. 146 | pub error_code: i16, 147 | /// The current high water mark. 148 | pub high_watermark: i64, 149 | /// The last stable offset (or LSO) of the partition. This is the last offset such that the 150 | /// state of all transactional records prior to this offset have been decided (ABORTED or 151 | /// COMMITTED). 152 | pub last_stable_offset: i64, 153 | /// The current log start offset. 154 | pub log_start_offset: i64, 155 | /// In case divergence is detected based on the `LastFetchedEpoch` and `FetchOffset` in the 156 | /// request, this field indicates the largest epoch and its end offset such that subsequent 157 | /// records are known to diverge 158 | pub diverging_epoch: Option, 159 | pub current_leader: Option, 160 | /// In the case of fetching an offset less than the LogStartOffset, this is the end offset and 161 | /// epoch that should be used in the FetchSnapshot request. 162 | pub snapshot_id: Option, 163 | /// The aborted transactions. 164 | pub aborted_transactions: Option>, 165 | /// The preferred read replica for the consumer to use on its next fetch request 166 | pub preferred_read_replica: i32, 167 | /// The record data. 168 | pub records: ReadOnlyRecords, 169 | /// Unknown tagged fields. 
170 | pub unknown_tagged_fields: Vec, 171 | } 172 | 173 | impl Default for PartitionData { 174 | fn default() -> Self { 175 | PartitionData { 176 | partition_index: 0, 177 | error_code: 0, 178 | high_watermark: 0, 179 | last_stable_offset: -1, 180 | log_start_offset: -1, 181 | diverging_epoch: None, 182 | current_leader: None, 183 | snapshot_id: None, 184 | aborted_transactions: None, 185 | preferred_read_replica: -1, 186 | records: Default::default(), 187 | unknown_tagged_fields: vec![], 188 | } 189 | } 190 | } 191 | 192 | impl Serializable for PartitionData { 193 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 194 | Int32.encode(buf, self.partition_index)?; 195 | Int16.encode(buf, self.error_code)?; 196 | Int64.encode(buf, self.high_watermark)?; 197 | if version >= 4 { 198 | Int64.encode(buf, self.last_stable_offset)?; 199 | } 200 | if version >= 5 { 201 | Int64.encode(buf, self.log_start_offset)?; 202 | } 203 | if version >= 4 { 204 | NullableArray(Struct(version), version >= 12) 205 | .encode(buf, self.aborted_transactions.as_deref())?; 206 | } 207 | if version >= 11 { 208 | Int32.encode(buf, self.preferred_read_replica)?; 209 | } 210 | NullableRecords(version >= 12).encode(buf, &self.records)?; 211 | if version >= 12 { 212 | let mut n = self.diverging_epoch.is_some() as usize; 213 | n += self.current_leader.is_some() as usize; 214 | n += self.snapshot_id.is_some() as usize; 215 | RawTaggedFieldList.encode_with(buf, n, &self.unknown_tagged_fields, |buf| { 216 | if let Some(diverging_epoch) = &self.diverging_epoch { 217 | RawTaggedFieldWriter.write_field(buf, 0, Struct(version), diverging_epoch)?; 218 | } 219 | if let Some(current_leader) = &self.current_leader { 220 | RawTaggedFieldWriter.write_field(buf, 1, Struct(version), current_leader)?; 221 | } 222 | if let Some(snapshot_id) = &self.snapshot_id { 223 | RawTaggedFieldWriter.write_field(buf, 2, Struct(version), snapshot_id)?; 224 | } 225 | Ok(()) 226 | })?; 227 | } 228 | Ok(()) 229 | } 230 | 231 | fn calculate_size(&self, version: i16) -> usize { 232 | let mut res = 0; 233 | res += Int32::SIZE; // self.partition_index 234 | res += Int16::SIZE; // self.error_code 235 | res += Int64::SIZE; // self.high_watermark 236 | if version >= 4 { 237 | res += Int64::SIZE; // self.last_stable_offset 238 | } 239 | if version >= 5 { 240 | res += Int64::SIZE; // self.log_start_offset 241 | } 242 | if version >= 4 { 243 | res += NullableArray(Struct(version), version >= 12) 244 | .calculate_size(self.aborted_transactions.as_deref()); 245 | } 246 | if version >= 11 { 247 | res += Int32::SIZE; // self.preferred_read_replica 248 | } 249 | res += NullableRecords(version >= 12).calculate_size(&self.records); 250 | if version >= 12 { 251 | let mut n = 0; 252 | let mut bs = 0; 253 | if let Some(diverging_epoch) = &self.diverging_epoch { 254 | n += 1; 255 | bs += 256 | RawTaggedFieldWriter.calculate_field_size(0, Struct(version), diverging_epoch); 257 | } 258 | if let Some(current_leader) = &self.current_leader { 259 | n += 1; 260 | bs += RawTaggedFieldWriter.calculate_field_size(0, Struct(version), current_leader); 261 | } 262 | if let Some(snapshot_id) = &self.snapshot_id { 263 | n += 1; 264 | bs += RawTaggedFieldWriter.calculate_field_size(0, Struct(version), snapshot_id); 265 | } 266 | res += RawTaggedFieldList.calculate_size_with(n, bs, &self.unknown_tagged_fields); 267 | } 268 | res 269 | } 270 | } 271 | 272 | #[derive(Debug, Clone)] 273 | pub struct EpochEndOffset { 274 | pub epoch: i32, 275 | pub end_offset: i64, 276 | /// Unknown 
tagged fields. 277 | pub unknown_tagged_fields: Vec, 278 | } 279 | 280 | impl Default for EpochEndOffset { 281 | fn default() -> Self { 282 | EpochEndOffset { 283 | epoch: -1, 284 | end_offset: -1, 285 | unknown_tagged_fields: vec![], 286 | } 287 | } 288 | } 289 | 290 | impl Serializable for EpochEndOffset { 291 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 292 | if version < 12 { 293 | Err(err_encode_message_unsupported(version, "EpochEndOffset"))? 294 | } 295 | Int32.encode(buf, self.epoch)?; 296 | Int64.encode(buf, self.end_offset)?; 297 | RawTaggedFieldList.encode(buf, self.unknown_tagged_fields.as_slice())?; 298 | Ok(()) 299 | } 300 | 301 | fn calculate_size(&self, _version: i16) -> usize { 302 | let mut res = 0; 303 | res += Int32::SIZE; // self.epoch 304 | res += Int64::SIZE; // self.end_offset 305 | res += RawTaggedFieldList.calculate_size(self.unknown_tagged_fields.as_slice()); 306 | res 307 | } 308 | } 309 | 310 | #[derive(Debug, Clone)] 311 | pub struct LeaderIdAndEpoch { 312 | /// The ID of the current leader or -1 if the leader is unknown. 313 | pub leader_id: i32, 314 | /// The latest known leader epoch 315 | pub leader_epoch: i32, 316 | /// Unknown tagged fields. 317 | pub unknown_tagged_fields: Vec, 318 | } 319 | 320 | impl Default for LeaderIdAndEpoch { 321 | fn default() -> Self { 322 | LeaderIdAndEpoch { 323 | leader_id: -1, 324 | leader_epoch: -1, 325 | unknown_tagged_fields: vec![], 326 | } 327 | } 328 | } 329 | 330 | impl Serializable for LeaderIdAndEpoch { 331 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 332 | if version < 12 { 333 | Err(err_encode_message_unsupported(version, "LeaderIdAndEpoch"))? 334 | } 335 | Int32.encode(buf, self.leader_id)?; 336 | Int32.encode(buf, self.leader_epoch)?; 337 | RawTaggedFieldList.encode(buf, self.unknown_tagged_fields.as_slice())?; 338 | Ok(()) 339 | } 340 | 341 | fn calculate_size(&self, _version: i16) -> usize { 342 | let mut res = 0; 343 | res += Int32::SIZE; // self.leader_id 344 | res += Int32::SIZE; // self.leader_epoch 345 | res += RawTaggedFieldList.calculate_size(self.unknown_tagged_fields.as_slice()); 346 | res 347 | } 348 | } 349 | 350 | #[derive(Debug, Clone)] 351 | pub struct SnapshotId { 352 | pub end_offset: i64, 353 | pub epoch: i32, 354 | /// Unknown tagged fields. 355 | pub unknown_tagged_fields: Vec, 356 | } 357 | 358 | impl Default for SnapshotId { 359 | fn default() -> Self { 360 | SnapshotId { 361 | end_offset: -1, 362 | epoch: -1, 363 | unknown_tagged_fields: vec![], 364 | } 365 | } 366 | } 367 | 368 | impl Serializable for SnapshotId { 369 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 370 | if version < 12 { 371 | Err(err_encode_message_unsupported(version, "SnapshotId"))? 372 | } 373 | Int64.encode(buf, self.end_offset)?; 374 | Int32.encode(buf, self.epoch)?; 375 | RawTaggedFieldList.encode(buf, self.unknown_tagged_fields.as_slice())?; 376 | Ok(()) 377 | } 378 | 379 | fn calculate_size(&self, _version: i16) -> usize { 380 | let mut res = 0; 381 | res += Int64::SIZE; // self.end_offset 382 | res += Int32::SIZE; // self.epoch 383 | res += RawTaggedFieldList.calculate_size(self.unknown_tagged_fields.as_slice()); 384 | res 385 | } 386 | } 387 | 388 | #[derive(Debug, Default, Clone)] 389 | pub struct AbortedTransaction { 390 | /// The producer id associated with the aborted transaction. 391 | pub producer_id: i64, 392 | /// The first offset in the aborted transaction. 393 | pub first_offset: i64, 394 | /// Unknown tagged fields. 
395 | pub unknown_tagged_fields: Vec, 396 | } 397 | 398 | impl Serializable for AbortedTransaction { 399 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 400 | if version < 4 { 401 | Err(err_encode_message_unsupported( 402 | version, 403 | "AbortedTransaction", 404 | ))? 405 | } 406 | Int64.encode(buf, self.producer_id)?; 407 | Int64.encode(buf, self.first_offset)?; 408 | if version >= 12 { 409 | RawTaggedFieldList.encode(buf, self.unknown_tagged_fields.as_slice())?; 410 | } 411 | Ok(()) 412 | } 413 | 414 | fn calculate_size(&self, version: i16) -> usize { 415 | let mut res = 0; 416 | res += Int64::SIZE; // self.producer_id 417 | res += Int64::SIZE; // self.first_offset 418 | if version >= 12 { 419 | res += RawTaggedFieldList.calculate_size(self.unknown_tagged_fields.as_slice()); 420 | } 421 | res 422 | } 423 | } 424 | -------------------------------------------------------------------------------- /kafka-api/src/schemata/find_coordinator_request.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::io; 16 | 17 | use crate::{codec::*, err_decode_message_null}; 18 | 19 | // Version 1 adds KeyType. 20 | // 21 | // Version 2 is the same as version 1. 22 | // 23 | // Version 3 is the first flexible version. 24 | // 25 | // Version 4 adds support for batching via CoordinatorKeys (KIP-699) 26 | 27 | #[derive(Debug, Default, Clone)] 28 | pub struct FindCoordinatorRequest { 29 | /// The coordinator key. 30 | pub key: String, 31 | /// The coordinator key type. (Group, transaction, etc.) 32 | pub key_type: i8, 33 | /// The coordinator keys. 34 | pub coordinator_keys: Vec, 35 | /// Unknown tagged fields. 36 | pub unknown_tagged_fields: Vec, 37 | } 38 | 39 | impl Deserializable for FindCoordinatorRequest { 40 | fn read(buf: &mut B, version: i16) -> io::Result { 41 | let mut this = FindCoordinatorRequest::default(); 42 | if version <= 3 { 43 | this.key = NullableString(version >= 3) 44 | .decode(buf)? 45 | .unwrap_or_default(); 46 | } 47 | if version >= 1 { 48 | this.key_type = Int8.decode(buf)?; 49 | } 50 | if version >= 4 { 51 | this.coordinator_keys = NullableArray(NullableString(true), true) 52 | .decode(buf)? 53 | .unwrap_or_default() 54 | .into_iter() 55 | .map(|key| key.ok_or_else(|| err_decode_message_null("coordinatorKeys element"))) 56 | .collect::>>()?; 57 | } 58 | if version >= 3 { 59 | this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 60 | } 61 | Ok(this) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /kafka-api/src/schemata/find_coordinator_response.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::io; 16 | 17 | use crate::{codec::*, err_encode_message_unsupported}; 18 | 19 | // Version 1 adds throttle time and error messages. 20 | // 21 | // Starting in version 2, on quota violation, brokers send out responses before throttling. 22 | // 23 | // Version 3 is the first flexible version. 24 | // 25 | // Version 4 adds support for batching via Coordinators (KIP-699) 26 | 27 | #[derive(Debug, Default, Clone)] 28 | pub struct FindCoordinatorResponse { 29 | /// The duration in milliseconds for which the request was throttled due to a quota violation, 30 | /// or zero if the request did not violate any quota. 31 | pub throttle_time_ms: i32, 32 | /// The error code, or 0 if there was no error. 33 | pub error_code: i16, 34 | /// The error message, or null if there was no error. 35 | pub error_message: Option, 36 | /// The node id. 37 | pub node_id: i32, 38 | /// The host name. 39 | pub host: String, 40 | /// The port. 41 | pub port: i32, 42 | /// Each coordinator result in the response 43 | pub coordinators: Vec, 44 | /// Unknown tagged fields. 45 | pub unknown_tagged_fields: Vec, 46 | } 47 | 48 | impl Serializable for FindCoordinatorResponse { 49 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 50 | if version >= 1 { 51 | Int32.encode(buf, self.throttle_time_ms)?; 52 | } 53 | if version <= 3 { 54 | Int16.encode(buf, self.error_code)?; 55 | } 56 | if (1..=3).contains(&version) { 57 | NullableString(version >= 3).encode(buf, self.error_message.as_deref())?; 58 | } 59 | if version <= 3 { 60 | Int32.encode(buf, self.node_id)?; 61 | } 62 | if version <= 3 { 63 | NullableString(version >= 3).encode(buf, self.host.as_str())?; 64 | } 65 | if version <= 3 { 66 | Int32.encode(buf, self.port)?; 67 | } 68 | if version >= 4 { 69 | NullableArray(Struct(version), true).encode(buf, self.coordinators.as_slice())?; 70 | } 71 | if version >= 3 { 72 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 73 | } 74 | Ok(()) 75 | } 76 | 77 | fn calculate_size(&self, version: i16) -> usize { 78 | let mut res = 0; 79 | if version >= 1 { 80 | res += Int32::SIZE; // self.throttle_time_ms 81 | } 82 | if version <= 3 { 83 | res += Int16::SIZE; // self.error_code 84 | } 85 | if (1..=3).contains(&version) { 86 | res += NullableString(version >= 3).calculate_size(self.error_message.as_deref()); 87 | } 88 | if version <= 3 { 89 | res += Int32::SIZE; // self.node_id 90 | } 91 | if version <= 3 { 92 | res += NullableString(version >= 3).calculate_size(self.host.as_str()); 93 | } 94 | if version <= 3 { 95 | res += Int32::SIZE; // self.port 96 | } 97 | if version >= 4 { 98 | res += 99 | NullableArray(Struct(version), true).calculate_size(self.coordinators.as_slice()); 100 | } 101 | if version >= 3 { 102 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 103 | } 104 | res 105 | } 106 | } 107 | 108 | #[derive(Debug, Default, Clone)] 109 | pub struct Coordinator { 110 | /// The coordinator key. 111 | pub key: String, 112 | /// The node id. 113 | pub node_id: i32, 114 | /// The host name. 
115 |     pub host: String,
116 |     /// The port.
117 |     pub port: i32,
118 |     /// The error code, or 0 if there was no error.
119 |     pub error_code: i16,
120 |     /// The error message, or null if there was no error.
121 |     pub error_message: Option<String>,
122 |     /// Unknown tagged fields.
123 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
124 | }
125 | 
126 | impl Serializable for Coordinator {
127 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
128 |         if version > 4 {
129 |             Err(err_encode_message_unsupported(version, "Coordinator"))?
130 |         }
131 |         NullableString(true).encode(buf, self.key.as_str())?;
132 |         Int32.encode(buf, self.node_id)?;
133 |         NullableString(true).encode(buf, self.host.as_str())?;
134 |         Int32.encode(buf, self.port)?;
135 |         Int16.encode(buf, self.error_code)?;
136 |         NullableString(true).encode(buf, self.error_message.as_deref())?;
137 |         RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?;
138 |         Ok(())
139 |     }
140 | 
141 |     fn calculate_size(&self, _version: i16) -> usize {
142 |         let mut res = 0;
143 |         res += NullableString(true).calculate_size(self.key.as_str());
144 |         res += Int32::SIZE; // self.node_id
145 |         res += NullableString(true).calculate_size(self.host.as_str());
146 |         res += Int32::SIZE; // self.port
147 |         res += Int16::SIZE; // self.error_code
148 |         res += NullableString(true).calculate_size(self.error_message.as_deref());
149 |         res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields);
150 |         res
151 |     }
152 | }
153 | 
--------------------------------------------------------------------------------
/kafka-api/src/schemata/init_producer_id_request.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use std::io;
16 | 
17 | use crate::codec::*;
18 | 
19 | // Version 1 is the same as version 0.
20 | //
21 | // Version 2 is the first flexible version.
22 | //
23 | // Version 3 adds ProducerId and ProducerEpoch, allowing producers to try to resume after an
24 | // INVALID_PRODUCER_EPOCH error.
25 | //
26 | // Version 4 adds support for the new error code PRODUCER_FENCED.
27 | 
28 | #[derive(Debug, Default, Clone)]
29 | pub struct InitProducerIdRequest {
30 |     /// The transactional id, or null if the producer is not transactional.
31 |     pub transactional_id: Option<String>,
32 |     /// The time in ms to wait before aborting idle transactions sent by this producer. This is
33 |     /// only relevant if a TransactionalId has been defined.
34 |     pub transaction_timeout_ms: i32,
35 |     /// The producer id. This is used to disambiguate requests if a transactional id is reused
36 |     /// following its expiration.
37 |     pub producer_id: i64,
38 |     /// The producer's current epoch. This will be checked against the producer epoch on the
39 |     /// broker, and the request will return an error if they do not match.
40 |     pub producer_epoch: i16,
41 |     /// Unknown tagged fields.
42 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
43 | }
44 | 
45 | impl Deserializable for InitProducerIdRequest {
46 |     fn read<B: Readable>(buf: &mut B, version: i16) -> io::Result<Self> {
47 |         let mut res = InitProducerIdRequest {
48 |             transactional_id: NullableString(version >= 2).decode(buf)?,
49 |             transaction_timeout_ms: Int32.decode(buf)?,
50 |             producer_id: if version >= 3 { Int64.decode(buf)? } else { -1 },
51 |             producer_epoch: if version >= 3 { Int16.decode(buf)? } else { -1 },
52 |             ..Default::default()
53 |         };
54 |         if version >= 2 {
55 |             res.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?;
56 |         }
57 |         Ok(res)
58 |     }
59 | }
60 | 
--------------------------------------------------------------------------------
/kafka-api/src/schemata/init_producer_id_response.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use std::io;
16 | 
17 | use crate::codec::*;
18 | 
19 | // Starting in version 1, on quota violation, brokers send out responses before throttling.
20 | //
21 | // Version 2 is the first flexible version.
22 | //
23 | // Version 3 is the same as version 2.
24 | //
25 | // Version 4 adds support for the new error code PRODUCER_FENCED.
26 | 
27 | #[derive(Debug, Default, Clone)]
28 | pub struct InitProducerIdResponse {
29 |     /// The duration in milliseconds for which the request was throttled due to a quota violation,
30 |     /// or zero if the request did not violate any quota.
31 |     pub throttle_time_ms: i32,
32 |     /// The error code, or 0 if there was no error.
33 |     pub error_code: i16,
34 |     /// The current producer id.
35 |     pub producer_id: i64,
36 |     /// The current epoch associated with the producer id.
37 |     pub producer_epoch: i16,
38 |     /// Unknown tagged fields.
39 | pub unknown_tagged_fields: Vec, 40 | } 41 | 42 | impl Serializable for InitProducerIdResponse { 43 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 44 | Int32.encode(buf, self.throttle_time_ms)?; 45 | Int16.encode(buf, self.error_code)?; 46 | Int64.encode(buf, self.producer_id)?; 47 | Int16.encode(buf, self.producer_epoch)?; 48 | if version >= 2 { 49 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 50 | } 51 | Ok(()) 52 | } 53 | 54 | fn calculate_size(&self, version: i16) -> usize { 55 | let mut res = 0; 56 | res += Int32::SIZE; // self.throttle_time_ms 57 | res += Int16::SIZE; // self.error_code 58 | res += Int64::SIZE; // self.producer_id 59 | res += Int16::SIZE; // self.producer_epoch 60 | if version >= 2 { 61 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 62 | } 63 | res 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /kafka-api/src/schemata/join_group_request.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::io; 16 | 17 | use crate::{ 18 | bytebuffer::ByteBuffer, codec::*, err_decode_message_null, err_decode_message_unsupported, 19 | }; 20 | 21 | // Version 1 adds RebalanceTimeoutMs. 22 | // 23 | // Version 2 and 3 are the same as version 1. 24 | // 25 | // Starting from version 4, the client needs to issue a second request to join group 26 | // 27 | // Starting from version 5, we add a new field called groupInstanceId to indicate member identity 28 | // across restarts. with assigned id. 29 | // 30 | // Version 6 is the first flexible version. 31 | // 32 | // Version 7 is the same as version 6. 33 | // 34 | // Version 8 adds the Reason field (KIP-800). 35 | // 36 | // Version 9 is the same as version 8. 37 | 38 | #[derive(Debug, Default, Clone)] 39 | pub struct JoinGroupRequest { 40 | /// The group identifier. 41 | pub group_id: String, 42 | /// The coordinator considers the consumer dead if it receives no heartbeat after this timeout 43 | /// in milliseconds. 44 | pub session_timeout_ms: i32, 45 | /// The maximum time in milliseconds that the coordinator will wait for each member to rejoin 46 | /// when rebalancing the group. 47 | pub rebalance_timeout_ms: i32, 48 | /// The member id assigned by the group coordinator. 49 | pub member_id: String, 50 | /// The unique identifier of the consumer instance provided by end user. 51 | pub group_instance_id: Option, 52 | /// The unique name the for class of protocols implemented by the group we want to join. 53 | pub protocol_type: String, 54 | /// The list of protocols that the member supports. 55 | pub protocols: Vec, 56 | /// The reason why the member (re-)joins the group. 57 | pub reason: Option, 58 | /// Unknown tagged fields. 
59 | pub unknown_tagged_fields: Vec, 60 | } 61 | 62 | impl Deserializable for JoinGroupRequest { 63 | fn read(buf: &mut B, version: i16) -> io::Result { 64 | let mut this = JoinGroupRequest { 65 | group_id: NullableString(version >= 6) 66 | .decode(buf)? 67 | .ok_or_else(|| err_decode_message_null("group_id"))?, 68 | session_timeout_ms: Int32.decode(buf)?, 69 | ..Default::default() 70 | }; 71 | this.rebalance_timeout_ms = if version >= 1 { Int32.decode(buf)? } else { -1 }; 72 | this.member_id = NullableString(version >= 6) 73 | .decode(buf)? 74 | .ok_or_else(|| err_decode_message_null("member_id"))?; 75 | if version >= 5 { 76 | this.group_instance_id = NullableString(version >= 6).decode(buf)?; 77 | } 78 | this.protocol_type = NullableString(version >= 6) 79 | .decode(buf)? 80 | .ok_or_else(|| err_decode_message_null("protocol_type"))?; 81 | this.protocols = NullableArray(Struct(version), version >= 6) 82 | .decode(buf)? 83 | .ok_or_else(|| err_decode_message_null("protocols"))?; 84 | if version >= 8 { 85 | this.reason = NullableString(true).decode(buf)?; 86 | } 87 | if version >= 6 { 88 | this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 89 | } 90 | Ok(this) 91 | } 92 | } 93 | 94 | #[derive(Debug, Default, Clone)] 95 | pub struct JoinGroupRequestProtocol { 96 | /// The protocol name. 97 | pub name: String, 98 | /// The protocol metadata. 99 | pub metadata: ByteBuffer, 100 | /// Unknown tagged fields. 101 | pub unknown_tagged_fields: Vec, 102 | } 103 | 104 | impl Deserializable for JoinGroupRequestProtocol { 105 | fn read(buf: &mut B, version: i16) -> io::Result { 106 | if version > 9 { 107 | Err(err_decode_message_unsupported( 108 | version, 109 | "JoinGroupRequestProtocol", 110 | ))? 111 | } 112 | let mut this = JoinGroupRequestProtocol { 113 | name: NullableString(version >= 6) 114 | .decode(buf)? 115 | .ok_or_else(|| err_decode_message_null("name"))?, 116 | metadata: NullableBytes(version >= 6) 117 | .decode(buf)? 118 | .ok_or_else(|| err_decode_message_null("metadata"))?, 119 | ..Default::default() 120 | }; 121 | if version >= 6 { 122 | this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 123 | } 124 | Ok(this) 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /kafka-api/src/schemata/join_group_response.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::io; 16 | 17 | use crate::{bytebuffer::ByteBuffer, codec::*, err_encode_message_null}; 18 | 19 | // Version 1 is the same as version 0. 20 | // 21 | // Version 2 adds throttle time. 22 | // 23 | // Starting in version 3, on quota violation, brokers send out responses before throttling. 24 | // 25 | // Starting in version 4, the client needs to issue a second request to join group 26 | // with assigned id. 
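//
// (That second join is driven by an error signal: the broker rejects the first
// attempt and hands back a generated member id for the retry. A broker-side
// sketch, where `MEMBER_ID_REQUIRED` and `assigned_member_id` are assumed names
// for illustration rather than identifiers defined in this crate:)
//
//     let mut resp = JoinGroupResponse::default();
//     resp.error_code = MEMBER_ID_REQUIRED; // ask the client to re-join...
//     resp.member_id = assigned_member_id;  // ...with this broker-assigned id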
27 | // 28 | // Version 5 is bumped to apply group.instance.id to identify member across restarts. 29 | // 30 | // Version 6 is the first flexible version. 31 | // 32 | // Starting from version 7, the broker sends back the Protocol Type to the client (KIP-559). 33 | // 34 | // Version 8 is the same as version 7. 35 | // 36 | // Version 9 adds the SkipAssignment field. 37 | 38 | #[derive(Debug, Default, Clone)] 39 | pub struct JoinGroupResponse { 40 | /// The duration in milliseconds for which the request was throttled due to a quota violation, 41 | /// or zero if the request did not violate any quota. 42 | pub throttle_time_ms: i32, 43 | /// The error code, or 0 if there was no error. 44 | pub error_code: i16, 45 | /// The generation ID of the group. 46 | pub generation_id: i32, 47 | /// The group protocol name. 48 | pub protocol_type: Option, 49 | /// The group protocol selected by the coordinator. 50 | pub protocol_name: Option, 51 | /// The leader of the group. 52 | pub leader: String, 53 | /// True if the leader must skip running the assignment. 54 | pub skip_assignment: bool, 55 | /// The member id assigned by the group coordinator. 56 | pub member_id: String, 57 | pub members: Vec, 58 | /// Unknown tagged fields. 59 | pub unknown_tagged_fields: Vec, 60 | } 61 | 62 | impl Serializable for JoinGroupResponse { 63 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 64 | if version >= 2 { 65 | Int32.encode(buf, self.throttle_time_ms)?; 66 | } 67 | Int16.encode(buf, self.error_code)?; 68 | Int32.encode(buf, self.generation_id)?; 69 | if version >= 7 { 70 | NullableString(true).encode(buf, self.protocol_type.as_deref())?; 71 | } 72 | if version < 7 && self.protocol_name.is_none() { 73 | Err(err_encode_message_null("protocol_name"))? 74 | } 75 | NullableString(version >= 6).encode(buf, self.protocol_name.as_deref())?; 76 | NullableString(version >= 6).encode(buf, self.leader.as_str())?; 77 | if version >= 9 { 78 | Bool.encode(buf, self.skip_assignment)?; 79 | } 80 | NullableString(version >= 6).encode(buf, self.member_id.as_str())?; 81 | NullableArray(Struct(version), version >= 6).encode(buf, self.members.as_slice())?; 82 | if version >= 6 { 83 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 84 | } 85 | Ok(()) 86 | } 87 | 88 | fn calculate_size(&self, version: i16) -> usize { 89 | let mut res = 0; 90 | if version >= 2 { 91 | res += Int32::SIZE; // self.throttle_time_ms 92 | } 93 | res += Int16::SIZE; // self.error_code 94 | res += Int32::SIZE; // self.generation_id 95 | if version >= 7 { 96 | res += NullableString(true).calculate_size(self.protocol_type.as_deref()); 97 | } 98 | res += NullableString(version >= 6).calculate_size(self.protocol_name.as_deref()); 99 | res += NullableString(version >= 6).calculate_size(self.leader.as_str()); 100 | if version >= 9 { 101 | res += Bool::SIZE; // self.skip_assignment 102 | } 103 | res += NullableString(version >= 6).calculate_size(self.member_id.as_str()); 104 | res += NullableArray(Struct(version), version >= 6).calculate_size(self.members.as_slice()); 105 | if version >= 6 { 106 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 107 | } 108 | res 109 | } 110 | } 111 | 112 | #[derive(Debug, Default, Clone)] 113 | pub struct JoinGroupResponseMember { 114 | /// The group member ID 115 | pub member_id: String, 116 | /// The unique identifier of the consumer instance provided by end user. 117 | pub group_instance_id: Option, 118 | /// The group member metadata. 
119 | pub metadata: ByteBuffer, 120 | /// Unknown tagged fields. 121 | pub unknown_tagged_fields: Vec, 122 | } 123 | 124 | impl Serializable for JoinGroupResponseMember { 125 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 126 | NullableString(version >= 6).encode(buf, self.member_id.as_str())?; 127 | if version >= 5 { 128 | NullableString(version >= 6).encode(buf, self.group_instance_id.as_deref())?; 129 | } 130 | NullableBytes(version >= 6).encode(buf, &self.metadata)?; 131 | if version >= 6 { 132 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 133 | } 134 | Ok(()) 135 | } 136 | 137 | fn calculate_size(&self, version: i16) -> usize { 138 | let mut res = 0; 139 | res += NullableString(version >= 6).calculate_size(self.member_id.as_str()); 140 | if version >= 5 { 141 | res += NullableString(version >= 6).calculate_size(self.group_instance_id.as_deref()); 142 | } 143 | res += NullableBytes(version >= 6).calculate_size(&self.metadata); 144 | if version >= 6 { 145 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 146 | } 147 | res 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /kafka-api/src/schemata/metadata_request.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::io; 16 | 17 | use crate::{codec::*, err_decode_message_null, err_decode_message_unsupported}; 18 | 19 | // In version 0, an empty array indicates "request metadata for all topics." In version 1 and 20 | // higher, an empty array indicates "request metadata for no topics," and a null array is used to 21 | // indicate "request metadata for all topics." 22 | // 23 | // Version 2 and 3 are the same as version 1. 24 | // 25 | // Version 4 adds AllowAutoTopicCreation. 26 | // 27 | // Starting in version 8, authorized operations can be requested for cluster and topic resource. 28 | // 29 | // Version 9 is the first flexible version. 30 | // 31 | // Version 10 adds topicId and allows name field to be null. However, this functionality was not 32 | // implemented on the server. Versions 10 and 11 should not use the topicId field or set topic name 33 | // to null. 34 | // 35 | // Version 11 deprecates IncludeClusterAuthorizedOperations field. This is now exposed 36 | // by the DescribeCluster API (KIP-700). 37 | // 38 | // Version 12 supports topic Id. 39 | 40 | #[derive(Debug, Default, Clone)] 41 | pub struct MetadataRequest { 42 | /// The topics to fetch metadata for. 43 | pub topics: Vec, 44 | /// If this is true, the broker may auto-create topics that we requested which do not already 45 | /// exist, if it is configured to do so. 46 | pub allow_auto_topic_creation: bool, 47 | /// Whether to include cluster authorized operations. 48 | pub include_cluster_authorized_operations: bool, 49 | /// Whether to include topic authorized operations. 
50 | pub include_topic_authorized_operations: bool, 51 | /// Unknown tagged fields. 52 | pub unknown_tagged_fields: Vec, 53 | } 54 | 55 | impl Deserializable for MetadataRequest { 56 | fn read(buf: &mut B, version: i16) -> io::Result { 57 | let mut this = MetadataRequest { 58 | topics: NullableArray(Struct(version), version >= 9) 59 | .decode(buf)? 60 | .or_else(|| if version >= 1 { Some(vec![]) } else { None }) 61 | .ok_or_else(|| err_decode_message_null("topics"))?, 62 | ..Default::default() 63 | }; 64 | if version >= 4 { 65 | this.allow_auto_topic_creation = Bool.decode(buf)?; 66 | } else { 67 | this.allow_auto_topic_creation = true; 68 | }; 69 | if (8..=10).contains(&version) { 70 | this.include_cluster_authorized_operations = Bool.decode(buf)?; 71 | } 72 | if version >= 9 { 73 | this.include_topic_authorized_operations = Bool.decode(buf)?; 74 | } 75 | if version >= 9 { 76 | this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 77 | } 78 | Ok(this) 79 | } 80 | } 81 | 82 | #[derive(Debug, Default, Clone)] 83 | pub struct MetadataRequestTopic { 84 | /// The topic id. 85 | pub topic_id: uuid::Uuid, 86 | /// The topic name. 87 | pub name: Option, 88 | /// Unknown tagged fields. 89 | pub unknown_tagged_fields: Vec, 90 | } 91 | 92 | impl Deserializable for MetadataRequestTopic { 93 | fn read(buf: &mut B, version: i16) -> io::Result { 94 | if version > 12 { 95 | Err(err_decode_message_unsupported( 96 | version, 97 | "MetadataRequestTopic", 98 | ))? 99 | } 100 | let mut this = MetadataRequestTopic::default(); 101 | if version >= 10 { 102 | this.topic_id = Uuid.decode(buf)?; 103 | } 104 | this.name = if version >= 10 { 105 | NullableString(true).decode(buf)? 106 | } else { 107 | Some( 108 | NullableString(version >= 9) 109 | .decode(buf)? 110 | .ok_or_else(|| err_decode_message_null("name"))?, 111 | ) 112 | }; 113 | if version >= 9 { 114 | this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 115 | } 116 | Ok(this) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /kafka-api/src/schemata/metadata_response.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::io; 16 | 17 | use crate::{codec::*, err_encode_message_null}; 18 | 19 | // Version 1 adds fields for the rack of each broker, the controller id, and 20 | // whether or not the topic is internal. 21 | // 22 | // Version 2 adds the cluster ID field. 23 | // 24 | // Version 3 adds the throttle time. 25 | // 26 | // Version 4 is the same as version 3. 27 | // 28 | // Version 5 adds a per-partition offline_replicas field. This field specifies 29 | // the list of replicas that are offline. 30 | // 31 | // Starting in version 6, on quota violation, brokers send out responses before throttling. 32 | // 33 | // Version 7 adds the leader epoch to the partition metadata. 
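//
// (One property worth keeping in mind while reading the impls below: for every
// message, `calculate_size` must agree byte-for-byte with what `write` emits,
// because the frame length is written before the body. A test-style sketch,
// assuming `Writable` is implemented for `Vec<u8>`, an assumption made here
// purely for illustration:)
//
//     let resp = MetadataResponse::default();
//     let mut out: Vec<u8> = Vec::new();
//     resp.write(&mut out, 9).unwrap();
//     assert_eq!(out.len(), resp.calculate_size(9));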
34 | // 35 | // Starting in version 8, brokers can send authorized operations for topic and cluster. 36 | // 37 | // Version 9 is the first flexible version. 38 | // 39 | // Version 10 adds topicId. 40 | // 41 | // Version 11 deprecates ClusterAuthorizedOperations. This is now exposed 42 | // by the DescribeCluster API (KIP-700). 43 | // 44 | // Version 12 supports topicId. 45 | 46 | #[derive(Debug, Default, Clone)] 47 | pub struct MetadataResponse { 48 | /// The duration in milliseconds for which the request was throttled due to a quota violation, 49 | /// or zero if the request did not violate any quota. 50 | pub throttle_time_ms: i32, 51 | /// Each broker in the response. 52 | pub brokers: Vec, 53 | /// The cluster ID that responding broker belongs to. 54 | pub cluster_id: Option, 55 | /// The ID of the controller broker. 56 | pub controller_id: i32, 57 | /// Each topic in the response. 58 | pub topics: Vec, 59 | /// 32-bit bitfield to represent authorized operations for this cluster. 60 | pub cluster_authorized_operations: i32, 61 | /// Unknown tagged fields. 62 | pub unknown_tagged_fields: Vec, 63 | } 64 | 65 | impl Serializable for MetadataResponse { 66 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 67 | if version >= 3 { 68 | Int32.encode(buf, self.throttle_time_ms)?; 69 | } 70 | NullableArray(Struct(version), version >= 9).encode(buf, self.brokers.as_slice())?; 71 | if version >= 2 { 72 | NullableString(version >= 9).encode(buf, self.cluster_id.as_deref())?; 73 | } 74 | if version >= 1 { 75 | Int32.encode(buf, self.controller_id)?; 76 | } 77 | NullableArray(Struct(version), version >= 9).encode(buf, self.topics.as_slice())?; 78 | if (8..=10).contains(&version) { 79 | Int32.encode(buf, self.cluster_authorized_operations)?; 80 | } 81 | if version >= 9 { 82 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 83 | } 84 | Ok(()) 85 | } 86 | 87 | fn calculate_size(&self, version: i16) -> usize { 88 | let mut res = 0; 89 | if version >= 3 { 90 | res += Int32::SIZE; // self.throttle_time_ms 91 | } 92 | res += NullableArray(Struct(version), version >= 9).calculate_size(self.brokers.as_slice()); 93 | if version >= 2 { 94 | res += NullableString(version >= 9).calculate_size(self.cluster_id.as_deref()); 95 | } 96 | if version >= 1 { 97 | res += Int32::SIZE; // self.controller_id 98 | } 99 | res += NullableArray(Struct(version), version >= 9).calculate_size(self.topics.as_slice()); 100 | if (8..=10).contains(&version) { 101 | res += Int32::SIZE; // self.cluster_authorized_operations 102 | } 103 | if version >= 9 { 104 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 105 | } 106 | res 107 | } 108 | } 109 | 110 | #[derive(Debug, Default, Clone)] 111 | pub struct MetadataResponseBroker { 112 | /// The broker ID. 113 | pub node_id: i32, 114 | /// The broker hostname. 115 | pub host: String, 116 | /// The broker port. 117 | pub port: i32, 118 | /// The rack of the broker, or null if it has not been assigned to a rack. 119 | pub rack: Option, 120 | /// Unknown tagged fields. 
121 | pub unknown_tagged_fields: Vec, 122 | } 123 | 124 | impl Serializable for MetadataResponseBroker { 125 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 126 | Int32.encode(buf, self.node_id)?; 127 | NullableString(version >= 9).encode(buf, self.host.as_str())?; 128 | Int32.encode(buf, self.port)?; 129 | if version >= 1 { 130 | NullableString(version >= 9).encode(buf, self.rack.as_deref())?; 131 | } 132 | if version >= 9 { 133 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 134 | } 135 | Ok(()) 136 | } 137 | 138 | fn calculate_size(&self, version: i16) -> usize { 139 | let mut res = 0; 140 | res += Int32::SIZE; // self.node_id 141 | res += NullableString(version >= 9).calculate_size(self.host.as_str()); 142 | res += Int32::SIZE; // self.port 143 | if version >= 1 { 144 | res += NullableString(version >= 9).calculate_size(self.rack.as_deref()); 145 | } 146 | if version >= 9 { 147 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 148 | } 149 | res 150 | } 151 | } 152 | 153 | #[derive(Debug, Default, Clone)] 154 | pub struct MetadataResponseTopic { 155 | /// The topic error, or 0 if there was no error. 156 | pub error_code: i16, 157 | /// The topic name. 158 | pub name: Option, 159 | /// The topic id. 160 | pub topic_id: uuid::Uuid, 161 | /// True if the topic is internal. 162 | pub is_internal: bool, 163 | /// Each partition in the topic. 164 | pub partitions: Vec, 165 | /// 32-bit bitfield to represent authorized operations for this topic. 166 | pub topic_authorized_operations: i32, 167 | /// Unknown tagged fields. 168 | pub unknown_tagged_fields: Vec, 169 | } 170 | 171 | impl Serializable for MetadataResponseTopic { 172 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 173 | Int16.encode(buf, self.error_code)?; 174 | match self.name { 175 | None => { 176 | if version >= 12 { 177 | NullableString(true).encode(buf, None)?; 178 | } else { 179 | Err(err_encode_message_null("name"))?; 180 | } 181 | } 182 | Some(ref name) => { 183 | NullableString(version >= 9).encode(buf, name.as_str())?; 184 | } 185 | } 186 | if version >= 10 { 187 | Uuid.encode(buf, self.topic_id)?; 188 | } 189 | if version >= 1 { 190 | Bool.encode(buf, self.is_internal)?; 191 | } 192 | NullableArray(Struct(version), version >= 9).encode(buf, self.partitions.as_slice())?; 193 | if version >= 8 { 194 | Int32.encode(buf, self.topic_authorized_operations)?; 195 | } 196 | if version >= 9 { 197 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 198 | } 199 | Ok(()) 200 | } 201 | 202 | fn calculate_size(&self, version: i16) -> usize { 203 | let mut res = 0; 204 | res += Int16::SIZE; // self.error_code 205 | res += NullableString(version >= 9).calculate_size(self.name.as_deref()); 206 | if version >= 10 { 207 | res += Uuid::SIZE; // self.topic_id 208 | } 209 | if version >= 1 { 210 | res += Bool::SIZE; // self.is_internal 211 | } 212 | res += 213 | NullableArray(Struct(version), version >= 9).calculate_size(self.partitions.as_slice()); 214 | if version >= 8 { 215 | res += Int32::SIZE; // self.topic_authorized_operations 216 | } 217 | if version >= 9 { 218 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 219 | } 220 | res 221 | } 222 | } 223 | 224 | #[derive(Debug, Default, Clone)] 225 | pub struct MetadataResponsePartition { 226 | /// The partition error, or 0 if there was no error. 227 | pub error_code: i16, 228 | /// The partition index. 229 | pub partition_index: i32, 230 | /// The ID of the leader broker. 
231 | pub leader_id: i32, 232 | /// The leader epoch of this partition. 233 | pub leader_epoch: i32, 234 | /// The set of all nodes that host this partition. 235 | pub replica_nodes: Vec, 236 | /// The set of nodes that are in sync with the leader for this partition. 237 | pub isr_nodes: Vec, 238 | /// The set of offline replicas of this partition. 239 | pub offline_replicas: Vec, 240 | /// Unknown tagged fields. 241 | pub unknown_tagged_fields: Vec, 242 | } 243 | 244 | impl Serializable for MetadataResponsePartition { 245 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 246 | Int16.encode(buf, self.error_code)?; 247 | Int32.encode(buf, self.partition_index)?; 248 | Int32.encode(buf, self.leader_id)?; 249 | if version >= 7 { 250 | Int32.encode(buf, self.leader_epoch)?; 251 | } 252 | NullableArray(Int32, version >= 9).encode(buf, self.replica_nodes.as_slice())?; 253 | NullableArray(Int32, version >= 9).encode(buf, self.isr_nodes.as_slice())?; 254 | if version >= 5 { 255 | NullableArray(Int32, version >= 9).encode(buf, self.offline_replicas.as_slice())?; 256 | } 257 | if version >= 9 { 258 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 259 | } 260 | Ok(()) 261 | } 262 | 263 | fn calculate_size(&self, version: i16) -> usize { 264 | let mut res = 0; 265 | res += Int16::SIZE; // self.error_code 266 | res += Int32::SIZE; // self.partition_index 267 | res += Int32::SIZE; // self.leader_id 268 | if version >= 7 { 269 | res += Int32::SIZE; // self.leader_epoch 270 | } 271 | res += NullableArray(Int32, version >= 9).calculate_size(self.replica_nodes.as_slice()); 272 | res += NullableArray(Int32, version >= 9).calculate_size(self.isr_nodes.as_slice()); 273 | if version >= 5 { 274 | res += 275 | NullableArray(Int32, version >= 9).calculate_size(self.offline_replicas.as_slice()); 276 | } 277 | if version >= 9 { 278 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 279 | } 280 | res 281 | } 282 | } 283 | -------------------------------------------------------------------------------- /kafka-api/src/schemata/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
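
// The `Response::encode` path below frames every reply on the wire as:
//
//     [ i32 total size ][ response header ][ response body ]
//
// with the size prefix computed up front from `calculate_size`, so the body
// writers never have to back-patch a length after the fact.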
14 | 15 | use std::io; 16 | 17 | use crate::{ 18 | apikey::ApiMessageType, bytebuffer::ByteBuffer, codec::*, request_header::RequestHeader, 19 | response_header::ResponseHeader, 20 | }; 21 | 22 | // Upstream baseline commits 23 | // ------------------------- 24 | // Generator https://github.com/apache/kafka/commit/c08120f83f7318f15dcf14d525876d18caf6afd0 25 | // Message https://github.com/apache/kafka/commit/e71f68d6c91394db30bb1219ea882232f7be194d 26 | 27 | pub mod api_versions_request; 28 | pub mod api_versions_response; 29 | pub mod create_topic_request; 30 | pub mod create_topic_response; 31 | pub mod fetch_request; 32 | pub mod fetch_response; 33 | pub mod find_coordinator_request; 34 | pub mod find_coordinator_response; 35 | pub mod init_producer_id_request; 36 | pub mod init_producer_id_response; 37 | pub mod join_group_request; 38 | pub mod join_group_response; 39 | pub mod metadata_request; 40 | pub mod metadata_response; 41 | pub mod offset_fetch_request; 42 | pub mod offset_fetch_response; 43 | pub mod produce_request; 44 | pub mod produce_response; 45 | pub mod request_header; 46 | pub mod response_header; 47 | pub mod sync_group_request; 48 | pub mod sync_group_response; 49 | 50 | #[derive(Debug)] 51 | pub enum Request { 52 | ApiVersionsRequest(api_versions_request::ApiVersionsRequest), 53 | CreateTopicRequest(create_topic_request::CreateTopicsRequest), 54 | FetchRequest(fetch_request::FetchRequest), 55 | FindCoordinatorRequest(find_coordinator_request::FindCoordinatorRequest), 56 | InitProducerIdRequest(init_producer_id_request::InitProducerIdRequest), 57 | JoinGroupRequest(join_group_request::JoinGroupRequest), 58 | MetadataRequest(metadata_request::MetadataRequest), 59 | OffsetFetchRequest(offset_fetch_request::OffsetFetchRequest), 60 | ProduceRequest(produce_request::ProduceRequest), 61 | SyncGroupRequest(sync_group_request::SyncGroupRequest), 62 | } 63 | 64 | impl Request { 65 | pub fn decode(buf: &mut ByteBuffer) -> io::Result<(RequestHeader, Request)> { 66 | let header_version = { 67 | let mut buf = &buf[..]; 68 | let api_key = Int16.decode(&mut buf)?; 69 | let api_version = Int16.decode(&mut buf)?; 70 | ApiMessageType::try_from(api_key)?.request_header_version(api_version) 71 | }; 72 | 73 | let header = RequestHeader::read(buf, header_version)?; 74 | let api_type = ApiMessageType::try_from(header.request_api_key)?; 75 | let api_version = header.request_api_version; 76 | 77 | let request = match api_type { 78 | ApiMessageType::API_VERSIONS => { 79 | api_versions_request::ApiVersionsRequest::read(buf, api_version) 80 | .map(Request::ApiVersionsRequest) 81 | } 82 | ApiMessageType::CREATE_TOPICS => { 83 | create_topic_request::CreateTopicsRequest::read(buf, api_version) 84 | .map(Request::CreateTopicRequest) 85 | } 86 | ApiMessageType::FETCH => { 87 | fetch_request::FetchRequest::read(buf, api_version).map(Request::FetchRequest) 88 | } 89 | ApiMessageType::FIND_COORDINATOR => { 90 | find_coordinator_request::FindCoordinatorRequest::read(buf, api_version) 91 | .map(Request::FindCoordinatorRequest) 92 | } 93 | ApiMessageType::INIT_PRODUCER_ID => { 94 | init_producer_id_request::InitProducerIdRequest::read(buf, api_version) 95 | .map(Request::InitProducerIdRequest) 96 | } 97 | ApiMessageType::JOIN_GROUP => { 98 | join_group_request::JoinGroupRequest::read(buf, api_version) 99 | .map(Request::JoinGroupRequest) 100 | } 101 | ApiMessageType::METADATA => metadata_request::MetadataRequest::read(buf, api_version) 102 | .map(Request::MetadataRequest), 103 | 
ApiMessageType::OFFSET_FETCH => {
104 |                 offset_fetch_request::OffsetFetchRequest::read(buf, api_version)
105 |                     .map(Request::OffsetFetchRequest)
106 |             }
107 |             ApiMessageType::PRODUCE => {
108 |                 produce_request::ProduceRequest::read(buf, api_version).map(Request::ProduceRequest)
109 |             }
110 |             ApiMessageType::SYNC_GROUP => {
111 |                 sync_group_request::SyncGroupRequest::read(buf, api_version)
112 |                     .map(Request::SyncGroupRequest)
113 |             }
114 |             _ => unimplemented!("{}", api_type.api_key),
115 |         }?;
116 | 
117 |         Ok((header, request))
118 |     }
119 | }
120 | 
121 | #[derive(Debug)]
122 | pub enum Response {
123 |     ApiVersionsResponse(api_versions_response::ApiVersionsResponse),
124 |     CreateTopicsResponse(create_topic_response::CreateTopicsResponse),
125 |     FindCoordinatorResponse(find_coordinator_response::FindCoordinatorResponse),
126 |     FetchResponse(fetch_response::FetchResponse),
127 |     InitProducerIdResponse(init_producer_id_response::InitProducerIdResponse),
128 |     JoinGroupResponse(join_group_response::JoinGroupResponse),
129 |     MetadataResponse(metadata_response::MetadataResponse),
130 |     OffsetFetchResponse(offset_fetch_response::OffsetFetchResponse),
131 |     ProduceResponse(produce_response::ProduceResponse),
132 |     SyncGroupResponse(sync_group_response::SyncGroupResponse),
133 | }
134 | 
135 | impl Response {
136 |     pub fn encode<B: Writable>(&self, header: RequestHeader, buf: &mut B) -> io::Result<()> {
137 |         let api_type = ApiMessageType::try_from(header.request_api_key)?;
138 |         let api_version = header.request_api_version;
139 |         let correlation_id = header.correlation_id;
140 | 
141 |         let response_header_version = api_type.response_header_version(api_version);
142 |         let response_header = ResponseHeader {
143 |             correlation_id,
144 |             unknown_tagged_fields: vec![],
145 |         };
146 | 
147 |         // 1. total size
148 |         let size = self.calculate_size(api_version)
149 |             + response_header.calculate_size(response_header_version);
150 |         Int32.encode(buf, size as i32)?;
151 | 
152 |         // 2. response header
153 |         response_header.write(buf, response_header_version)?;
154 | 
155 |         // 3. response body
156 |         self.do_encode(buf, api_version)
157 |     }
158 | 
159 |     fn calculate_size(&self, version: i16) -> usize {
160 |         match self {
161 |             Response::ApiVersionsResponse(resp) => resp.calculate_size(version),
162 |             Response::CreateTopicsResponse(resp) => resp.calculate_size(version),
163 |             Response::FindCoordinatorResponse(resp) => resp.calculate_size(version),
164 |             Response::FetchResponse(resp) => resp.calculate_size(version),
165 |             Response::InitProducerIdResponse(resp) => resp.calculate_size(version),
166 |             Response::JoinGroupResponse(resp) => resp.calculate_size(version),
167 |             Response::MetadataResponse(resp) => resp.calculate_size(version),
168 |             Response::OffsetFetchResponse(resp) => resp.calculate_size(version),
169 |             Response::ProduceResponse(resp) => resp.calculate_size(version),
170 |             Response::SyncGroupResponse(resp) => resp.calculate_size(version),
171 |         }
172 |     }
173 | 
174 |     fn do_encode<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
175 |         match self {
176 |             Response::ApiVersionsResponse(resp) => resp.write(buf, version)?,
177 |             Response::CreateTopicsResponse(resp) => resp.write(buf, version)?,
178 |             Response::FindCoordinatorResponse(resp) => resp.write(buf, version)?,
179 |             Response::FetchResponse(resp) => resp.write(buf, version)?,
180 |             Response::InitProducerIdResponse(resp) => resp.write(buf, version)?,
181 |             Response::JoinGroupResponse(resp) => resp.write(buf, version)?,
182 |             Response::MetadataResponse(resp) => resp.write(buf, version)?,
183 |             Response::OffsetFetchResponse(resp) => resp.write(buf, version)?,
184 |             Response::ProduceResponse(resp) => resp.write(buf, version)?,
185 |             Response::SyncGroupResponse(resp) => resp.write(buf, version)?,
186 |         }
187 |         Ok(())
188 |     }
189 | }
190 | 
--------------------------------------------------------------------------------
/kafka-api/src/schemata/offset_fetch_request.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use std::io;
16 | 
17 | use crate::{codec::*, err_decode_message_null, err_decode_message_unsupported};
18 | 
19 | // In version 0, the request read offsets from ZK.
20 | //
21 | // Starting in version 1, the broker supports fetching offsets from the internal __consumer_offsets
22 | // topic.
23 | //
24 | // Starting in version 2, the request can contain a null topics array to indicate that offsets
25 | // for all topics should be fetched. It also returns a top-level error code
26 | // for group or coordinator level errors.
27 | //
28 | // Versions 3, 4, and 5 are the same as version 2.
29 | //
30 | // Version 6 is the first flexible version.
31 | //
32 | // Version 7 adds the require stable flag.
33 | //
34 | // Version 8 adds support for fetching offsets for multiple groups at a time.
35 | 
36 | #[derive(Debug, Default, Clone)]
37 | pub struct OffsetFetchRequest {
38 |     /// The group to fetch offsets for.
39 | pub group_id: String, 40 | /// Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. 41 | pub topics: Vec, 42 | /// Each group we would like to fetch offsets for. 43 | pub groups: Vec, 44 | /// Whether broker should hold on returning unstable offsets but set a retryable error code for 45 | /// the partitions. 46 | pub require_stable: bool, 47 | /// Unknown tagged fields. 48 | pub unknown_tagged_fields: Vec, 49 | } 50 | 51 | impl Deserializable for OffsetFetchRequest { 52 | fn read(buf: &mut B, version: i16) -> io::Result { 53 | let mut this = OffsetFetchRequest::default(); 54 | if version <= 7 { 55 | this.group_id = NullableString(version >= 6) 56 | .decode(buf)? 57 | .ok_or_else(|| err_decode_message_null("groups"))?; 58 | this.topics = NullableArray(Struct(version), version >= 6) 59 | .decode(buf)? 60 | .or_else(|| if version >= 2 { Some(vec![]) } else { None }) 61 | .ok_or_else(|| err_decode_message_null("topics"))?; 62 | } 63 | if version >= 8 { 64 | this.groups = NullableArray(Struct(version), true) 65 | .decode(buf)? 66 | .ok_or_else(|| err_decode_message_null("groups"))?; 67 | } 68 | if version >= 7 { 69 | this.require_stable = Bool.decode(buf)?; 70 | } 71 | if version >= 6 { 72 | this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 73 | } 74 | Ok(this) 75 | } 76 | } 77 | 78 | #[derive(Debug, Default, Clone)] 79 | pub struct OffsetFetchRequestTopic { 80 | /// The topic name. 81 | pub name: String, 82 | /// The partition indexes we would like to fetch offsets for. 83 | pub partition_indexes: Vec, 84 | /// Unknown tagged fields. 85 | pub unknown_tagged_fields: Vec, 86 | } 87 | 88 | impl Deserializable for OffsetFetchRequestTopic { 89 | fn read(buf: &mut B, version: i16) -> io::Result { 90 | let mut this = OffsetFetchRequestTopic { 91 | name: NullableString(version >= 6) 92 | .decode(buf)? 93 | .ok_or_else(|| err_decode_message_null("name"))?, 94 | partition_indexes: NullableArray(Int32, version >= 6) 95 | .decode(buf)? 96 | .ok_or_else(|| err_decode_message_null("partition_indexes"))?, 97 | ..Default::default() 98 | }; 99 | if version >= 6 { 100 | this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 101 | } 102 | Ok(this) 103 | } 104 | } 105 | 106 | #[derive(Debug, Default, Clone)] 107 | pub struct OffsetFetchRequestGroup { 108 | /// The group ID. 109 | pub group_id: String, 110 | /// Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. 111 | pub topics: Vec, 112 | /// Unknown tagged fields. 113 | pub unknown_tagged_fields: Vec, 114 | } 115 | 116 | impl Deserializable for OffsetFetchRequestGroup { 117 | fn read(buf: &mut B, version: i16) -> io::Result { 118 | if version > 8 { 119 | Err(err_decode_message_unsupported( 120 | version, 121 | "OffsetFetchRequestGroup", 122 | ))? 123 | } 124 | let mut this = OffsetFetchRequestGroup { 125 | group_id: NullableString(true) 126 | .decode(buf)? 127 | .ok_or_else(|| err_decode_message_null("group_id"))?, 128 | topics: NullableArray(Struct(version), true) 129 | .decode(buf)? 
130 | .unwrap_or_default(), 131 | ..Default::default() 132 | }; 133 | if version >= 6 { 134 | this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?; 135 | } 136 | Ok(this) 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /kafka-api/src/schemata/offset_fetch_response.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2023 tison 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | use std::io; 16 | 17 | use crate::{codec::*, err_encode_message_unsupported}; 18 | 19 | // Version 1 is the same as version 0. 20 | // 21 | // Version 2 adds a top-level error code. 22 | // 23 | // Version 3 adds the throttle time. 24 | // 25 | // Starting in version 4, on quota violation, brokers send out responses before throttling. 26 | // 27 | // Version 5 adds the leader epoch to the committed offset. 28 | // 29 | // Version 6 is the first flexible version. 30 | // 31 | // Version 7 adds pending offset commit as new error response on partition level. 32 | // 33 | // Version 8 is adding support for fetching offsets for multiple groups 34 | 35 | #[derive(Debug, Default, Clone)] 36 | pub struct OffsetFetchResponse { 37 | /// The duration in milliseconds for which the request was throttled due to a quota violation, 38 | /// or zero if the request did not violate any quota. 39 | pub throttle_time_ms: i32, 40 | /// The responses per topic. 41 | pub topics: Vec, 42 | /// The top-level error code, or 0 if there was no error. 43 | pub error_code: i16, 44 | /// The responses per group id. 45 | pub groups: Vec, 46 | /// Unknown tagged fields. 
47 | pub unknown_tagged_fields: Vec, 48 | } 49 | 50 | impl Serializable for OffsetFetchResponse { 51 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 52 | if version >= 3 { 53 | Int32.encode(buf, self.throttle_time_ms)?; 54 | } 55 | if version <= 7 { 56 | NullableArray(Struct(version), version >= 6).encode(buf, self.topics.as_slice())?; 57 | } 58 | if (2..=7).contains(&version) { 59 | Int16.encode(buf, self.error_code)?; 60 | } 61 | if version >= 8 { 62 | NullableArray(Struct(version), true).encode(buf, self.groups.as_slice())?; 63 | } 64 | if version >= 6 { 65 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 66 | } 67 | Ok(()) 68 | } 69 | 70 | fn calculate_size(&self, version: i16) -> usize { 71 | let mut res = 0; 72 | if version >= 3 { 73 | res += Int32::SIZE; // self.throttle_time_ms 74 | } 75 | if version <= 7 { 76 | res += 77 | NullableArray(Struct(version), version >= 6).calculate_size(self.topics.as_slice()); 78 | } 79 | if (2..=7).contains(&version) { 80 | res += Int16::SIZE; // self.error_code 81 | } 82 | if version >= 8 { 83 | res += NullableArray(Struct(version), true).calculate_size(self.groups.as_slice()); 84 | } 85 | if version >= 6 { 86 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 87 | } 88 | res 89 | } 90 | } 91 | 92 | #[derive(Debug, Default, Clone)] 93 | pub struct OffsetFetchResponseTopic { 94 | /// The topic name. 95 | pub name: String, 96 | /// The responses per partition. 97 | pub partitions: Vec, 98 | /// Unknown tagged fields. 99 | pub unknown_tagged_fields: Vec, 100 | } 101 | 102 | impl Serializable for OffsetFetchResponseTopic { 103 | fn write(&self, buf: &mut B, version: i16) -> io::Result<()> { 104 | if version > 7 { 105 | Err(err_encode_message_unsupported( 106 | version, 107 | "OffsetFetchResponseTopic", 108 | ))? 109 | } 110 | NullableString(version >= 6).encode(buf, self.name.as_str())?; 111 | NullableArray(Struct(version), version >= 6).encode(buf, self.partitions.as_slice())?; 112 | if version >= 6 { 113 | RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?; 114 | } 115 | Ok(()) 116 | } 117 | 118 | fn calculate_size(&self, version: i16) -> usize { 119 | let mut res = 0; 120 | res += NullableString(version >= 6).calculate_size(self.name.as_str()); 121 | res += 122 | NullableArray(Struct(version), version >= 6).calculate_size(self.partitions.as_slice()); 123 | if version >= 6 { 124 | res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields); 125 | } 126 | res 127 | } 128 | } 129 | 130 | #[derive(Debug, Default, Clone)] 131 | pub struct OffsetFetchResponsePartition { 132 | /// The partition index. 133 | pub partition_index: i32, 134 | /// The committed message offset. 135 | pub committed_offset: i32, 136 | /// The leader epoch. 137 | pub committed_leader_epoch: i32, 138 | /// The partition metadata. 139 | pub metadata: Option, 140 | /// The partition-level error code, or 0 if there was no error. 141 | pub error_code: i16, 142 | /// Unknown tagged fields. 
143 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
144 | }
145 | 
146 | impl Serializable for OffsetFetchResponsePartition {
147 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
148 |         Int32.encode(buf, self.partition_index)?;
149 |         Int64.encode(buf, self.committed_offset)?;
150 |         if version >= 5 {
151 |             Int32.encode(buf, self.committed_leader_epoch)?;
152 |         }
153 |         NullableString(version >= 6).encode(buf, self.metadata.as_deref())?;
154 |         Int16.encode(buf, self.error_code)?;
155 |         if version >= 6 {
156 |             RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?;
157 |         }
158 |         Ok(())
159 |     }
160 | 
161 |     fn calculate_size(&self, version: i16) -> usize {
162 |         let mut res = 0;
163 |         res += Int32::SIZE; // self.partition_index
164 |         res += Int64::SIZE; // self.committed_offset
165 |         if version >= 5 {
166 |             res += Int32::SIZE; // self.committed_leader_epoch
167 |         }
168 |         res += NullableString(version >= 6).calculate_size(self.metadata.as_deref());
169 |         res += Int16::SIZE; // self.error_code
170 |         if version >= 6 {
171 |             res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields);
172 |         }
173 |         res
174 |     }
175 | }
176 | 
177 | #[derive(Debug, Default, Clone)]
178 | pub struct OffsetFetchResponseGroup {
179 |     /// The group to fetch offsets for.
180 |     pub group_id: String,
181 |     /// The responses per topic.
182 |     pub topics: Vec<OffsetFetchResponseTopics>,
183 |     /// The group-level error code, or 0 if there was no error.
184 |     pub error_code: i16,
185 |     /// Unknown tagged fields.
186 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
187 | }
188 | 
189 | impl Serializable for OffsetFetchResponseGroup {
190 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
191 |         if version < 8 {
192 |             Err(err_encode_message_unsupported(
193 |                 version,
194 |                 "OffsetFetchResponseGroup",
195 |             ))?
196 |         }
197 |         NullableString(true).encode(buf, self.group_id.as_str())?;
198 |         NullableArray(Struct(version), true).encode(buf, self.topics.as_slice())?;
199 |         Int16.encode(buf, self.error_code)?;
200 |         RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?;
201 |         Ok(())
202 |     }
203 | 
204 |     fn calculate_size(&self, version: i16) -> usize {
205 |         let mut res = 0;
206 |         res += NullableString(true).calculate_size(self.group_id.as_str());
207 |         res += NullableArray(Struct(version), true).calculate_size(self.topics.as_slice());
208 |         res += Int16::SIZE; // self.error_code
209 |         res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields);
210 |         res
211 |     }
212 | }
213 | 
214 | #[derive(Debug, Default, Clone)]
215 | pub struct OffsetFetchResponseTopics {
216 |     /// The topic name.
217 |     pub name: String,
218 |     /// The responses per partition.
219 |     pub partitions: Vec<OffsetFetchResponsePartitions>,
220 |     /// Unknown tagged fields.
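    // NB: despite the near-identical name, `OffsetFetchResponseTopics` is the v8+ group-scoped
    // shape nested under `OffsetFetchResponseGroup`, while `OffsetFetchResponseTopic` above is
    // the pre-v8 top-level shape.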
221 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
222 | }
223 | 
224 | impl Serializable for OffsetFetchResponseTopics {
225 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
226 |         NullableString(true).encode(buf, self.name.as_str())?;
227 |         NullableArray(Struct(version), true).encode(buf, self.partitions.as_slice())?;
228 |         RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?;
229 |         Ok(())
230 |     }
231 | 
232 |     fn calculate_size(&self, version: i16) -> usize {
233 |         let mut res = 0;
234 |         res += NullableString(true).calculate_size(self.name.as_str());
235 |         res += NullableArray(Struct(version), true).calculate_size(self.partitions.as_slice());
236 |         res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields);
237 |         res
238 |     }
239 | }
240 | 
241 | #[derive(Debug, Default, Clone)]
242 | pub struct OffsetFetchResponsePartitions {
243 |     /// The partition index.
244 |     pub partition_index: i32,
245 |     /// The committed message offset.
246 |     pub committed_offset: i64,
247 |     /// The leader epoch.
248 |     pub committed_leader_epoch: i32,
249 |     /// The partition metadata.
250 |     pub metadata: Option<String>,
251 |     /// The partition-level error code, or 0 if there was no error.
252 |     pub error_code: i16,
253 |     /// Unknown tagged fields.
254 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
255 | }
256 | 
257 | impl Serializable for OffsetFetchResponsePartitions {
258 |     fn write<B: Writable>(&self, buf: &mut B, _version: i16) -> io::Result<()> {
259 |         Int32.encode(buf, self.partition_index)?;
260 |         Int64.encode(buf, self.committed_offset)?;
261 |         Int32.encode(buf, self.committed_leader_epoch)?;
262 |         NullableString(true).encode(buf, self.metadata.as_deref())?;
263 |         Int16.encode(buf, self.error_code)?;
264 |         RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?;
265 |         Ok(())
266 |     }
267 | 
268 |     fn calculate_size(&self, _version: i16) -> usize {
269 |         let mut res = 0;
270 |         res += Int32::SIZE; // self.partition_index
271 |         res += Int64::SIZE; // self.committed_offset
272 |         res += Int32::SIZE; // self.committed_leader_epoch
273 |         res += NullableString(true).calculate_size(self.metadata.as_deref());
274 |         res += Int16::SIZE; // self.error_code
275 |         res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields);
276 |         res
277 |     }
278 | }
279 | 
--------------------------------------------------------------------------------
/kafka-api/src/schemata/produce_request.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use std::io;
16 | 
17 | use crate::{
18 |     codec::*, err_decode_message_null, err_decode_message_unsupported, records::MutableRecords,
19 | };
20 | 
21 | // Versions 1 and 2 are the same as version 0.
22 | //
23 | // Version 3 adds the transactional ID, which is used for authorization when attempting to write
24 | // transactional data. Version 3 also adds support for Kafka Message Format v2.
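// (Message format v2 is the record-batch format that this crate models in its `records` module.)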
25 | //
26 | // Version 4 is the same as version 3, but the requester must be prepared to handle a
27 | // KAFKA_STORAGE_ERROR.
28 | //
29 | // Versions 5 and 6 are the same as version 3.
30 | //
31 | // Starting in version 7, records can be produced using ZStandard compression. See KIP-110.
32 | //
33 | // Starting in version 8, the response has RecordErrors and ErrorMessage. See KIP-467.
34 | //
35 | // Version 9 enables flexible versions.
36 | 
37 | #[derive(Debug, Default, Clone)]
38 | pub struct ProduceRequest {
39 |     /// The transactional ID, or null if the producer is not transactional.
40 |     pub transactional_id: Option<String>,
41 |     /// The number of acknowledgments the producer requires the leader to have received before
42 |     /// considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the
43 |     /// leader and -1 for the full ISR.
44 |     pub acks: i16,
45 |     /// The timeout to await a response in milliseconds.
46 |     pub timeout_ms: i32,
47 |     /// Each topic to produce to.
48 |     pub topic_data: Vec<TopicProduceData>,
49 |     /// Unknown tagged fields.
50 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
51 | }
52 | 
53 | impl Deserializable for ProduceRequest {
54 |     fn read<B: Readable>(buf: &mut B, version: i16) -> io::Result<Self> {
55 |         let mut this = ProduceRequest::default();
56 |         if version >= 3 {
57 |             this.transactional_id = NullableString(version >= 9).decode(buf)?;
58 |         }
59 |         this.acks = Int16.decode(buf)?;
60 |         this.timeout_ms = Int32.decode(buf)?;
61 |         this.topic_data = NullableArray(Struct(version), version >= 9)
62 |             .decode(buf)?
63 |             .ok_or_else(|| err_decode_message_null("topic_data"))?;
64 |         if version >= 9 {
65 |             this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?;
66 |         }
67 |         Ok(this)
68 |     }
69 | }
70 | 
71 | #[derive(Debug, Default, Clone)]
72 | pub struct TopicProduceData {
73 |     /// The topic name.
74 |     pub name: String,
75 |     /// Each partition to produce to.
76 |     pub partition_data: Vec<PartitionProduceData>,
77 |     /// Unknown tagged fields.
78 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
79 | }
80 | 
81 | impl Deserializable for TopicProduceData {
82 |     fn read<B: Readable>(buf: &mut B, version: i16) -> io::Result<Self> {
83 |         if version > 9 {
84 |             Err(err_decode_message_unsupported(version, "TopicProduceData"))?
85 |         }
86 |         let mut this = TopicProduceData {
87 |             name: NullableString(version >= 9)
88 |                 .decode(buf)?
89 |                 .ok_or_else(|| err_decode_message_null("name"))?,
90 |             partition_data: NullableArray(Struct(version), version >= 9)
91 |                 .decode(buf)?
92 |                 .ok_or_else(|| err_decode_message_null("partition_data"))?,
93 |             ..Default::default()
94 |         };
95 |         if version >= 9 {
96 |             this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?;
97 |         }
98 |         Ok(this)
99 |     }
100 | }
101 | 
102 | #[derive(Debug, Default, Clone)]
103 | pub struct PartitionProduceData {
104 |     /// The partition index.
105 |     pub index: i32,
106 |     /// The record data to be produced.
107 |     pub records: Option<MutableRecords>,
108 |     /// Unknown tagged fields.
109 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
110 | }
111 | 
112 | impl Deserializable for PartitionProduceData {
113 |     fn read<B: Readable>(buf: &mut B, version: i16) -> io::Result<Self> {
114 |         if version > 9 {
115 |             Err(err_decode_message_unsupported(
116 |                 version,
117 |                 "PartitionProduceData",
118 |             ))?
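            // `?` applied to an `Err(..)` value returns early here, so versions newer than the
            // last one this struct knows about are rejected instead of being half-parsed.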
119 |         }
120 |         let mut this = PartitionProduceData {
121 |             index: Int32.decode(buf)?,
122 |             records: NullableRecords(version >= 9).decode(buf)?,
123 |             ..Default::default()
124 |         };
125 |         if version >= 9 {
126 |             this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?;
127 |         }
128 |         Ok(this)
129 |     }
130 | }
131 | 
--------------------------------------------------------------------------------
/kafka-api/src/schemata/produce_response.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use std::io;
16 | 
17 | use crate::{codec::*, err_encode_message_unsupported};
18 | 
19 | // Version 1 added the throttle time.
20 | //
21 | // Version 2 added the log append time.
22 | //
23 | // Version 3 is the same as version 2.
24 | //
25 | // Version 4 added KAFKA_STORAGE_ERROR as a possible error code.
26 | //
27 | // Version 5 added LogStartOffset to filter out spurious
28 | // OutOfOrderSequenceExceptions on the client.
29 | //
30 | // Version 8 added RecordErrors and ErrorMessage to include information about
31 | // records that cause the whole batch to be dropped. See KIP-467 for details.
32 | //
33 | // Version 9 enables flexible versions.
34 | 
35 | #[derive(Debug, Default, Clone)]
36 | pub struct ProduceResponse {
37 |     /// Each produce response.
38 |     pub responses: Vec<TopicProduceResponse>,
39 |     /// The duration in milliseconds for which the request was throttled due to a quota violation,
40 |     /// or zero if the request did not violate any quota.
41 |     pub throttle_time_ms: i32,
42 |     /// Unknown tagged fields.
43 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
44 | }
45 | 
46 | impl Serializable for ProduceResponse {
47 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
48 |         NullableArray(Struct(version), version >= 9).encode(buf, self.responses.as_slice())?;
49 |         if version >= 1 {
50 |             Int32.encode(buf, self.throttle_time_ms)?;
51 |         }
52 |         if version >= 9 {
53 |             RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?;
54 |         }
55 |         Ok(())
56 |     }
57 | 
58 |     fn calculate_size(&self, version: i16) -> usize {
59 |         let mut res = 0;
60 |         res +=
61 |             NullableArray(Struct(version), version >= 9).calculate_size(self.responses.as_slice());
62 |         if version >= 1 {
63 |             res += Int32::SIZE; // self.throttle_time_ms
64 |         }
65 |         if version >= 9 {
66 |             res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields);
67 |         }
68 |         res
69 |     }
70 | }
71 | 
72 | #[derive(Debug, Default, Clone)]
73 | pub struct TopicProduceResponse {
74 |     /// The topic name.
75 |     pub name: String,
76 |     /// Each partition that we produced to within the topic.
77 |     pub partition_responses: Vec<PartitionProduceResponse>,
78 |     /// Unknown tagged fields.
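    // NB: unusually for Kafka responses, `ProduceResponse` above serializes the `responses`
    // array before `throttle_time_ms` instead of putting the throttle time first. The throttle
    // time is gated on `version >= 1` to match "Version 1 added the throttle time" above.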
79 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
80 | }
81 | 
82 | impl Serializable for TopicProduceResponse {
83 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
84 |         NullableString(version >= 9).encode(buf, self.name.as_str())?;
85 |         NullableArray(Struct(version), version >= 9)
86 |             .encode(buf, self.partition_responses.as_slice())?;
87 |         if version >= 9 {
88 |             RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?;
89 |         }
90 |         Ok(())
91 |     }
92 | 
93 |     fn calculate_size(&self, version: i16) -> usize {
94 |         let mut res = 0;
95 |         res += NullableString(version >= 9).calculate_size(self.name.as_str());
96 |         res += NullableArray(Struct(version), version >= 9)
97 |             .calculate_size(self.partition_responses.as_slice());
98 |         if version >= 9 {
99 |             res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields);
100 |         }
101 |         res
102 |     }
103 | }
104 | 
105 | #[derive(Debug, Default, Clone)]
106 | pub struct PartitionProduceResponse {
107 |     /// The partition index.
108 |     pub index: i32,
109 |     /// The error code, or 0 if there was no error.
110 |     pub error_code: i16,
111 |     /// The base offset.
112 |     pub base_offset: i64,
113 |     /// The timestamp returned by broker after appending the messages. If CreateTime is used for
114 |     /// the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp
115 |     /// will be the broker local time when the messages are appended.
116 |     pub log_append_time_ms: i64,
117 |     /// The log start offset.
118 |     pub log_start_offset: i64,
119 |     /// The batch indices of records that caused the batch to be dropped.
120 |     pub record_errors: Vec<BatchIndexAndErrorMessage>,
121 |     /// The global error message summarizing the common root cause of the records that caused the
122 |     /// batch to be dropped.
123 |     pub error_message: Option<String>,
124 |     /// Unknown tagged fields.
125 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
126 | }
127 | 
128 | impl Serializable for PartitionProduceResponse {
129 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
130 |         Int32.encode(buf, self.index)?;
131 |         Int16.encode(buf, self.error_code)?;
132 |         Int64.encode(buf, self.base_offset)?;
133 |         if version >= 2 {
134 |             Int64.encode(buf, self.log_append_time_ms)?;
135 |         }
136 |         if version >= 5 {
137 |             Int64.encode(buf, self.log_start_offset)?;
138 |         }
139 |         if version >= 8 {
140 |             NullableArray(Struct(version), version >= 9)
141 |                 .encode(buf, self.record_errors.as_slice())?;
142 |         }
143 |         if version >= 8 {
144 |             NullableString(version >= 9).encode(buf, self.error_message.as_deref())?;
145 |         }
146 |         if version >= 9 {
147 |             RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?;
148 |         }
149 |         Ok(())
150 |     }
151 | 
152 |     fn calculate_size(&self, version: i16) -> usize {
153 |         let mut res = 0;
154 |         res += Int32::SIZE; // self.index
155 |         res += Int16::SIZE; // self.error_code
156 |         res += Int64::SIZE; // self.base_offset
157 |         if version >= 2 {
158 |             res += Int64::SIZE; // self.log_append_time_ms
159 |         }
160 |         if version >= 5 {
161 |             res += Int64::SIZE; // self.log_start_offset
162 |         }
163 |         if version >= 8 {
164 |             res += NullableArray(Struct(version), version >= 9)
165 |                 .calculate_size(self.record_errors.as_slice());
166 |         }
167 |         if version >= 8 {
168 |             res += NullableString(version >= 9).calculate_size(self.error_message.as_deref());
169 |         }
170 |         if version >= 9 {
171 |             res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields);
172 |         }
173 |         res
174 |     }
175 | }
176 | 
177 | #[derive(Debug, Default, Clone)]
178 | pub struct BatchIndexAndErrorMessage {
179 |     /// The batch index of the record that caused the batch to be dropped.
180 |     pub batch_index: i32,
181 |     /// The error message of the record that caused the batch to be dropped.
182 |     pub batch_index_error_message: Option<String>,
183 |     /// Unknown tagged fields.
184 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
185 | }
186 | 
187 | impl Serializable for BatchIndexAndErrorMessage {
188 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
189 |         if version < 8 {
190 |             Err(err_encode_message_unsupported(
191 |                 version,
192 |                 "BatchIndexAndErrorMessage",
193 |             ))?
194 |         }
195 |         Int32.encode(buf, self.batch_index)?;
196 |         NullableString(version >= 9).encode(buf, self.batch_index_error_message.as_deref())?;
197 |         if version >= 9 {
198 |             RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?;
199 |         }
200 |         Ok(())
201 |     }
202 | 
203 |     fn calculate_size(&self, version: i16) -> usize {
204 |         let mut res = 0;
205 |         res += Int32::SIZE; // self.batch_index
206 |         res +=
207 |             NullableString(version >= 9).calculate_size(self.batch_index_error_message.as_deref());
208 |         if version >= 9 {
209 |             res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields);
210 |         }
211 |         res
212 |     }
213 | }
214 | 
--------------------------------------------------------------------------------
/kafka-api/src/schemata/request_header.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use std::io;
16 | 
17 | use crate::codec::*;
18 | 
19 | // Version 0 of the RequestHeader is only used by v0 of ControlledShutdownRequest.
20 | //
21 | // Version 1 is the first version with ClientId.
22 | //
23 | // Version 2 is the first flexible version.
24 | 
25 | #[derive(Debug, Default, Clone)]
26 | pub struct RequestHeader {
27 |     /// The API key of this request.
28 |     pub request_api_key: i16,
29 |     /// The API version of this request.
30 |     pub request_api_version: i16,
31 |     /// The correlation ID of this request.
32 |     pub correlation_id: i32,
33 |     /// The client ID string.
34 |     pub client_id: String,
35 |     /// Unknown tagged fields.
36 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
37 | }
38 | 
39 | impl Deserializable for RequestHeader {
40 |     fn read<B: Readable>(buf: &mut B, version: i16) -> io::Result<Self> {
41 |         let mut res = RequestHeader {
42 |             request_api_key: Int16.decode(buf)?,
43 |             request_api_version: Int16.decode(buf)?,
44 |             correlation_id: Int32.decode(buf)?,
45 |             ..Default::default()
46 |         };
47 |         if version >= 1 {
48 |             res.client_id = NullableString(false).decode(buf)?.unwrap_or_default();
49 |         }
50 |         if version >= 2 {
51 |             res.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?;
52 |         }
53 |         Ok(res)
54 |     }
55 | }
56 | 
--------------------------------------------------------------------------------
/kafka-api/src/schemata/response_header.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use std::io;
16 | 
17 | use crate::codec::*;
18 | 
19 | // Version 1 is the first flexible version.
20 | 
21 | #[derive(Debug, Default, Clone)]
22 | pub struct ResponseHeader {
23 |     /// The correlation ID of this response.
24 |     pub correlation_id: i32,
25 |     /// Unknown tagged fields.
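    // Whether the tagged-field section below exists at all depends on the header version, which
    // is never sent on the wire: both sides derive it from the request's API key and API version.
    // (Note also the quirk above: even in flexible request header v2, `client_id` stays a
    // non-compact nullable string, hence `NullableString(false)`.)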
26 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
27 | }
28 | 
29 | impl Serializable for ResponseHeader {
30 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
31 |         Int32.encode(buf, self.correlation_id)?;
32 |         if version >= 1 {
33 |             RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?;
34 |         }
35 |         Ok(())
36 |     }
37 | 
38 |     fn calculate_size(&self, version: i16) -> usize {
39 |         let mut res = 0;
40 |         res += Int32::SIZE; // self.correlation_id
41 |         if version >= 1 {
42 |             res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields);
43 |         }
44 |         res
45 |     }
46 | }
47 | 
--------------------------------------------------------------------------------
/kafka-api/src/schemata/sync_group_request.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use std::io;
16 | 
17 | use crate::{
18 |     bytebuffer::ByteBuffer, codec::*, err_decode_message_null, err_decode_message_unsupported,
19 | };
20 | 
21 | // Versions 1 and 2 are the same as version 0.
22 | //
23 | // Starting from version 3, we add a new field called groupInstanceId to indicate member identity
24 | // across restarts.
25 | //
26 | // Version 4 is the first flexible version.
27 | //
28 | // Starting from version 5, the client sends the Protocol Type and the Protocol Name
29 | // to the broker (KIP-559). The broker will reject the request if they are inconsistent
30 | // with the Type and Name known by the broker.
31 | 
32 | #[derive(Debug, Default, Clone)]
33 | pub struct SyncGroupRequest {
34 |     /// The unique group identifier.
35 |     pub group_id: String,
36 |     /// The generation of the group.
37 |     pub generation_id: i32,
38 |     /// The member ID assigned by the group.
39 |     pub member_id: String,
40 |     /// The unique identifier of the consumer instance provided by end user.
41 |     pub group_instance_id: Option<String>,
42 |     /// The group protocol type.
43 |     pub protocol_type: Option<String>,
44 |     /// The group protocol name.
45 |     pub protocol_name: Option<String>,
46 |     /// Each assignment.
47 |     pub assignments: Vec<SyncGroupRequestAssignment>,
48 |     /// Unknown tagged fields.
49 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
50 | }
51 | 
52 | impl Deserializable for SyncGroupRequest {
53 |     fn read<B: Readable>(buf: &mut B, version: i16) -> io::Result<Self> {
54 |         let mut this = SyncGroupRequest {
55 |             group_id: NullableString(version >= 4)
56 |                 .decode(buf)?
57 |                 .ok_or_else(|| err_decode_message_null("group_id"))?,
58 |             generation_id: Int32.decode(buf)?,
59 |             member_id: NullableString(version >= 4)
60 |                 .decode(buf)?
61 |                 .ok_or_else(|| err_decode_message_null("member_id"))?,
62 |             ..Default::default()
63 |         };
64 |         if version >= 3 {
65 |             this.group_instance_id = NullableString(version >= 4).decode(buf)?;
66 |         }
67 |         if version >= 5 {
68 |             this.protocol_type = NullableString(true).decode(buf)?;
69 |             this.protocol_name = NullableString(true).decode(buf)?;
70 |         }
71 |         this.assignments = NullableArray(Struct(version), version >= 4)
72 |             .decode(buf)?
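            // The wire format allows a null array here, but `assignments` is a required field,
            // so a null is surfaced as a decode error rather than silently becoming an empty Vec.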
73 |             .ok_or_else(|| err_decode_message_null("assignments"))?;
74 |         if version >= 4 {
75 |             this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?;
76 |         }
77 |         Ok(this)
78 |     }
79 | }
80 | 
81 | #[derive(Debug, Default, Clone)]
82 | pub struct SyncGroupRequestAssignment {
83 |     /// The ID of the member to assign.
84 |     pub member_id: String,
85 |     /// The member assignment.
86 |     pub assignment: ByteBuffer,
87 |     /// Unknown tagged fields.
88 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
89 | }
90 | 
91 | impl Deserializable for SyncGroupRequestAssignment {
92 |     fn read<B: Readable>(buf: &mut B, version: i16) -> io::Result<Self> {
93 |         if version > 5 {
94 |             Err(err_decode_message_unsupported(
95 |                 version,
96 |                 "SyncGroupRequestAssignment",
97 |             ))?
98 |         }
99 |         let mut this = SyncGroupRequestAssignment {
100 |             member_id: NullableString(version >= 4)
101 |                 .decode(buf)?
102 |                 .ok_or_else(|| err_decode_message_null("member_id"))?,
103 |             assignment: NullableBytes(version >= 4)
104 |                 .decode(buf)?
105 |                 .ok_or_else(|| err_decode_message_null("assignment"))?,
106 |             ..Default::default()
107 |         };
108 |         if version >= 4 {
109 |             this.unknown_tagged_fields = RawTaggedFieldList.decode(buf)?;
110 |         }
111 |         Ok(this)
112 |     }
113 | }
114 | 
--------------------------------------------------------------------------------
/kafka-api/src/schemata/sync_group_response.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | use std::io;
16 | 
17 | use crate::{bytebuffer::ByteBuffer, codec::*};
18 | 
19 | // Version 1 adds throttle time.
20 | //
21 | // Starting in version 2, on quota violation, brokers send out responses before throttling.
22 | //
23 | // Starting from version 3, syncGroupRequest supports a new field called groupInstanceId to indicate
24 | // member identity across restarts.
25 | //
26 | // Version 4 is the first flexible version.
27 | //
28 | // Starting from version 5, the broker sends back the Protocol Type and the Protocol Name
29 | // to the client (KIP-559).
30 | 
31 | #[derive(Debug, Default, Clone)]
32 | pub struct SyncGroupResponse {
33 |     /// The duration in milliseconds for which the request was throttled due to a quota violation,
34 |     /// or zero if the request did not violate any quota.
35 |     pub throttle_time_ms: i32,
36 |     /// The error code, or 0 if there was no error.
37 |     pub error_code: i16,
38 |     /// The group protocol type.
39 |     pub protocol_type: Option<String>,
40 |     /// The group protocol name.
41 |     pub protocol_name: Option<String>,
42 |     /// The member assignment.
43 |     pub assignment: ByteBuffer,
44 |     /// Unknown tagged fields.
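    // KIP-559 fields: `protocol_type`/`protocol_name` above are only serialized for v5+, and are
    // always compact strings there because v4 already made this message flexible.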
45 |     pub unknown_tagged_fields: Vec<RawTaggedField>,
46 | }
47 | 
48 | impl Serializable for SyncGroupResponse {
49 |     fn write<B: Writable>(&self, buf: &mut B, version: i16) -> io::Result<()> {
50 |         if version >= 1 {
51 |             Int32.encode(buf, self.throttle_time_ms)?;
52 |         }
53 |         Int16.encode(buf, self.error_code)?;
54 |         if version >= 5 {
55 |             NullableString(true).encode(buf, self.protocol_type.as_deref())?;
56 |             NullableString(true).encode(buf, self.protocol_name.as_deref())?;
57 |         }
58 |         NullableBytes(version >= 4).encode(buf, &self.assignment)?;
59 |         if version >= 4 {
60 |             RawTaggedFieldList.encode(buf, &self.unknown_tagged_fields)?;
61 |         }
62 |         Ok(())
63 |     }
64 | 
65 |     fn calculate_size(&self, version: i16) -> usize {
66 |         let mut res = 0;
67 |         if version >= 1 {
68 |             res += Int32::SIZE; // self.throttle_time_ms
69 |         }
70 |         res += Int16::SIZE; // self.error_code
71 |         if version >= 5 {
72 |             res += NullableString(true).calculate_size(self.protocol_type.as_deref());
73 |             res += NullableString(true).calculate_size(self.protocol_name.as_deref());
74 |         }
75 |         res += NullableBytes(version >= 4).calculate_size(&self.assignment);
76 |         if version >= 4 {
77 |             res += RawTaggedFieldList.calculate_size(&self.unknown_tagged_fields);
78 |         }
79 |         res
80 |     }
81 | }
82 | 
--------------------------------------------------------------------------------
/kafka-api/src/sendable/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
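
// `SendBuilder` below coalesces scalar writes into one contiguous `Bytes` segment, while large
// payloads (`ByteBuffer`s and record batches) become separate segments that are cloned shallowly
// instead of copied; `finish()` returns the ordered segments to write to the socket.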
14 | 
15 | use std::io;
16 | 
17 | use bytes::BufMut;
18 | 
19 | use crate::{bytebuffer::ByteBuffer, codec::writable::Writable, records::ReadOnlyRecords};
20 | 
21 | pub struct SendBuilder {
22 |     sends: Vec<Sendable>,
23 |     bs: bytes::BytesMut,
24 | }
25 | 
26 | impl Writable for SendBuilder {
27 |     fn write_i8(&mut self, n: i8) -> io::Result<()> {
28 |         self.bs.put_i8(n);
29 |         Ok(())
30 |     }
31 | 
32 |     fn write_i16(&mut self, n: i16) -> io::Result<()> {
33 |         self.bs.put_i16(n);
34 |         Ok(())
35 |     }
36 | 
37 |     fn write_i32(&mut self, n: i32) -> io::Result<()> {
38 |         self.bs.put_i32(n);
39 |         Ok(())
40 |     }
41 | 
42 |     fn write_i64(&mut self, n: i64) -> io::Result<()> {
43 |         self.bs.put_i64(n);
44 |         Ok(())
45 |     }
46 | 
47 |     fn write_u8(&mut self, n: u8) -> io::Result<()> {
48 |         self.bs.put_u8(n);
49 |         Ok(())
50 |     }
51 | 
52 |     fn write_u16(&mut self, n: u16) -> io::Result<()> {
53 |         self.bs.put_u16(n);
54 |         Ok(())
55 |     }
56 | 
57 |     fn write_u32(&mut self, n: u32) -> io::Result<()> {
58 |         self.bs.put_u32(n);
59 |         Ok(())
60 |     }
61 | 
62 |     fn write_u64(&mut self, n: u64) -> io::Result<()> {
63 |         self.bs.put_u64(n);
64 |         Ok(())
65 |     }
66 | 
67 |     fn write_f32(&mut self, n: f32) -> io::Result<()> {
68 |         self.bs.put_f32(n);
69 |         Ok(())
70 |     }
71 | 
72 |     fn write_f64(&mut self, n: f64) -> io::Result<()> {
73 |         self.bs.put_f64(n);
74 |         Ok(())
75 |     }
76 | 
77 |     fn write_slice(&mut self, src: &[u8]) -> io::Result<()> {
78 |         self.bs.put_slice(src);
79 |         Ok(())
80 |     }
81 | 
82 |     fn write_bytes(&mut self, buf: &ByteBuffer) -> io::Result<()> {
83 |         self.flush_bytes();
84 |         self.sends.push(Sendable::ByteBuffer(buf.clone()));
85 |         Ok(())
86 |     }
87 | 
88 |     fn write_records(&mut self, r: &ReadOnlyRecords) -> io::Result<()> {
89 |         self.flush_bytes();
90 |         // shallow clone - only metadata copied
91 |         let r = r.clone();
92 |         self.sends.push(Sendable::Records(r));
93 |         Ok(())
94 |     }
95 | }
96 | 
97 | impl Default for SendBuilder {
98 |     fn default() -> Self {
99 |         SendBuilder::new()
100 |     }
101 | }
102 | 
103 | impl SendBuilder {
104 |     pub fn new() -> Self {
105 |         SendBuilder {
106 |             sends: vec![],
107 |             bs: bytes::BytesMut::new(),
108 |         }
109 |     }
110 | 
111 |     pub fn finish(mut self) -> Vec<Sendable> {
112 |         self.flush_bytes();
113 |         self.sends
114 |     }
115 | 
116 |     fn flush_bytes(&mut self) {
117 |         if !self.bs.is_empty() {
118 |             let bs = self.bs.split().freeze();
119 |             self.sends.push(Sendable::Bytes(bs));
120 |         }
121 |     }
122 | }
123 | 
124 | #[derive(Debug)]
125 | pub enum Sendable {
126 |     Bytes(bytes::Bytes),
127 |     ByteBuffer(ByteBuffer),
128 |     Records(ReadOnlyRecords),
129 | }
130 | 
131 | impl Sendable {
132 |     // io::Write cannot leverage the sendfile syscall when copying bytes from a file to a
133 |     // socket. Rust doesn't seem to have a good standard solution yet, so we keep using
134 |     // io::Write here, but we are open to any other solution.
135 |     pub fn write_to<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
136 |         match self {
137 |             Sendable::Bytes(bs) => writer.write_all(bs.as_ref()),
138 |             Sendable::ByteBuffer(buf) => writer.write_all(buf.as_bytes()),
139 |             Sendable::Records(r) => r.write_to(writer),
140 |         }
141 |     }
142 | }
143 | 
--------------------------------------------------------------------------------
/licenserc.toml:
--------------------------------------------------------------------------------
1 | # Copyright 2023 tison
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | 
15 | headerPath = "Apache-2.0.txt"
16 | 
17 | includes = [
18 |     '**/*.rs',
19 |     '**/*.yml',
20 |     '**/*.yaml',
21 |     '**/Cargo.toml',
22 | ]
23 | 
24 | [properties]
25 | inceptionYear = 2023
26 | copyrightOwner = "tison <wander4096@gmail.com>"
--------------------------------------------------------------------------------
/rust-toolchain.toml:
--------------------------------------------------------------------------------
1 | [toolchain]
2 | channel = "nightly-2023-08-01"
3 | components = ["rustfmt", "clippy"]
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | imports_granularity = "Crate"
2 | group_imports = "StdExternalCrate"
3 | comment_width = 120
4 | wrap_comments = true
5 | format_code_in_doc_comments = true
--------------------------------------------------------------------------------
/simplesrv/Cargo.toml:
--------------------------------------------------------------------------------
1 | # Copyright 2023 tison
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | 
15 | [package]
16 | name = "simplesrv"
17 | authors.workspace = true
18 | edition.workspace = true
19 | version.workspace = true
20 | license.workspace = true
21 | publish = false
22 | 
23 | description = "Simple Kafka server to demonstrate API usage."
24 | 
25 | [dependencies]
26 | kafka-api = { path = "../kafka-api" }
27 | 
28 | bytes = "1.4.0"
29 | tracing = "0.1.37"
30 | tracing-subscriber = "0.3.17"
31 | uuid = { version = "1.3.4", features = ["v4"] }
--------------------------------------------------------------------------------
/simplesrv/src/main.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2023 tison
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
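
// Wire framing: every Kafka request and response is a 4-byte big-endian length prefix followed
// by that many bytes of payload. `dispatch` below reads one frame at a time, decodes the request
// header and body, and writes back the response segments produced by `SendBuilder`.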
14 | 
15 | use std::{
16 |     io,
17 |     io::Read,
18 |     mem::size_of,
19 |     net::{SocketAddr, TcpListener, TcpStream},
20 |     sync::{Arc, Mutex},
21 | };
22 | 
23 | use bytes::Buf;
24 | use kafka_api::{bytebuffer::ByteBuffer, sendable::SendBuilder, Request};
25 | use simplesrv::{Broker, BrokerMeta, ClientInfo, ClusterMeta};
26 | use tracing::{debug, error, error_span, info, Level};
27 | 
28 | fn main() -> io::Result<()> {
29 |     tracing_subscriber::fmt()
30 |         .with_max_level(Level::TRACE)
31 |         .init();
32 | 
33 |     let addr: SocketAddr = "127.0.0.1:9092".parse().unwrap();
34 |     let listener = TcpListener::bind(addr)?;
35 |     info!("Starting Kafka Simple Server at {}", addr);
36 | 
37 |     let broker_meta = BrokerMeta {
38 |         node_id: 1,
39 |         host: addr.ip().to_string(),
40 |         port: addr.port() as i32,
41 |     };
42 |     let cluster_meta = ClusterMeta {
43 |         cluster_id: "Kafka Simple Server".to_string(),
44 |         controller_id: 1,
45 |         brokers: vec![broker_meta.clone()],
46 |     };
47 |     let broker = Arc::new(Mutex::new(Broker::new(broker_meta, cluster_meta)));
48 | 
49 |     loop {
50 |         let (socket, addr) = listener.accept()?;
51 |         let broker = broker.clone();
52 |         std::thread::spawn(move || {
53 |             let addr = addr.to_string();
54 |             error_span!("connection", addr).in_scope(|| {
55 |                 info!("Accept socket on {}", addr);
56 |                 match dispatch(socket, broker) {
57 |                     Ok(()) => {
58 |                         info!("connection closed");
59 |                     }
60 |                     Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => {
61 |                         info!("connection closed by client")
62 |                     }
63 |                     Err(err) => {
64 |                         error!(?err, "connection failed");
65 |                     }
66 |                 }
67 |             })
68 |         });
69 |     }
70 | }
71 | 
72 | fn dispatch(mut socket: TcpStream, broker: Arc<Mutex<Broker>>) -> io::Result<()> {
73 |     let client_host = socket.peer_addr()?;
74 | 
75 |     loop {
76 |         let n = {
77 |             let mut buf = [0; size_of::<i32>()];
78 |             socket.read_exact(&mut buf)?;
79 |             i32::from_be_bytes(buf) as usize
80 |         };
81 | 
82 |         let mut buf = {
83 |             let mut buf = vec![0u8; n];
84 |             socket.read_exact(&mut buf)?;
85 |             ByteBuffer::new(buf)
86 |         };
87 | 
88 |         let (header, request) = Request::decode(&mut buf)?;
89 |         assert!(!buf.has_remaining(), "remaining bytes unparsed");
90 |         debug!("Receive request {request:?}");
91 | 
92 |         let response = {
93 |             let client_info = ClientInfo {
94 |                 client_id: header.client_id.clone(),
95 |                 client_host: client_host.to_string(),
96 |             };
97 |             let mut broker = broker.lock().unwrap();
98 |             broker.reply(client_info, header.clone(), request)
99 |         };
100 |         let mut builder = SendBuilder::new();
101 |         response.encode(header, &mut builder)?;
102 |         let sends = builder.finish();
103 |         for send in sends {
104 |             send.write_to(&mut socket)?;
105 |         }
106 |     }
107 | }
--------------------------------------------------------------------------------