├── .github
│   ├── FUNDING.yml
│   ├── dependabot.yml
│   └── workflows
│       ├── go.yml
│       └── goreleaser.yml
├── .gitignore
├── .golangci.yml
├── .goreleaser.yml
├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── asciicinema.cast
├── asciicinema.gif
├── cmd
│   └── kaf
│       ├── completion.go
│       ├── config.go
│       ├── consume.go
│       ├── group.go
│       ├── kaf.go
│       ├── main_test.go
│       ├── node.go
│       ├── node_test.go
│       ├── nodes_test.go
│       ├── oauth.go
│       ├── produce.go
│       ├── produce_consume_test.go
│       ├── query.go
│       ├── scram_client.go
│       ├── topic.go
│       ├── topic_test.go
│       ├── topics_test.go
│       └── valid.go
├── docker-compose.yaml
├── examples
│   ├── README.md
│   ├── aws_msk_iam.yaml
│   ├── basic.yaml
│   ├── sasl_plaintext.yaml
│   ├── sasl_ssl.yaml
│   ├── sasl_ssl_custom_ca.yaml
│   ├── sasl_ssl_insecure.yaml
│   ├── sasl_ssl_oauth.yaml
│   ├── sasl_ssl_oauth_token.yaml
│   ├── sasl_ssl_scram.yaml
│   ├── sasl_v1_handshake.yaml
│   ├── schema_registry_basic_auth.yaml
│   └── ssl_keys.yaml
├── go.mod
├── go.sum
├── godownloader.sh
└── pkg
    ├── avro
    │   └── schema.go
    ├── config
    │   ├── config.go
    │   └── confluent_cloud.go
    ├── partitioner
    │   └── jvm.go
    ├── proto
    │   └── proto.go
    └── streams
        ├── decoder.go
        └── subscription_info.go
/.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: birdayz 4 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "daily" 12 | - package-ecosystem: "docker" 13 | directories: 14 | - "/" 15 | schedule: 16 | interval: "daily" 17 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | 11 | build: 12 | name: Build 13 | runs-on: ubuntu-latest 14 | steps: 15 | 16 | - name: Set up Go 1.x 17 | uses: actions/setup-go@v2 18 | with: 19 | go-version: ^1.22 20 | id: go 21 | 22 | - name: Check out code into the Go module directory 23 | uses: actions/checkout@v2 24 | 25 | - name: Build 26 | run: go build -v -ldflags "-s -w -X main.version=$GITHUB_REF -X main.commit=${GITHUB_SHA::8}" ./cmd/kaf 27 | 28 | - name: Test 29 | run: go test -v ./...
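# Note: the -X flags in the Build step above overwrite package-level string
# variables at link time. A minimal sketch of the declarations this assumes
# in package main (the names must match the -X paths; the actual defaults in
# the kaf sources may differ):
#
#   var (
#       version = "dev"  // overwritten by -X main.version=...
#       commit  = "none" // overwritten by -X main.commit=...
#   )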
30 | -------------------------------------------------------------------------------- /.github/workflows/goreleaser.yml: -------------------------------------------------------------------------------- 1 | name: goreleaser 2 | 3 | on: 4 | push: 5 | tags: 6 | - '*' 7 | 8 | jobs: 9 | goreleaser: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - 13 | name: Checkout 14 | uses: actions/checkout@v2 15 | with: 16 | fetch-depth: 0 17 | - 18 | name: Set up Go 19 | uses: actions/setup-go@v2 20 | - 21 | name: Run GoReleaser 22 | uses: goreleaser/goreleaser-action@v2 23 | with: 24 | # either 'goreleaser' (default) or 'goreleaser-pro' 25 | distribution: goreleaser 26 | version: latest 27 | args: release 28 | env: 29 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 30 | HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }} 31 | AUR_KEY: ${{ secrets.AUR_KEY }} 32 | # Your GoReleaser Pro key, if you are using the 'goreleaser-pro' distribution 33 | # GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }} 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /cmd/kaf/kaf 2 | /dist 3 | /kaf* 4 | .idea 5 | 6 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | concurrency: 4 3 | deadline: 10m 4 | tests: false 5 | skip-dirs: 6 | - client 7 | - vendor 8 | modules-download-mode: vendor 9 | linters-settings: 10 | govet: 11 | check-shadowing: true 12 | unused: 13 | check-exported: false 14 | nakedret: 15 | max-func-lines: 100 16 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | # This is an example goreleaser.yaml file with some sane defaults. 3 | # Make sure to check the documentation at https://goreleaser.com 4 | before: 5 | hooks: 6 | # You may remove this if you don't use go modules. 7 | - go mod download 8 | # you may remove this if you don't need go generate 9 | - go generate ./... 10 | builds: 11 | - env: 12 | - CGO_ENABLED=0 13 | main: ./cmd/kaf 14 | goarch: 15 | - amd64 16 | - arm64 17 | goos: 18 | - linux 19 | - darwin 20 | - windows 21 | archives: 22 | - name_template: >- 23 | {{ .ProjectName }}_{{ .Version }}_ 24 | {{- title .Os }}_ 25 | {{- if eq .Arch "amd64" }}x86_64 26 | {{- else if eq .Arch "386" }}i386 27 | {{- else }}{{ .Arch }}{{ end }} 28 | checksum: 29 | name_template: 'checksums.txt' 30 | snapshot: 31 | version_template: "{{ .Tag }}-next" 32 | changelog: 33 | sort: asc 34 | filters: 35 | exclude: 36 | - '^docs:' 37 | - '^test:' 38 | release: 39 | github: 40 | owner: birdayz 41 | name: kaf 42 | 43 | brews: 44 | - 45 | # Name template of the recipe 46 | # Defaults to the project name 47 | name: kaf 48 | 49 | # GOARM to specify which 32-bit arm version to use if there are multiple versions 50 | # from the build section. Brew formulas support only one 32-bit version. 51 | # Default is 6 for all artifacts or each id if there are multiple versions. 52 | goarm: 6 53 | 54 | # GOAMD64 to specify which amd64 version to use if there are multiple versions 55 | # from the build section. 56 | # Default is v1. 57 | #goamd64: v3 58 | 59 | # NOTE: make sure the url_template, the token and given repo (github or gitlab) owner and name are from the
same kind. We will probably unify this in the next major version like it is done with scoop. 61 | 62 | # GitHub/GitLab repository to push the formula to 63 | repository: 64 | owner: birdayz 65 | name: homebrew-kaf 66 | 67 | # Optionally a branch can be provided. 68 | # Defaults to the default repository branch. 69 | branch: master 70 | 71 | # Optionally a token can be provided, if it differs from the token provided to GoReleaser 72 | token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}" 73 | 74 | # Template for the URL, which is determined by the given token (github, gitlab or gitea) 75 | # 76 | # Default depends on the client. 77 | #url_template: "http://github.mycompany.com/foo/bar/releases/{{ .Tag }}/{{ .ArtifactName }}" 78 | 79 | # Allows you to set a custom download strategy. Note that you'll need 80 | # to implement the strategy and add it to your tap repository. 81 | # Example: https://docs.brew.sh/Formula-Cookbook#specifying-the-download-strategy-explicitly 82 | # Default is empty. 83 | # download_strategy: CurlDownloadStrategy 84 | 85 | # Allows you to add a custom require_relative at the top of the formula template 86 | # Default is empty 87 | # custom_require: custom_download_strategy 88 | 89 | # Git author used to commit to the repository. 90 | # Defaults are shown. 91 | commit_author: 92 | name: Johannes Bruederl 93 | email: johannes.bruederl@gmail.com 94 | 95 | # The project name and current git tag are used in the format string. 96 | # commit_msg_template: "Brew formula update for {{ .ProjectName }} version {{ .Tag }}" 97 | 98 | # Folder inside the repository to put the formula. 99 | # Default is the root folder. 100 | # folder: Formula 101 | 102 | # Caveats for the user of your binary. 103 | # Default is empty. 104 | # caveats: "How to use this binary" 105 | 106 | # Your app's homepage. 107 | # Default is empty. 108 | # homepage: "https://example.com/" 109 | 110 | # Template of your app's description. 111 | # Default is empty. 112 | # description: "Software to create fast and easy drum rolls." 113 | 114 | # SPDX identifier of your app's license. 115 | # Default is empty. 116 | # license: "MIT" 117 | 118 | # Setting this will prevent goreleaser from actually trying to commit the updated 119 | # formula; instead, the formula file will be stored in the dist folder only, 120 | # leaving the responsibility of publishing it to the user. 121 | # If set to auto, the release will not be uploaded to the homebrew tap 122 | # in case there is an indicator for prerelease in the tag e.g. v1.0.0-rc1 123 | # Default is false. 124 | # skip_upload: true 125 | 126 | # Custom block for brew. 127 | # Can be used to specify alternate downloads for devel or head releases. 128 | # Default is empty. 129 | # custom_block: | 130 | # head "https://github.com/some/package.git" 131 | # ... 132 | 133 | # Packages your package depends on. 134 | # dependencies: 135 | # - name: git 136 | # - name: zsh 137 | # type: optional 138 | 139 | # Packages that conflict with your package. 140 | # conflicts: 141 | # - svn 142 | # - bash 143 | 144 | # Specify for packages that run as a service. 145 | # Default is empty. 146 | # plist: | 147 | # 148 | # ... 149 | 150 | # Service block. 151 | # service: | 152 | # run: foo/bar 153 | # ... 154 | 155 | # So you can `brew test` your formula. 156 | # Default is empty. 157 | # test: | 158 | # system "#{bin}/program --version" 159 | # ... 160 | 161 | # Custom install script for brew. 162 | # Default is 'bin.install "program"'. 163 | # install: | 164 | # bin.install "program" 165 | # ...
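# For instance, a formula that also ships shell completions could override the
# install block roughly like this (a sketch only, not the project's current
# setting; kaf relies on the default install block):
# install: |
#   bin.install "kaf"
#   generate_completions_from_executable(bin/"kaf", "completion")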
166 | 167 | # Custom post_install script for brew. 168 | # Could be used to do any additional work after the "install" script 169 | # Default is empty. 170 | # post_install: | 171 | # etc.install "app-config.conf" 172 | # ... 173 | # 174 | # .goreleaser.yaml 175 | # 176 | aurs: 177 | - 178 | # The package name. 179 | # 180 | # Defaults to the Project Name with a -bin suffix. 181 | # 182 | # Note that this integration does not create a PKGBUILD to build from 183 | # source, per Arch's guidelines. 184 | # GoReleaser will therefore enforce a `-bin` suffix if it's not present. 185 | name: kaf-bin 186 | 187 | homepage: https://github.com/birdayz/kaf 188 | 189 | # Your app's homepage. 190 | # Default is empty. 191 | #homepage: "https://example.com/" 192 | 193 | # Template of your app's description. 194 | # Default is empty. 195 | description: "Kafka CLI inspired by kubectl & docker" 196 | 197 | # The maintainers of the package. 198 | # Defaults to empty. 199 | maintainers: 200 | - 'Johannes Bruederl ' 201 | 202 | # The contributors of the package. 203 | # Defaults to empty. 204 | contributors: 205 | - 'Michał Lisowski ' 206 | 207 | # SPDX identifier of your app's license. 208 | # Default is empty. 209 | license: "MIT" 210 | 211 | # The SSH private key that should be used to commit to the Git repository. 212 | # This can either be a path or the key contents. 213 | # 214 | # WARNING: do not expose your private key in the config file! 215 | private_key: '{{ .Env.AUR_KEY }}' 216 | 217 | # The AUR Git URL for this package. 218 | # Defaults to empty. 219 | git_url: 'ssh://aur@aur.archlinux.org/kaf-bin.git' 220 | 221 | # Setting this will prevent goreleaser from actually trying to commit the updated 222 | # formula; instead, the formula file will be stored in the dist folder only, 223 | # leaving the responsibility of publishing it to the user. 224 | # 225 | # If set to auto, the release will not be uploaded to the homebrew tap 226 | # in case there is an indicator for prerelease in the tag e.g. v1.0.0-rc1. 227 | # 228 | # Default is false. 229 | # skip_upload: true 230 | 231 | # List of additional packages that the software provides the features of. 232 | # 233 | # Defaults to the project name. 234 | provides: 235 | - kaf-bin 236 | 237 | # List of packages that conflict with, or cause problems with, the package. 238 | # 239 | # Defaults to the project name. 240 | conflicts: 241 | - kaf-bin 242 | - kaf 243 | 244 | # List of packages that must be installed to install this. 245 | # 246 | # Defaults to empty. 247 | # depends: 248 | # - curl 249 | 250 | # List of packages that are not needed for the software to function, 251 | # but provide additional features. 252 | # 253 | # Must be in the format `package: short description of the extra functionality`. 254 | # 255 | # Defaults to empty. 256 | # optdepends: 257 | # - 'wget: for downloading things' 258 | 259 | # Custom package instructions. 260 | # 261 | # Defaults to `install -Dm755 "./PROJECT_NAME" "${pkgdir}/usr/bin/PROJECT_NAME"`, 262 | # which is not always correct. 263 | # 264 | # We recommend you override this, installing the binary, license and 265 | # everything else your package needs.
266 | package: |- 267 | # bin 268 | install -Dm755 "./kaf" "${pkgdir}/usr/bin/kaf" 269 | 270 | # completions 271 | # bash 272 | mkdir -p "${pkgdir}/etc/bash_completion.d" 273 | ./kaf completion bash > "${pkgdir}/etc/bash_completion.d/kaf" 274 | 275 | # zsh 276 | mkdir -p "${pkgdir}/usr/share/zsh/site-functions" 277 | ./kaf completion zsh > "${pkgdir}/usr/share/zsh/site-functions/_kaf" 278 | 279 | # fish 280 | mkdir -p "${pkgdir}/usr/share/fish/vendor_completions.d" 281 | ./kaf completion fish > "${pkgdir}/usr/share/fish/vendor_completions.d/kaf.fish" 282 | 283 | # Git author used to commit to the repository. 284 | # Defaults are shown below. 285 | commit_author: 286 | name: Johannes Bruederl 287 | email: johannes.bruederl@gmail.com 288 | 289 | # Commit message template. 290 | # Defaults to `Update to {{ .Tag }}`. 291 | commit_msg_template: "pkgbuild updates" 292 | 293 | # If you build for multiple GOAMD64 versions, you may use this to choose which one to use. 294 | # Defaults to `v1`. 295 | #goamd64: v2 296 | 297 | # The value to be passed to `GIT_SSH_COMMAND`. 298 | # This is mainly used to specify the SSH private key used to pull/push to 299 | # the Git URL. 300 | # 301 | # Defaults to `ssh -i {{ .KeyPath }} -o StrictHostKeyChecking=accept-new -F /dev/null`. 302 | #git_ssh_command: 'ssh -i {{ .Env.KEY }} -o SomeOption=yes' 303 | 304 | # Template for the URL, which is determined by the given token 305 | # (github, gitlab or gitea). 306 | # 307 | # Default depends on the client. 308 | #url_template: "http://github.mycompany.com/foo/bar/releases/{{ .Tag }}/{{ .ArtifactName }}" 309 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.24.2@sha256:3a060d683c28fbb21d7fe8966458e084a6d7ebfb1f3ef3fd901abd2083c43675 AS buildstage 2 | 3 | # Set destination for COPY 4 | WORKDIR /app 5 | 6 | # Download Go modules 7 | COPY go.mod go.sum ./ 8 | RUN go mod download 9 | COPY . ./ 10 | 11 | # Build 12 | RUN CGO_ENABLED=0 GOOS=linux go build -ldflags "-w -s" -o /kaf ./cmd/kaf 13 | 14 | FROM scratch 15 | 16 | WORKDIR / 17 | 18 | COPY --from=buildstage /kaf /bin/kaf 19 | 20 | USER 1001 21 | 22 | # Run 23 | ENTRYPOINT ["/bin/kaf"] 24 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License.
26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | DOCKER_CMD ?= docker 2 | DOCKER_REGISTRY ?= docker.io 3 | DOCKER_ORG ?= $(USER) 4 | DOCKER_NAME ?= kaf 5 | DOCKER_TAG ?= latest 6 | BUILD_TAG ?= latest 7 | 8 | build: 9 | go build -ldflags "-w -s" ./cmd/kaf 10 | install: 11 | go install -ldflags "-w -s" ./cmd/kaf 12 | release: 13 | goreleaser 14 | run-kafka: 15 | docker-compose up -d 16 | docker-build: 17 | ${DOCKER_CMD} build -t ${DOCKER_REGISTRY}/${DOCKER_ORG}/${DOCKER_NAME}:${DOCKER_TAG} . 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kaf 2 | Kafka CLI inspired by kubectl & docker 3 | 4 | [![Actions Status](https://github.com/birdayz/kaf/workflows/Go/badge.svg)](https://github.com/birdayz/kaf/actions) 5 | [![GoReportCard](https://goreportcard.com/badge/github.com/birdayz/kaf)](https://goreportcard.com/report/github.com/birdayz/kaf) 6 | [![GoDoc](https://godoc.org/github.com/birdayz/kaf?status.svg)](https://godoc.org/github.com/birdayz/kaf) 7 | ![AUR version](https://img.shields.io/aur/version/kaf-bin) 8 | 9 | ![asciicinema](asciicinema.gif) 10 | 11 | ## Install 12 | Install via Go from source: 13 | 14 | ``` 15 | go install github.com/birdayz/kaf/cmd/kaf@latest 16 | ``` 17 | 18 | Install via install script: 19 | 20 | ``` 21 | curl https://raw.githubusercontent.com/birdayz/kaf/master/godownloader.sh | BINDIR=$HOME/bin bash 22 | ``` 23 | 24 | Install on Arch Linux via [AUR](https://aur.archlinux.org/packages/kaf-bin/): 25 | 26 | ``` 27 | yay -S kaf-bin 28 | ``` 29 | 30 | Install via Homebrew: 31 | 32 | ``` 33 | brew tap birdayz/kaf 34 | brew install kaf 35 | ``` 36 | 37 | ## Usage 38 | 39 | Show the tool version 40 | 41 | `kaf --version` 42 | 43 | Add a local Kafka cluster with no auth 44 | 45 | `kaf config add-cluster local -b localhost:9092` 46 | 47 | Select a cluster from the dropdown list 48 | 49 | `kaf config select-cluster` 50 | 51 | List and describe nodes 52 | 53 | `kaf node ls` 54 | 55 | List topics, partitions and replicas 56 | 57 | `kaf topics` 58 | 59 | Describe a given topic called _mqtt.messages.incoming_ 60 | 61 | `kaf topic describe mqtt.messages.incoming` 62 | 63 | ### Group Inspection 64 | 65 | List consumer groups 66 | 67 | `kaf groups` 68 | 69 | Describe a given consumer group called _dispatcher_ 70 | 71 | `kaf group describe dispatcher` 72 | 73 | Write a message into a given topic from stdin 74 | 75 | `echo test | kaf produce mqtt.messages.incoming` 76 | 77 | ### Offset Reset 78 | 79 | Set the offset for consumer group _dispatcher_ consuming from topic _mqtt.messages.incoming_ to latest for all partitions 80 | 81 | `kaf group commit dispatcher -t mqtt.messages.incoming --offset latest --all-partitions` 82 | 83 | Set the offset to oldest 84 | 85 | `kaf group commit dispatcher -t mqtt.messages.incoming --offset oldest --all-partitions` 86 | 87 | Set the offset to 1001 for partition 0 88 | 89 | `kaf group commit dispatcher -t mqtt.messages.incoming --offset 1001 --partition 0` 90 | 91 | ## Configuration 92 | See the [examples](examples) folder 93 | 94 | ## Shell autocompletion 95 | Source the completion script in your shell startup file: 96 | 97 | Bash Linux: 98 | 99 | ```kaf completion bash > /etc/bash_completion.d/kaf``` 100 | 101 | Bash MacOS: 102 | 103 | ```kaf completion bash > /usr/local/etc/bash_completion.d/kaf``` 104 | 105 |
Zsh 106 | 107 | ```kaf completion zsh > "${fpath[1]}/_kaf"``` 108 | 109 | Fish 110 | 111 | ```kaf completion fish > ~/.config/fish/completions/kaf.fish``` 112 | 113 | Powershell 114 | 115 | ```Invoke-Expression (@(kaf completion powershell) -replace " ''\)$"," ' ')" -join "`n")``` 116 | 117 | ## Sponsors 118 | ### [Redpanda](https://github.com/redpanda-data/redpanda) 119 | - The streaming data platform for developers 120 | - Single binary w/no dependencies 121 | - Fully Kafka API compatible 122 | - 10x lower P99 latencies, 6x faster transactions 123 | - Zero data loss by default 124 | ### [Zerodha](https://zerodha.tech) 125 | 126 | -------------------------------------------------------------------------------- /asciicinema.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/birdayz/kaf/d654fffc95d06e5e3b3424b46e67dc3b3602bf4e/asciicinema.gif -------------------------------------------------------------------------------- /cmd/kaf/completion.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | func init() { 8 | rootCmd.AddCommand(completionCmd) 9 | } 10 | 11 | var completionCmd = &cobra.Command{ 12 | Use: "completion [SHELL]", 13 | Short: "Generate completion script for bash, zsh, fish or powershell", 14 | Long: `To load completions: 15 | 16 | Bash: 17 | 18 | $ source <(kaf completion bash) 19 | 20 | # To load completions for each session, execute once: 21 | Linux: 22 | $ kaf completion bash > /etc/bash_completion.d/kaf 23 | MacOS: 24 | $ kaf completion bash > /usr/local/etc/bash_completion.d/kaf 25 | 26 | Zsh: 27 | 28 | # To load completions for each session, execute once: 29 | $ kaf completion zsh > "${fpath[1]}/_kaf" 30 | 31 | # You will need to start a new shell for this setup to take effect. 
32 | 33 | Fish: 34 | 35 | $ kaf completion fish | source 36 | 37 | # To load completions for each session, execute once: 38 | $ kaf completion fish > ~/.config/fish/completions/kaf.fish 39 | `, 40 | DisableFlagsInUseLine: true, 41 | Args: cobra.ExactValidArgs(1), 42 | ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, 43 | Run: func(cmd *cobra.Command, args []string) { 44 | switch args[0] { 45 | case "bash": 46 | err := rootCmd.GenBashCompletion(outWriter) 47 | if err != nil { 48 | errorExit("Failed to generate bash completion: %v", err) 49 | } 50 | case "zsh": 51 | if err := rootCmd.GenZshCompletion(outWriter); err != nil { 52 | errorExit("Failed to generate zsh completion: %v", err) 53 | } 54 | case "fish": 55 | if err := rootCmd.GenFishCompletion(outWriter, true); err != nil { 56 | errorExit("Failed to generate fish completion: %v", err) 57 | } 58 | case "powershell": 59 | err := rootCmd.GenPowerShellCompletion(outWriter) 60 | if err != nil { 61 | errorExit("Failed to generate powershell completion: %v", err) 62 | } 63 | } 64 | }, 65 | } 66 | -------------------------------------------------------------------------------- /cmd/kaf/config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | 8 | "regexp" 9 | 10 | "github.com/IBM/sarama" 11 | "github.com/birdayz/kaf/pkg/config" 12 | "github.com/manifoldco/promptui" 13 | "github.com/spf13/cobra" 14 | ) 15 | 16 | var ( 17 | flagEhConnString string 18 | flagBrokerVersion string 19 | ) 20 | 21 | func init() { 22 | configCmd.AddCommand(configImportCmd) 23 | configCmd.AddCommand(configUseCmd) 24 | configCmd.AddCommand(configLsCmd) 25 | configCmd.AddCommand(configAddClusterCmd) 26 | configCmd.AddCommand(configRemoveClusterCmd) 27 | configCmd.AddCommand(configSelectCluster) 28 | configCmd.AddCommand(configCurrentContext) 29 | configCmd.AddCommand(configAddEventhub) 30 | rootCmd.AddCommand(configCmd) 31 | 32 | configLsCmd.Flags().BoolVar(&noHeaderFlag, "no-headers", false, "Hide table headers") 33 | configAddEventhub.Flags().StringVar(&flagEhConnString, "eh-connstring", "", "EventHub ConnectionString") 34 | configAddClusterCmd.Flags().StringVar(&flagBrokerVersion, "broker-version", "", fmt.Sprintf("Broker Version.
Available Versions: %v", sarama.SupportedVersions)) 35 | } 36 | 37 | var configCmd = &cobra.Command{ 38 | Use: "config", 39 | Short: "Handle kaf configuration", 40 | } 41 | 42 | var configCurrentContext = &cobra.Command{ 43 | Use: "current-context", 44 | Short: "Displays the current context", 45 | Args: cobra.ExactArgs(0), 46 | Run: func(cmd *cobra.Command, args []string) { 47 | fmt.Println(cfg.CurrentCluster) 48 | }, 49 | } 50 | 51 | var configUseCmd = &cobra.Command{ 52 | Use: "use-cluster [NAME]", 53 | Short: "Sets the current cluster in the configuration", 54 | Args: cobra.ExactArgs(1), 55 | ValidArgsFunction: validConfigArgs, 56 | Run: func(cmd *cobra.Command, args []string) { 57 | name := args[0] 58 | if err := cfg.SetCurrentCluster(name); err != nil { 59 | fmt.Printf("Cluster with name %v not found\n", name) 60 | } else { 61 | fmt.Printf("Switched to cluster \"%v\".\n", name) 62 | } 63 | }, 64 | } 65 | 66 | var configLsCmd = &cobra.Command{ 67 | Use: "get-clusters", 68 | Short: "Display clusters in the configuration file", 69 | Args: cobra.NoArgs, 70 | Run: func(cmd *cobra.Command, args []string) { 71 | if !noHeaderFlag { 72 | fmt.Println("NAME") 73 | } 74 | for _, cluster := range cfg.Clusters { 75 | fmt.Println(cluster.Name) 76 | } 77 | }, 78 | } 79 | 80 | var configAddEventhub = &cobra.Command{ 81 | Use: "add-eventhub [NAME]", 82 | Example: "kaf config add-eventhub my-eventhub --eh-connstring 'Endpoint=sb://......AccessKey=....'", 83 | Short: "Add Azure EventHub", 84 | Args: cobra.ExactArgs(1), 85 | Run: func(cmd *cobra.Command, args []string) { 86 | name := args[0] 87 | for _, cluster := range cfg.Clusters { 88 | if cluster.Name == name { 89 | errorExit("Could not add cluster: cluster with name '%v' exists already.", name) 90 | } 91 | } 92 | 93 | // Parse hub name from ConnString 94 | r, _ := regexp.Compile(`^Endpoint=sb://(.*)\.servicebus.*$`) 95 | hubName := r.FindStringSubmatch(flagEhConnString) 96 | if len(hubName) != 2 { 97 | errorExit("Failed to determine EventHub name from Connection String.
Check your ConnectionString") 98 | } 99 | 100 | cfg.Clusters = append(cfg.Clusters, &config.Cluster{ 101 | Name: name, 102 | Brokers: []string{hubName[1] + ".servicebus.windows.net:9093"}, 103 | SchemaRegistryURL: schemaRegistryURL, 104 | SASL: &config.SASL{ 105 | Mechanism: "PLAIN", 106 | Username: "$ConnectionString", 107 | Password: flagEhConnString, 108 | }, 109 | SecurityProtocol: "SASL_SSL", 110 | }) 111 | err := cfg.Write() 112 | if err != nil { 113 | errorExit("Unable to write config: %v\n", err) 114 | } 115 | fmt.Println("Added EventHub.") 116 | }, 117 | } 118 | 119 | var configSelectCluster = &cobra.Command{ 120 | Use: "select-cluster", 121 | Aliases: []string{"ls"}, 122 | Short: "Interactively select a cluster", 123 | Run: func(cmd *cobra.Command, args []string) { 124 | var clusterNames []string 125 | var pos = 0 126 | for k, cluster := range cfg.Clusters { 127 | clusterNames = append(clusterNames, cluster.Name) 128 | if cluster.Name == cfg.CurrentCluster { 129 | pos = k 130 | } 131 | } 132 | 133 | searcher := func(input string, index int) bool { 134 | cluster := clusterNames[index] 135 | name := strings.Replace(strings.ToLower(cluster), " ", "", -1) 136 | input = strings.Replace(strings.ToLower(input), " ", "", -1) 137 | return strings.Contains(name, input) 138 | } 139 | 140 | p := promptui.Select{ 141 | Label: "Select cluster", 142 | Items: clusterNames, 143 | Searcher: searcher, 144 | Size: 10, 145 | CursorPos: pos, 146 | } 147 | 148 | _, selected, err := p.Run() 149 | if err != nil { 150 | os.Exit(0) 151 | } 152 | 153 | // TODO copy pasta 154 | if err := cfg.SetCurrentCluster(selected); err != nil { 155 | fmt.Printf("Cluster with name %v not found\n", selected) 156 | } 157 | }, 158 | } 159 | 160 | var configAddClusterCmd = &cobra.Command{ 161 | Use: "add-cluster [NAME]", 162 | Short: "Add cluster", 163 | Args: cobra.ExactArgs(1), 164 | Run: func(cmd *cobra.Command, args []string) { 165 | name := args[0] 166 | for _, cluster := range cfg.Clusters { 167 | if cluster.Name == name { 168 | errorExit("Could not add cluster: cluster with name '%v' exists already.", name) 169 | } 170 | } 171 | 172 | cfg.Clusters = append(cfg.Clusters, &config.Cluster{ 173 | Name: name, 174 | Brokers: brokersFlag, 175 | SchemaRegistryURL: schemaRegistryURL, 176 | Version: flagBrokerVersion, 177 | }) 178 | err := cfg.Write() 179 | if err != nil { 180 | errorExit("Unable to write config: %v\n", err) 181 | } 182 | fmt.Println("Added cluster.") 183 | }, 184 | } 185 | 186 | var configRemoveClusterCmd = &cobra.Command{ 187 | Use: "remove-cluster [NAME]", 188 | Short: "Remove cluster", 189 | Args: cobra.ExactArgs(1), 190 | ValidArgsFunction: validConfigArgs, 191 | Run: func(cmd *cobra.Command, args []string) { 192 | name := args[0] 193 | 194 | var pos = -1 195 | for i, cluster := range cfg.Clusters { 196 | if cluster.Name == name { 197 | pos = i 198 | break 199 | } 200 | } 201 | 202 | if pos == -1 { 203 | errorExit("Could not delete cluster: cluster with name '%v' does not exist.", name) 204 | } 205 | 206 | cfg.Clusters = append(cfg.Clusters[:pos], cfg.Clusters[pos+1:]...)
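// (The append above splices the slice in place: copying cfg.Clusters[pos+1:]
// over index pos drops the removed cluster without allocating a new slice.)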
207 | 208 | err := cfg.Write() 209 | if err != nil { 210 | errorExit("Unable to write config: %v\n", err) 211 | } 212 | fmt.Println("Removed cluster.") 213 | }, 214 | } 215 | 216 | var configImportCmd = &cobra.Command{ 217 | Use: "import [ccloud]", 218 | Short: "Import configurations into the $HOME/.kaf/config file", 219 | Run: func(cmd *cobra.Command, args []string) { 220 | if path, err := config.TryFindCcloudConfigFile(); err == nil { 221 | fmt.Printf("Detected Confluent Cloud config in file %v\n", path) 222 | if username, password, broker, err := config.ParseConfluentCloudConfig(path); err == nil { 223 | 224 | newCluster := &config.Cluster{ 225 | Name: "ccloud", 226 | Brokers: []string{broker}, 227 | SASL: &config.SASL{ 228 | Username: username, 229 | Password: password, 230 | Mechanism: "PLAIN", 231 | }, 232 | SecurityProtocol: "SASL_SSL", 233 | } 234 | 235 | // Replace an existing entry of the same name instead of duplicating it. 236 | var found bool 237 | for i, cluster := range cfg.Clusters { 238 | if cluster.Name == newCluster.Name { 239 | found = true 240 | cfg.Clusters[i] = newCluster 241 | break 242 | } 243 | } 244 | 245 | if !found { 246 | fmt.Println("Wrote new entry to config file") 247 | cfg.Clusters = append(cfg.Clusters, newCluster) 248 | } 249 | 250 | if cfg.CurrentCluster == "" { 251 | cfg.CurrentCluster = newCluster.Name 252 | } 253 | err = cfg.Write() 254 | if err != nil { 255 | errorExit("Failed to write config: %v", err) 256 | } 257 | 258 | } 259 | } 260 | }, 261 | ValidArgs: []string{"ccloud"}, 262 | Args: func(cmd *cobra.Command, args []string) error { 263 | if err := cobra.OnlyValidArgs(cmd, args); err != nil { 264 | return err 265 | } 266 | 267 | if err := cobra.ExactArgs(1)(cmd, args); err != nil { 268 | return err 269 | } 270 | return nil 271 | }, 272 | } 273 | -------------------------------------------------------------------------------- /cmd/kaf/consume.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/binary" 7 | "encoding/json" 8 | "fmt" 9 | "strconv" 10 | "sync" 11 | "text/tabwriter" 12 | 13 | "github.com/IBM/sarama" 14 | "github.com/birdayz/kaf/pkg/avro" 15 | "github.com/birdayz/kaf/pkg/proto" 16 | "github.com/golang/protobuf/jsonpb" 17 | prettyjson "github.com/hokaccha/go-prettyjson" 18 | "github.com/spf13/cobra" 19 | "github.com/vmihailenco/msgpack/v5" 20 | ) 21 | 22 | var ( 23 | offsetFlag string 24 | groupFlag string 25 | groupCommitFlag bool 26 | outputFormat = OutputFormatDefault 27 | // Deprecated: Use outputFormat instead. 28 | raw bool 29 | follow bool 30 | tail int32 31 | schemaCache *avro.SchemaCache 32 | keyfmt *prettyjson.Formatter 33 | 34 | protoType string 35 | keyProtoType string 36 | 37 | flagPartitions []int32 38 | 39 | limitMessagesFlag int64 40 | 41 | reg *proto.DescriptorRegistry 42 | ) 43 | 44 | func init() { 45 | rootCmd.AddCommand(consumeCmd) 46 | consumeCmd.Flags().StringVar(&offsetFlag, "offset", "oldest", "Offset to start consuming.
Possible values: oldest, newest, or integer.") 47 | consumeCmd.Flags().BoolVar(&raw, "raw", false, "Print raw output of messages, without key or prettified JSON") 48 | consumeCmd.Flags().Var(&outputFormat, "output", "Set output format messages: default, raw (without key or prettified JSON), json") 49 | consumeCmd.Flags().BoolVarP(&follow, "follow", "f", false, "Continue to consume messages until program execution is interrupted/terminated") 50 | consumeCmd.Flags().Int32VarP(&tail, "tail", "n", 0, "Print last n messages per partition") 51 | consumeCmd.Flags().StringSliceVar(&protoFiles, "proto-include", []string{}, "Path to proto files") 52 | consumeCmd.Flags().StringSliceVar(&protoExclude, "proto-exclude", []string{}, "Proto exclusions (path prefixes)") 53 | consumeCmd.Flags().BoolVar(&decodeMsgPack, "decode-msgpack", false, "Enable deserializing msgpack") 54 | consumeCmd.Flags().StringVar(&protoType, "proto-type", "", "Fully qualified name of the proto message type. Example: com.test.SampleMessage") 55 | consumeCmd.Flags().StringVar(&keyProtoType, "key-proto-type", "", "Fully qualified name of the proto key type. Example: com.test.SampleMessage") 56 | consumeCmd.Flags().Int32SliceVarP(&flagPartitions, "partitions", "p", []int32{}, "Partitions to consume from") 57 | consumeCmd.Flags().Int64VarP(&limitMessagesFlag, "limit-messages", "l", 0, "Limit messages per partition") 58 | consumeCmd.Flags().StringVarP(&groupFlag, "group", "g", "", "Consumer Group to use for consume") 59 | consumeCmd.Flags().BoolVar(&groupCommitFlag, "commit", false, "Commit Group offset after receiving messages. Works only if consuming as Consumer Group") 60 | 61 | if err := consumeCmd.RegisterFlagCompletionFunc("output", completeOutputFormat); err != nil { 62 | errorExit("Failed to register flag completion: %v", err) 63 | } 64 | 65 | if err := consumeCmd.Flags().MarkDeprecated("raw", "use --output raw instead"); err != nil { 66 | errorExit("Failed to mark flag as deprecated: %v", err) 67 | } 68 | 69 | keyfmt = prettyjson.NewFormatter() 70 | keyfmt.Newline = " " // Replace newline with space to avoid condensed output. 71 | keyfmt.Indent = 0 72 | } 73 | 74 | type offsets struct { 75 | newest int64 76 | oldest int64 77 | } 78 | 79 | func getOffsets(client sarama.Client, topic string, partition int32) (*offsets, error) { 80 | newest, err := client.GetOffset(topic, partition, sarama.OffsetNewest) 81 | if err != nil { 82 | return nil, err 83 | } 84 | 85 | oldest, err := client.GetOffset(topic, partition, sarama.OffsetOldest) 86 | if err != nil { 87 | return nil, err 88 | } 89 | 90 | return &offsets{ 91 | newest: newest, 92 | oldest: oldest, 93 | }, nil 94 | } 95 | 96 | var consumeCmd = &cobra.Command{ 97 | Use: "consume TOPIC", 98 | Short: "Consume messages", 99 | Args: cobra.ExactArgs(1), 100 | ValidArgsFunction: validTopicArgs, 101 | PreRun: setupProtoDescriptorRegistry, 102 | Run: func(cmd *cobra.Command, args []string) { 103 | var offset int64 104 | cfg := getConfig() 105 | topic := args[0] 106 | client := getClientFromConfig(cfg) 107 | 108 | // Allow deprecated flag to override when outputFormat is not specified, or default. 
109 | if outputFormat == OutputFormatDefault && raw { 110 | outputFormat = OutputFormatRaw 111 | } 112 | 113 | switch offsetFlag { 114 | case "oldest": 115 | offset = sarama.OffsetOldest 116 | cfg.Consumer.Offsets.Initial = sarama.OffsetOldest 117 | case "newest": 118 | offset = sarama.OffsetNewest 119 | cfg.Consumer.Offsets.Initial = sarama.OffsetNewest 120 | default: 121 | o, err := strconv.ParseInt(offsetFlag, 10, 64) 122 | if err != nil { 123 | errorExit("Could not parse '%s' to int64: %v", offsetFlag, err) 124 | } 125 | offset = o 126 | } 127 | 128 | if groupFlag != "" { 129 | withConsumerGroup(cmd.Context(), client, topic, groupFlag) 130 | } else { 131 | withoutConsumerGroup(cmd.Context(), client, topic, offset) 132 | } 133 | 134 | }, 135 | } 136 | 137 | type g struct{} 138 | 139 | func (g *g) Setup(s sarama.ConsumerGroupSession) error { 140 | return nil 141 | } 142 | 143 | func (g *g) Cleanup(s sarama.ConsumerGroupSession) error { 144 | return nil 145 | } 146 | 147 | func (g *g) ConsumeClaim(s sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { 148 | 149 | mu := sync.Mutex{} // Synchronizes stderr and stdout. 150 | for msg := range claim.Messages() { 151 | handleMessage(msg, &mu) 152 | if groupCommitFlag { 153 | s.MarkMessage(msg, "") 154 | } 155 | } 156 | return nil 157 | } 158 | 159 | func withConsumerGroup(ctx context.Context, client sarama.Client, topic, group string) { 160 | cg, err := sarama.NewConsumerGroupFromClient(group, client) 161 | if err != nil { 162 | errorExit("Failed to create consumer group: %v", err) 163 | } 164 | 165 | schemaCache = getSchemaCache() 166 | 167 | err = cg.Consume(ctx, []string{topic}, &g{}) 168 | if err != nil { 169 | errorExit("Error on consume: %v", err) 170 | } 171 | } 172 | 173 | func withoutConsumerGroup(ctx context.Context, client sarama.Client, topic string, offset int64) { 174 | consumer, err := sarama.NewConsumerFromClient(client) 175 | if err != nil { 176 | errorExit("Unable to create consumer from client: %v\n", err) 177 | } 178 | 179 | var partitions []int32 180 | if len(flagPartitions) == 0 { 181 | partitions, err = consumer.Partitions(topic) 182 | if err != nil { 183 | errorExit("Unable to get partitions: %v\n", err) 184 | } 185 | } else { 186 | partitions = flagPartitions 187 | } 188 | 189 | schemaCache = getSchemaCache() 190 | 191 | wg := sync.WaitGroup{} 192 | mu := sync.Mutex{} // Synchronizes stderr and stdout.
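// Fan-out: one goroutine per partition below. Each goroutine resolves its
// own starting offset and consumes independently; wg.Wait() at the end
// blocks until every partition reader has returned.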
193 | for _, partition := range partitions { 194 | wg.Add(1) 195 | 196 | go func(partition int32, offset int64) { 197 | defer wg.Done() 198 | 199 | offsets, err := getOffsets(client, topic, partition) 200 | if err != nil { 201 | errorExit("Failed to get %s offsets for partition %d: %v", topic, partition, err) 202 | } 203 | 204 | if tail != 0 { 205 | offset = offsets.newest - int64(tail) 206 | if offset < offsets.oldest { 207 | offset = offsets.oldest 208 | } 209 | } 210 | 211 | // Already at end of partition, return early 212 | if !follow && offsets.newest == offsets.oldest { 213 | return 214 | } 215 | 216 | pc, err := consumer.ConsumePartition(topic, partition, offset) 217 | if err != nil { 218 | errorExit("Unable to consume partition: %v %v %v %v\n", topic, partition, offset, err) 219 | } 220 | 221 | var count int64 = 0 222 | for { 223 | select { 224 | case <-ctx.Done(): 225 | return 226 | case msg := <-pc.Messages(): 227 | handleMessage(msg, &mu) 228 | count++ 229 | if limitMessagesFlag > 0 && count >= limitMessagesFlag { 230 | return 231 | } 232 | if !follow && msg.Offset+1 >= pc.HighWaterMarkOffset() { 233 | return 234 | } 235 | } 236 | } 237 | }(partition, offset) 238 | } 239 | wg.Wait() 240 | } 241 | 242 | func handleMessage(msg *sarama.ConsumerMessage, mu *sync.Mutex) { 243 | var stderr bytes.Buffer 244 | 245 | var dataToDisplay []byte 246 | var keyToDisplay []byte 247 | var err error 248 | 249 | if protoType != "" { 250 | dataToDisplay, err = protoDecode(reg, msg.Value, protoType) 251 | if err != nil { 252 | fmt.Fprintf(&stderr, "failed to decode proto. falling back to binary output. Error: %v\n", err) 253 | } 254 | } else { 255 | dataToDisplay, err = avroDecode(msg.Value) 256 | if err != nil { 257 | fmt.Fprintf(&stderr, "could not decode Avro data: %v\n", err) 258 | } 259 | } 260 | 261 | if keyProtoType != "" { 262 | keyToDisplay, err = protoDecode(reg, msg.Key, keyProtoType) 263 | if err != nil { 264 | fmt.Fprintf(&stderr, "failed to decode proto key. falling back to binary output.
Error: %v\n", err) 265 | } 266 | } else { 267 | keyToDisplay, err = avroDecode(msg.Key) 268 | if err != nil { 269 | fmt.Fprintf(&stderr, "could not decode Avro data: %v\n", err) 270 | } 271 | } 272 | 273 | if decodeMsgPack { 274 | var obj interface{} 275 | err = msgpack.Unmarshal(msg.Value, &obj) 276 | if err != nil { 277 | fmt.Fprintf(&stderr, "could not decode msgpack data: %v\n", err) 278 | } 279 | 280 | dataToDisplay, err = json.Marshal(obj) 281 | if err != nil { 282 | fmt.Fprintf(&stderr, "could not decode msgpack data: %v\n", err) 283 | } 284 | } 285 | 286 | dataToDisplay = formatMessage(msg, dataToDisplay, keyToDisplay, &stderr) 287 | 288 | mu.Lock() 289 | stderr.WriteTo(errWriter) 290 | _, _ = colorableOut.Write(dataToDisplay) 291 | fmt.Fprintln(outWriter) 292 | mu.Unlock() 293 | } 294 | 295 | func formatMessage(msg *sarama.ConsumerMessage, rawMessage []byte, keyToDisplay []byte, stderr *bytes.Buffer) []byte { 296 | switch outputFormat { 297 | case OutputFormatRaw: 298 | return rawMessage 299 | case OutputFormatJSON: 300 | jsonMessage := make(map[string]interface{}) 301 | 302 | jsonMessage["partition"] = msg.Partition 303 | jsonMessage["offset"] = msg.Offset 304 | jsonMessage["timestamp"] = msg.Timestamp 305 | 306 | if len(msg.Headers) > 0 { 307 | jsonMessage["headers"] = msg.Headers 308 | } 309 | 310 | jsonMessage["key"] = formatJSON(keyToDisplay) 311 | jsonMessage["payload"] = formatJSON(rawMessage) 312 | 313 | jsonToDisplay, err := json.Marshal(jsonMessage) 314 | if err != nil { 315 | fmt.Fprintf(stderr, "could not decode JSON data: %v", err) 316 | } 317 | 318 | return jsonToDisplay 319 | case OutputFormatDefault: 320 | fallthrough 321 | default: 322 | if isJSON(rawMessage) { 323 | rawMessage = formatValue(rawMessage) 324 | } 325 | 326 | if isJSON(keyToDisplay) { 327 | keyToDisplay = formatKey(keyToDisplay) 328 | } 329 | 330 | w := tabwriter.NewWriter(stderr, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) 331 | 332 | if len(msg.Headers) > 0 { 333 | fmt.Fprintf(w, "Headers:\n") 334 | } 335 | 336 | for _, hdr := range msg.Headers { 337 | var hdrValue string 338 | // Try to detect azure eventhub-specific encoding 339 | if len(hdr.Value) > 0 { 340 | switch hdr.Value[0] { 341 | case 161: 342 | hdrValue = string(hdr.Value[2 : 2+hdr.Value[1]]) 343 | case 131: 344 | hdrValue = strconv.FormatUint(binary.BigEndian.Uint64(hdr.Value[1:9]), 10) 345 | default: 346 | hdrValue = string(hdr.Value) 347 | } 348 | } 349 | 350 | fmt.Fprintf(w, "\tKey: %v\tValue: %v\n", string(hdr.Key), hdrValue) 351 | 352 | } 353 | 354 | if len(msg.Key) > 0 { 355 | fmt.Fprintf(w, "Key:\t%v\n", string(keyToDisplay)) 356 | } 357 | fmt.Fprintf(w, "Partition:\t%v\nOffset:\t%v\nTimestamp:\t%v\n", msg.Partition, msg.Offset, msg.Timestamp) 358 | w.Flush() 359 | 360 | return rawMessage 361 | } 362 | } 363 | 364 | // proto to JSON 365 | func protoDecode(reg *proto.DescriptorRegistry, b []byte, _type string) ([]byte, error) { 366 | dynamicMessage := reg.MessageForType(_type) 367 | if dynamicMessage == nil { 368 | return b, nil 369 | } 370 | 371 | err := dynamicMessage.Unmarshal(b) 372 | if err != nil { 373 | return nil, err 374 | } 375 | 376 | var m jsonpb.Marshaler 377 | var w bytes.Buffer 378 | 379 | err = m.Marshal(&w, dynamicMessage) 380 | if err != nil { 381 | return nil, err 382 | } 383 | return w.Bytes(), nil 384 | 385 | } 386 | 387 | func avroDecode(b []byte) ([]byte, error) { 388 | if schemaCache != nil { 389 | return schemaCache.DecodeMessage(b) 390 | } 391 | return b, nil 392 
| } 393 | 394 | func formatKey(key []byte) []byte { 395 | if b, err := keyfmt.Format(key); err == nil { 396 | return b 397 | } 398 | return key 399 | 400 | } 401 | 402 | func formatValue(key []byte) []byte { 403 | if b, err := prettyjson.Format(key); err == nil { 404 | return b 405 | } 406 | return key 407 | } 408 | 409 | func formatJSON(data []byte) interface{} { 410 | var i interface{} 411 | if err := json.Unmarshal(data, &i); err != nil { 412 | return string(data) 413 | } 414 | 415 | return i 416 | } 417 | 418 | func isJSON(data []byte) bool { 419 | var i interface{} 420 | if err := json.Unmarshal(data, &i); err == nil { 421 | return true 422 | } 423 | return false 424 | } 425 | 426 | type OutputFormat string 427 | 428 | const ( 429 | OutputFormatDefault OutputFormat = "default" 430 | OutputFormatRaw OutputFormat = "raw" 431 | OutputFormatJSON OutputFormat = "json" 432 | ) 433 | 434 | func (e *OutputFormat) String() string { 435 | return string(*e) 436 | } 437 | 438 | func (e *OutputFormat) Set(v string) error { 439 | switch v { 440 | case "default", "raw", "json": 441 | *e = OutputFormat(v) 442 | return nil 443 | default: 444 | return fmt.Errorf("must be one of: default, raw, json") 445 | } 446 | } 447 | 448 | func (e *OutputFormat) Type() string { 449 | return "OutputFormat" 450 | } 451 | 452 | func completeOutputFormat(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { 453 | return []string{"default", "raw", "json"}, cobra.ShellCompDirectiveNoFileComp 454 | } 455 | -------------------------------------------------------------------------------- /cmd/kaf/group.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "slices" 9 | "sort" 10 | "unicode" 11 | 12 | "text/tabwriter" 13 | 14 | "encoding/base64" 15 | "encoding/hex" 16 | 17 | "sync" 18 | 19 | "github.com/IBM/sarama" 20 | "github.com/birdayz/kaf/pkg/streams" 21 | "github.com/manifoldco/promptui" 22 | "github.com/spf13/cobra" 23 | 24 | "strconv" 25 | 26 | "time" 27 | ) 28 | 29 | var ( 30 | flagPeekPartitions []int32 31 | flagPeekBefore int64 32 | flagPeekAfter int64 33 | flagPeekTopics []string 34 | 35 | flagNoMembers bool 36 | flagDescribeTopics []string 37 | ) 38 | 39 | func init() { 40 | rootCmd.AddCommand(groupCmd) 41 | rootCmd.AddCommand(groupsCmd) 42 | groupCmd.AddCommand(groupDescribeCmd) 43 | groupCmd.AddCommand(groupLsCmd) 44 | groupCmd.AddCommand(groupDeleteCmd) 45 | groupCmd.AddCommand(groupPeekCmd) 46 | groupCmd.AddCommand(createGroupCommitOffsetCmd()) 47 | 48 | groupLsCmd.Flags().BoolVar(&noHeaderFlag, "no-headers", false, "Hide table headers") 49 | groupsCmd.Flags().BoolVar(&noHeaderFlag, "no-headers", false, "Hide table headers") 50 | 51 | groupPeekCmd.Flags().StringSliceVarP(&flagPeekTopics, "topics", "t", []string{}, "Topics to peek from") 52 | groupPeekCmd.Flags().Int32SliceVarP(&flagPeekPartitions, "partitions", "p", []int32{}, "Partitions to peek from") 53 | groupPeekCmd.Flags().Int64VarP(&flagPeekBefore, "before", "B", 0, "Number of messages to peek before current offset") 54 | groupPeekCmd.Flags().Int64VarP(&flagPeekAfter, "after", "A", 0, "Number of messages to peek after current offset") 55 | 56 | groupDescribeCmd.Flags().BoolVar(&flagNoMembers, "no-members", false, "Hide members section of the output") 57 | groupDescribeCmd.Flags().StringSliceVarP(&flagDescribeTopics, "topic", "t", []string{}, "topics to display for the group. 
defaults to all topics.") 58 | } 59 | 60 | const ( 61 | tabwriterMinWidth = 6 62 | tabwriterMinWidthNested = 2 63 | tabwriterWidth = 4 64 | tabwriterPadding = 3 65 | tabwriterPadChar = ' ' 66 | tabwriterFlags = 0 67 | ) 68 | 69 | var groupCmd = &cobra.Command{ 70 | Use: "group", 71 | Short: "Display information about consumer groups.", 72 | } 73 | 74 | var groupsCmd = &cobra.Command{ 75 | Use: "groups", 76 | Short: "List groups", 77 | Run: groupLsCmd.Run, 78 | } 79 | 80 | var groupDeleteCmd = &cobra.Command{ 81 | Use: "delete", 82 | Short: "Delete group", 83 | Args: cobra.MaximumNArgs(1), 84 | ValidArgsFunction: validGroupArgs, 85 | Run: func(cmd *cobra.Command, args []string) { 86 | admin := getClusterAdmin() 87 | var group string 88 | if len(args) == 1 { 89 | group = args[0] 90 | } 91 | err := admin.DeleteConsumerGroup(group) 92 | if err != nil { 93 | errorExit("Could not delete consumer group %v: %v\n", group, err.Error()) 94 | } else { 95 | fmt.Printf("Deleted consumer group %v.\n", group) 96 | } 97 | 98 | }, 99 | } 100 | 101 | type resetHandler struct { 102 | topic string 103 | partitionOffsets map[int32]int64 104 | offset int64 105 | client sarama.Client 106 | group string 107 | } 108 | 109 | func (r *resetHandler) Setup(s sarama.ConsumerGroupSession) error { 110 | req := &sarama.OffsetCommitRequest{ 111 | Version: 1, 112 | ConsumerGroup: r.group, 113 | ConsumerGroupGeneration: s.GenerationID(), 114 | ConsumerID: s.MemberID(), 115 | } 116 | 117 | for p, o := range r.partitionOffsets { 118 | req.AddBlock(r.topic, p, o, 0, "") 119 | } 120 | br, err := r.client.Coordinator(r.group) 121 | if err != nil { 122 | return err 123 | } 124 | _ = br.Open(getConfig()) 125 | _, err = br.CommitOffset(req) 126 | if err != nil { 127 | return err 128 | } 129 | return nil 130 | } 131 | 132 | func (r *resetHandler) Cleanup(s sarama.ConsumerGroupSession) error { 133 | return nil 134 | } 135 | 136 | func (r *resetHandler) ConsumeClaim(s sarama.ConsumerGroupSession, c sarama.ConsumerGroupClaim) error { 137 | return nil 138 | } 139 | 140 | func createGroupCommitOffsetCmd() *cobra.Command { 141 | var topic string 142 | var offset string 143 | var partitionFlag int32 144 | var allPartitions bool 145 | var offsetMap string 146 | var noconfirm bool 147 | res := &cobra.Command{ 148 | Use: "commit GROUP", 149 | Short: "Set offset for given consumer group", 150 | Long: "Set offset for a given consumer group, creates one if it does not exist. Offsets cannot be set on a consumer group with active consumers.", 151 | Args: cobra.ExactArgs(1), 152 | Run: func(cmd *cobra.Command, args []string) { 153 | client := getClient() 154 | 155 | group := args[0] 156 | partitionOffsets := make(map[int32]int64) 157 | 158 | if offsetMap != "" { 159 | if err := json.Unmarshal([]byte(offsetMap), &partitionOffsets); err != nil { 160 | errorExit("Wrong --offset-map format. 
Use JSON with keys as partition numbers and values as offsets.\nExample: --offset-map '{\"0\":123, \"1\":135, \"2\":120}'\n") 161 | } 162 | } else { 163 | var partitions []int32 164 | if allPartitions { 165 | // Determine partitions 166 | admin := getClusterAdmin() 167 | topicDetails, err := admin.DescribeTopics([]string{topic}) 168 | if err != nil { 169 | errorExit("Unable to determine partitions of topic: %v\n", err) 170 | } 171 | 172 | detail := topicDetails[0] 173 | 174 | for _, p := range detail.Partitions { 175 | partitions = append(partitions, p.ID) 176 | } 177 | } else if partitionFlag != -1 { 178 | partitions = []int32{partitionFlag} 179 | } else { 180 | errorExit("Either --partition, --all-partitions or --offset-map flag must be provided") 181 | } 182 | 183 | sort.Slice(partitions, func(i int, j int) bool { return partitions[i] < partitions[j] }) 184 | 185 | type Assignment struct { 186 | partition int32 187 | offset int64 188 | } 189 | assignments := make(chan Assignment, len(partitions)) 190 | 191 | // TODO offset must be calculated per partition 192 | var wg sync.WaitGroup 193 | for _, partition := range partitions { 194 | wg.Add(1) 195 | go func(partition int32) { 196 | defer wg.Done() 197 | i, err := strconv.ParseInt(offset, 10, 64) 198 | if err != nil { 199 | // Try oldest/newest/.. 200 | if offset == "oldest" || offset == "earliest" { 201 | i = sarama.OffsetOldest 202 | } else if offset == "newest" || offset == "latest" { 203 | i = sarama.OffsetNewest 204 | } else { 205 | // Try timestamp 206 | t, err := time.Parse(time.RFC3339, offset) 207 | if err != nil { 208 | errorExit("offset %q is neither a valid offset nor an RFC3339 timestamp", offset) 209 | } 210 | i = t.UnixMilli() // Kafka expects milliseconds since epoch 211 | } 212 | 213 | o, err := client.GetOffset(topic, partition, i) 214 | if err != nil { 215 | errorExit("Failed to determine offset for timestamp: %v", err) 216 | } 217 | 218 | if o == -1 { 219 | fmt.Printf("Partition %v: could not determine offset from timestamp. Skipping.\n", partition) 220 | return 221 | //errorExit("Determined offset -1 from timestamp. 
Skipping.", o) 222 | } 223 | 224 | assignments <- Assignment{partition: partition, offset: o} 225 | 226 | fmt.Printf("Partition %v: determined offset %v from timestamp.\n", partition, o) 227 | } else { 228 | assignments <- Assignment{partition: partition, offset: i} 229 | } 230 | }(partition) 231 | } 232 | wg.Wait() 233 | close(assignments) 234 | 235 | for assign := range assignments { 236 | partitionOffsets[assign.partition] = assign.offset 237 | } 238 | } 239 | 240 | // Verify the consumer group is empty 241 | admin := getClusterAdmin() 242 | groupDescs, err := admin.DescribeConsumerGroups([]string{args[0]}) 243 | if err != nil { 244 | errorExit("Unable to describe consumer groups: %v\n", err) 245 | } 246 | for _, detail := range groupDescs { 247 | state := detail.State 248 | if !slices.Contains([]string{"Empty", "Dead"}, state) { 249 | errorExit("Consumer group %s has active consumers in it, cannot set offset\n", group) 250 | } 251 | } 252 | 253 | fmt.Printf("Resetting offsets to: %v\n", partitionOffsets) 254 | 255 | if !noconfirm { 256 | prompt := promptui.Prompt{ 257 | Label: "Reset offsets as described", 258 | IsConfirm: true, 259 | } 260 | 261 | _, err := prompt.Run() 262 | if err != nil { 263 | errorExit("Aborted, exiting.\n") 264 | return 265 | } 266 | } 267 | 268 | g, err := sarama.NewConsumerGroupFromClient(group, client) 269 | if err != nil { 270 | errorExit("Failed to create consumer group: %v\n", err) 271 | } 272 | 273 | err = g.Consume(context.Background(), []string{topic}, &resetHandler{ 274 | topic: topic, 275 | partitionOffsets: partitionOffsets, 276 | client: client, 277 | group: group, 278 | }) 279 | if err != nil { 280 | errorExit("Failed to commit offset: %v\n", err) 281 | } 282 | 283 | fmt.Printf("Successfully committed offsets to %v.\n", partitionOffsets) 284 | 285 | closeErr := g.Close() 286 | if closeErr != nil { 287 | fmt.Printf("Warning: Failed to close consumer group: %v\n", closeErr) 288 | } 289 | }, 290 | } 291 | res.Flags().StringVarP(&topic, "topic", "t", "", "topic") 292 | res.Flags().StringVarP(&offset, "offset", "o", "", "offset to commit") 293 | res.Flags().Int32VarP(&partitionFlag, "partition", "p", -1, "partition to commit offset to") 294 | res.Flags().BoolVar(&allPartitions, "all-partitions", false, "apply to all partitions") 295 | res.Flags().StringVar(&offsetMap, "offset-map", "", "Set offsets per partition in JSON format, e.g. 
{\"0\": 123, \"1\": 42}") 296 | res.Flags().BoolVar(&noconfirm, "noconfirm", false, "Do not prompt for confirmation") 297 | return res 298 | } 299 | 300 | var groupLsCmd = &cobra.Command{ 301 | Use: "ls", 302 | Short: "List groups", 303 | Args: cobra.NoArgs, 304 | Run: func(cmd *cobra.Command, args []string) { 305 | admin := getClusterAdmin() 306 | 307 | groups, err := admin.ListConsumerGroups() 308 | if err != nil { 309 | errorExit("Unable to list consumer groups: %v\n", err) 310 | } 311 | 312 | groupList := make([]string, 0, len(groups)) 313 | for grp := range groups { 314 | groupList = append(groupList, grp) 315 | } 316 | 317 | sort.Slice(groupList, func(i int, j int) bool { 318 | return groupList[i] < groupList[j] 319 | }) 320 | 321 | w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) 322 | 323 | groupDescs, err := admin.DescribeConsumerGroups(groupList) 324 | if err != nil { 325 | // if we can retrieve list of consumer group, but unable to describe consumer groups 326 | // fallback to only list group name without state 327 | if !noHeaderFlag { 328 | fmt.Fprintf(w, "NAME\n") 329 | } 330 | 331 | for _, group := range groupList { 332 | fmt.Fprintf(w, "%v\n", group) 333 | } 334 | } else { 335 | // return consumer group information with state 336 | if !noHeaderFlag { 337 | fmt.Fprintf(w, "NAME\tSTATE\tCONSUMERS\t\n") 338 | } 339 | 340 | for _, detail := range groupDescs { 341 | state := detail.State 342 | consumers := len(detail.Members) 343 | fmt.Fprintf(w, "%v\t%v\t%v\t\n", detail.GroupId, state, consumers) 344 | } 345 | } 346 | w.Flush() 347 | }, 348 | } 349 | 350 | var groupPeekCmd = &cobra.Command{ 351 | Use: "peek", 352 | Short: "Peek messages from consumer group offset", 353 | Args: cobra.ExactArgs(1), 354 | ValidArgsFunction: validGroupArgs, 355 | Run: func(cmd *cobra.Command, args []string) { 356 | admin := getClusterAdmin() 357 | 358 | groups, err := admin.DescribeConsumerGroups([]string{args[0]}) 359 | if err != nil { 360 | errorExit("Unable to describe consumer groups: %v\n", err) 361 | } 362 | 363 | if len(groups) == 0 { 364 | errorExit("Did not receive expected describe consumergroup result\n") 365 | } 366 | group := groups[0] 367 | 368 | if group.State == "Dead" { 369 | fmt.Printf("Group %v not found.\n", args[0]) 370 | return 371 | } 372 | 373 | peekPartitions := make(map[int32]struct{}) 374 | for _, partition := range flagPeekPartitions { 375 | peekPartitions[partition] = struct{}{} 376 | } 377 | 378 | var topicPartitions map[string][]int32 379 | if len(flagPeekTopics) > 0 { 380 | topicPartitions = make(map[string][]int32, len(flagPeekTopics)) 381 | } 382 | for _, topic := range flagPeekTopics { 383 | topicDetails, err := admin.DescribeTopics([]string{topic}) 384 | if err != nil { 385 | errorExit("Unable to describe topics: %v\n", err) 386 | } 387 | 388 | detail := topicDetails[0] 389 | if detail.Err == sarama.ErrUnknownTopicOrPartition { 390 | fmt.Printf("Topic %v not found.\n", topic) 391 | return 392 | } 393 | 394 | if len(flagPeekPartitions) > 0 { 395 | topicPartitions[topic] = flagPeekPartitions 396 | } else { 397 | partitions := make([]int32, 0, len(detail.Partitions)) 398 | for _, partition := range detail.Partitions { 399 | partitions = append(partitions, partition.ID) 400 | } 401 | topicPartitions[topic] = partitions 402 | } 403 | } 404 | 405 | offsetAndMetadata, err := admin.ListConsumerGroupOffsets(args[0], topicPartitions) 406 | if err != nil { 407 | errorExit("Failed to fetch group 
offsets: %v\n", err) 408 | } 409 | 410 | cfg := getConfig() 411 | client := getClientFromConfig(cfg) 412 | consumer, err := sarama.NewConsumerFromClient(client) 413 | if err != nil { 414 | errorExit("Unable to create consumer from client: %v\n", err) 415 | } 416 | 417 | mu := &sync.Mutex{} 418 | wg := &sync.WaitGroup{} 419 | 420 | for topic, partitions := range offsetAndMetadata.Blocks { 421 | for partition, offset := range partitions { 422 | if len(peekPartitions) > 0 { 423 | _, ok := peekPartitions[partition] 424 | if !ok { 425 | continue 426 | } 427 | } 428 | 429 | wg.Add(1) 430 | go func(topic string, partition int32, offset int64) { 431 | defer wg.Done() 432 | var start int64 433 | if offset > flagPeekBefore { 434 | start = offset - flagPeekBefore 435 | } 436 | 437 | pc, err := consumer.ConsumePartition(topic, partition, start) 438 | if err != nil { 439 | errorExit("Unable to consume partition: %v %v %v %v\n", topic, partition, offset, err) 440 | } 441 | 442 | for { 443 | select { 444 | case <-cmd.Context().Done(): 445 | return 446 | case msg := <-pc.Messages(): 447 | handleMessage(msg, mu) 448 | if msg.Offset >= offset+flagPeekAfter { 449 | return 450 | } 451 | } 452 | } 453 | }(topic, partition, offset.Offset) 454 | } 455 | } 456 | wg.Wait() 457 | }, 458 | } 459 | 460 | var groupDescribeCmd = &cobra.Command{ 461 | Use: "describe", 462 | Short: "Describe consumer group", 463 | Args: cobra.ExactArgs(1), 464 | ValidArgsFunction: validGroupArgs, 465 | Run: func(cmd *cobra.Command, args []string) { 466 | // TODO List: This API can be used to find the current groups managed by a broker. To get a list of all groups in the cluster, you must send ListGroup to all brokers. 467 | // same goes probably for topics 468 | admin := getClusterAdmin() 469 | 470 | groups, err := admin.DescribeConsumerGroups([]string{args[0]}) 471 | if err != nil { 472 | errorExit("Unable to describe consumer groups: %v\n", err) 473 | } 474 | 475 | if len(groups) == 0 { 476 | errorExit("Did not receive expected describe consumergroup result\n") 477 | } 478 | group := groups[0] 479 | 480 | if group.State == "Dead" { 481 | fmt.Printf("Group %v not found.\n", args[0]) 482 | return 483 | } 484 | 485 | w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) 486 | fmt.Fprintf(w, "Group ID:\t%v\n", group.GroupId) 487 | fmt.Fprintf(w, "State:\t%v\n", group.State) 488 | fmt.Fprintf(w, "Protocol:\t%v\n", group.Protocol) 489 | fmt.Fprintf(w, "Protocol Type:\t%v\n", group.ProtocolType) 490 | 491 | fmt.Fprintf(w, "Offsets:\t\n") 492 | 493 | w.Flush() 494 | w.Init(outWriter, tabwriterMinWidthNested, 4, 2, tabwriterPadChar, tabwriterFlags) 495 | 496 | offsetAndMetadata, err := admin.ListConsumerGroupOffsets(args[0], nil) 497 | if err != nil { 498 | errorExit("Failed to fetch group offsets: %v\n", err) 499 | } 500 | 501 | topics := make([]string, 0, len(offsetAndMetadata.Blocks)) 502 | for k := range offsetAndMetadata.Blocks { 503 | topics = append(topics, k) 504 | } 505 | sort.Strings(topics) 506 | 507 | for _, topic := range topics { 508 | partitions := offsetAndMetadata.Blocks[topic] 509 | if len(flagDescribeTopics) > 0 { 510 | var found bool 511 | for _, topicToShow := range flagDescribeTopics { 512 | if topic == topicToShow { 513 | found = true 514 | } 515 | } 516 | 517 | if !found { 518 | continue 519 | } 520 | } 521 | fmt.Fprintf(w, "\t%v:\n", topic) 522 | fmt.Fprintf(w, "\t\tPartition\tGroup Offset\tHigh Watermark\tLag\tMetadata\t\n") 523 | fmt.Fprintf(w, 
"\t\t---------\t------------\t--------------\t---\t--------\n") 524 | 525 | var p []int32 526 | 527 | for partition := range partitions { 528 | p = append(p, partition) 529 | } 530 | 531 | sort.Slice(p, func(i, j int) bool { 532 | return p[i] < p[j] 533 | }) 534 | 535 | wms := getHighWatermarks(topic, p) 536 | 537 | lagSum := 0 538 | offsetSum := 0 539 | for _, partition := range p { 540 | lag := (wms[partition] - partitions[partition].Offset) 541 | lagSum += int(lag) 542 | offset := partitions[partition].Offset 543 | offsetSum += int(offset) 544 | fmt.Fprintf(w, "\t\t%v\t%v\t%v\t%v\t%v\n", partition, partitions[partition].Offset, wms[partition], (wms[partition] - partitions[partition].Offset), partitions[partition].Metadata) 545 | } 546 | 547 | fmt.Fprintf(w, "\t\tTotal\t%d\t\t%d\t\n", offsetSum, lagSum) 548 | } 549 | 550 | if !flagNoMembers { 551 | 552 | fmt.Fprintf(w, "Members:\t") 553 | 554 | w.Flush() 555 | w.Init(outWriter, tabwriterMinWidthNested, 4, 2, tabwriterPadChar, tabwriterFlags) 556 | 557 | fmt.Fprintln(w) 558 | for _, member := range group.Members { 559 | fmt.Fprintf(w, "\t%v:\n", member.ClientId) 560 | fmt.Fprintf(w, "\t\tHost:\t%v\n", member.ClientHost) 561 | 562 | assignment, err := member.GetMemberAssignment() 563 | if err != nil || assignment == nil { 564 | continue 565 | } 566 | 567 | fmt.Fprintf(w, "\t\tAssignments:\n") 568 | 569 | fmt.Fprintf(w, "\t\t Topic\tPartitions\t\n") 570 | fmt.Fprintf(w, "\t\t -----\t----------\t") 571 | 572 | for topic, partitions := range assignment.Topics { 573 | fmt.Fprintf(w, "\n\t\t %v\t%v\t", topic, partitions) 574 | } 575 | 576 | metadata, err := member.GetMemberMetadata() 577 | if err != nil { 578 | fmt.Fprintf(w, "\n") 579 | continue 580 | } 581 | 582 | decodedUserData, err := tryDecodeUserData(group.Protocol, metadata.UserData) 583 | if err != nil { 584 | if IsASCIIPrintable(string(metadata.UserData)) { 585 | fmt.Fprintf(w, "\f\t\tMetadata:\t%v\n", string(metadata.UserData)) 586 | } else { 587 | 588 | fmt.Fprintf(w, "\f\t\tMetadata:\t%v\n", base64.StdEncoding.EncodeToString(metadata.UserData)) 589 | } 590 | } else { 591 | switch d := decodedUserData.(type) { 592 | case streams.SubscriptionInfo: 593 | fmt.Fprintf(w, "\f\t\tMetadata:\t\n") 594 | fmt.Fprintf(w, "\t\t UUID:\t0x%v\n", hex.EncodeToString(d.UUID)) 595 | fmt.Fprintf(w, "\t\t UserEndpoint:\t%v\n", d.UserEndpoint) 596 | } 597 | } 598 | 599 | fmt.Fprintf(w, "\n") 600 | 601 | } 602 | } 603 | 604 | w.Flush() 605 | 606 | }, 607 | } 608 | 609 | func getHighWatermarks(topic string, partitions []int32) (watermarks map[int32]int64) { 610 | client := getClient() 611 | leaders := make(map[*sarama.Broker][]int32) 612 | 613 | for _, partition := range partitions { 614 | leader, err := client.Leader(topic, partition) 615 | if err != nil { 616 | errorExit("Unable to get available offsets for partition without leader. 
Topic %s Partition %d, Error: %s ", topic, partition, err) 617 | } 618 | leaders[leader] = append(leaders[leader], partition) 619 | } 620 | wg := sync.WaitGroup{} 621 | wg.Add(len(leaders)) 622 | 623 | results := make(chan map[int32]int64, len(leaders)) 624 | 625 | for leader, partitions := range leaders { 626 | req := &sarama.OffsetRequest{ 627 | Version: int16(1), 628 | } 629 | 630 | for _, partition := range partitions { 631 | req.AddBlock(topic, partition, int64(-1), int32(0)) 632 | } 633 | 634 | // Query distinct brokers in parallel 635 | go func(leader *sarama.Broker, req *sarama.OffsetRequest) { 636 | resp, err := leader.GetAvailableOffsets(req) 637 | if err != nil { 638 | errorExit("Unable to get available offsets: %v\n", err) 639 | } 640 | 641 | watermarksFromLeader := make(map[int32]int64) 642 | for partition, block := range resp.Blocks[topic] { 643 | watermarksFromLeader[partition] = block.Offset 644 | } 645 | 646 | results <- watermarksFromLeader 647 | wg.Done() 648 | 649 | }(leader, req) 650 | 651 | } 652 | 653 | wg.Wait() 654 | close(results) 655 | 656 | watermarks = make(map[int32]int64) 657 | for resultMap := range results { 658 | for partition, offset := range resultMap { 659 | watermarks[partition] = offset 660 | } 661 | } 662 | 663 | return 664 | } 665 | 666 | // IsASCIIPrintable returns true if the string is ASCII printable. 667 | func IsASCIIPrintable(s string) bool { 668 | for _, r := range s { 669 | if r > unicode.MaxASCII || !unicode.IsPrint(r) { 670 | return false 671 | } 672 | } 673 | return true 674 | } 675 | 676 | func tryDecodeUserData(protocol string, raw []byte) (data interface{}, err error) { 677 | // Interpret userdata here 678 | decoder := streams.NewDecoder(raw) 679 | 680 | switch protocol { 681 | case "stream": 682 | subscriptionInfo := streams.SubscriptionInfo{} 683 | err = subscriptionInfo.Decode(decoder) 684 | if err != nil { 685 | return nil, err 686 | } 687 | return subscriptionInfo, nil 688 | default: 689 | return nil, errors.New("unknown protocol") 690 | } 691 | } 692 | -------------------------------------------------------------------------------- /cmd/kaf/kaf.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | 7 | "crypto/tls" 8 | "crypto/x509" 9 | "log" 10 | "os" 11 | 12 | "github.com/IBM/sarama" 13 | "github.com/mattn/go-colorable" 14 | "github.com/spf13/cobra" 15 | 16 | "github.com/birdayz/kaf/pkg/avro" 17 | "github.com/birdayz/kaf/pkg/config" 18 | "github.com/birdayz/kaf/pkg/proto" 19 | ) 20 | 21 | var cfgFile string 22 | 23 | func getConfig() (saramaConfig *sarama.Config) { 24 | saramaConfig = sarama.NewConfig() 25 | saramaConfig.Version = sarama.V1_1_0_0 26 | saramaConfig.Producer.Return.Successes = true 27 | 28 | cluster := currentCluster 29 | if cluster.Version != "" { 30 | parsedVersion, err := sarama.ParseKafkaVersion(cluster.Version) 31 | if err != nil { 32 | errorExit("Unable to parse Kafka version: %v\n", err) 33 | } 34 | saramaConfig.Version = parsedVersion 35 | } 36 | if cluster.SASL != nil { 37 | saramaConfig.Net.SASL.Enable = true 38 | if cluster.SASL.Mechanism != "OAUTHBEARER" { 39 | saramaConfig.Net.SASL.User = cluster.SASL.Username 40 | saramaConfig.Net.SASL.Password = cluster.SASL.Password 41 | } 42 | saramaConfig.Net.SASL.Version = cluster.SASL.Version 43 | } 44 | if cluster.TLS != nil && cluster.SecurityProtocol != "SASL_SSL" { 45 | saramaConfig.Net.TLS.Enable = true 46 | tlsConfig := &tls.Config{ 47 | InsecureSkipVerify: 
cluster.TLS.Insecure, 48 | } 49 | 50 | if cluster.TLS.Cafile != "" { 51 | caCert, err := os.ReadFile(cluster.TLS.Cafile) 52 | if err != nil { 53 | errorExit("Unable to read Cafile: %v\n", err) 54 | } 55 | caCertPool := x509.NewCertPool() 56 | caCertPool.AppendCertsFromPEM(caCert) 57 | tlsConfig.RootCAs = caCertPool 58 | } 59 | 60 | if cluster.TLS.Clientfile != "" && cluster.TLS.Clientkeyfile != "" { 61 | clientCert, err := os.ReadFile(cluster.TLS.Clientfile) 62 | if err != nil { 63 | errorExit("Unable to read Clientfile: %v\n", err) 64 | } 65 | clientKey, err := os.ReadFile(cluster.TLS.Clientkeyfile) 66 | if err != nil { 67 | errorExit("Unable to read Clientkeyfile: %v\n", err) 68 | } 69 | 70 | cert, err := tls.X509KeyPair(clientCert, clientKey) 71 | if err != nil { 72 | errorExit("Unable to create KeyPair: %v\n", err) 73 | } 74 | tlsConfig.Certificates = []tls.Certificate{cert} 75 | 76 | // nolint 77 | tlsConfig.BuildNameToCertificate() 78 | } 79 | saramaConfig.Net.TLS.Config = tlsConfig 80 | } 81 | if cluster.SecurityProtocol == "SASL_SSL" { 82 | saramaConfig.Net.TLS.Enable = true 83 | if cluster.TLS != nil { 84 | tlsConfig := &tls.Config{ 85 | InsecureSkipVerify: cluster.TLS.Insecure, 86 | } 87 | if cluster.TLS.Cafile != "" { 88 | caCert, err := os.ReadFile(cluster.TLS.Cafile) 89 | if err != nil { 90 | fmt.Println(err) 91 | os.Exit(1) 92 | } 93 | caCertPool := x509.NewCertPool() 94 | caCertPool.AppendCertsFromPEM(caCert) 95 | tlsConfig.RootCAs = caCertPool 96 | } 97 | saramaConfig.Net.TLS.Config = tlsConfig 98 | 99 | } else { 100 | saramaConfig.Net.TLS.Config = &tls.Config{InsecureSkipVerify: false} 101 | } 102 | } 103 | if cluster.SecurityProtocol == "SASL_SSL" || cluster.SecurityProtocol == "SASL_PLAINTEXT" { 104 | if cluster.SASL.Mechanism == "SCRAM-SHA-512" { 105 | saramaConfig.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} } 106 | saramaConfig.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512) 107 | } else if cluster.SASL.Mechanism == "SCRAM-SHA-256" { 108 | saramaConfig.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} } 109 | saramaConfig.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256) 110 | } else if cluster.SASL.Mechanism == "OAUTHBEARER" || cluster.SASL.Mechanism == "AWS_MSK_IAM" { 111 | // Set up the provider that fetches (and refreshes) the SASL token 112 | saramaConfig.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeOAuth) 113 | saramaConfig.Net.SASL.TokenProvider = newTokenProvider() 114 | } 115 | } 116 | return saramaConfig 117 | } 118 | 119 | var ( 120 | outWriter io.Writer = os.Stdout 121 | errWriter io.Writer = os.Stderr 122 | inReader io.Reader = os.Stdin 123 | 124 | colorableOut io.Writer = colorable.NewColorableStdout() 125 | ) 126 | 127 | // Will be replaced by GitHub action and by goreleaser 128 | // see https://goreleaser.com/customization/build/ 129 | var commit string = "HEAD" 130 | var version string = "latest" 131 | 132 | var rootCmd = &cobra.Command{ 133 | Use: "kaf", 134 | Short: "Kafka Command Line utility for cluster management", 135 | Version: fmt.Sprintf("%s (%s)", version, commit), 136 | PersistentPreRun: func(cmd *cobra.Command, args []string) { 137 | outWriter = cmd.OutOrStdout() 138 | errWriter = cmd.ErrOrStderr() 139 | inReader = cmd.InOrStdin() 140 | 141 | if outWriter != os.Stdout { 142 | colorableOut = outWriter 143 | } 144 | }, 145 | } 146 | 147 | func main() { 148 | if err := 
rootCmd.Execute(); err != nil { 149 | fmt.Println(err) 150 | os.Exit(1) 151 | } 152 | } 153 | 154 | var cfg config.Config 155 | var currentCluster *config.Cluster 156 | 157 | var ( 158 | brokersFlag []string 159 | schemaRegistryURL string 160 | protoFiles []string 161 | protoExclude []string 162 | decodeMsgPack bool 163 | verbose bool 164 | clusterOverride string 165 | ) 166 | 167 | func init() { 168 | rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.kaf/config)") 169 | rootCmd.PersistentFlags().StringSliceVarP(&brokersFlag, "brokers", "b", nil, "Comma separated list of broker ip:port pairs") 170 | rootCmd.PersistentFlags().StringVar(&schemaRegistryURL, "schema-registry", "", "URL to a Confluent schema registry. Used for attempting to decode Avro-encoded messages") 171 | rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Whether to turn on sarama logging") 172 | rootCmd.PersistentFlags().StringVarP(&clusterOverride, "cluster", "c", "", "set a temporary current cluster") 173 | cobra.OnInitialize(onInit) 174 | } 175 | 176 | var setupProtoDescriptorRegistry = func(cmd *cobra.Command, args []string) { 177 | if protoType != "" { 178 | r, err := proto.NewDescriptorRegistry(protoFiles, protoExclude) 179 | if err != nil { 180 | errorExit("Failed to load protobuf files: %v\n", err) 181 | } 182 | reg = r 183 | } 184 | } 185 | 186 | func onInit() { 187 | var err error 188 | cfg, err = config.ReadConfig(cfgFile) 189 | if err != nil { 190 | errorExit("Invalid config: %v", err) 191 | } 192 | 193 | cfg.ClusterOverride = clusterOverride 194 | 195 | cluster := cfg.ActiveCluster() 196 | if cluster != nil { 197 | // Use active cluster from config 198 | currentCluster = cluster 199 | } else { 200 | // Create sane default if not configured 201 | currentCluster = &config.Cluster{ 202 | Brokers: []string{"localhost:9092"}, 203 | } 204 | } 205 | 206 | // Any set flags override the configuration 207 | if schemaRegistryURL != "" { 208 | currentCluster.SchemaRegistryURL = schemaRegistryURL 209 | currentCluster.SchemaRegistryCredentials = nil 210 | } 211 | 212 | if brokersFlag != nil { 213 | currentCluster.Brokers = brokersFlag 214 | } 215 | 216 | if verbose { 217 | sarama.Logger = log.New(errWriter, "[sarama] ", log.Lshortfile|log.LstdFlags) 218 | } 219 | } 220 | 221 | func getClusterAdmin() (admin sarama.ClusterAdmin) { 222 | clusterAdmin, err := sarama.NewClusterAdmin(currentCluster.Brokers, getConfig()) 223 | if err != nil { 224 | errorExit("Unable to get cluster admin: %v\n", err) 225 | } 226 | 227 | return clusterAdmin 228 | } 229 | 230 | func getClient() (client sarama.Client) { 231 | client, err := sarama.NewClient(currentCluster.Brokers, getConfig()) 232 | if err != nil { 233 | errorExit("Unable to get client: %v\n", err) 234 | } 235 | return client 236 | } 237 | 238 | func getClientFromConfig(config *sarama.Config) (client sarama.Client) { 239 | client, err := sarama.NewClient(currentCluster.Brokers, config) 240 | if err != nil { 241 | errorExit("Unable to get client: %v\n", err) 242 | } 243 | return client 244 | } 245 | 246 | func getSchemaCache() (cache *avro.SchemaCache) { 247 | if currentCluster.SchemaRegistryURL == "" { 248 | return nil 249 | } 250 | var username, password string 251 | if creds := currentCluster.SchemaRegistryCredentials; creds != nil { 252 | username = creds.Username 253 | password = creds.Password 254 | } 255 | cache, err := avro.NewSchemaCache(currentCluster.SchemaRegistryURL, username, password) 256 | if err != nil { 257 
| errorExit("Unable to get schema cache: %v\n", err) 258 | } 259 | return cache 260 | } 261 | 262 | func errorExit(format string, a ...interface{}) { 263 | fmt.Fprintf(errWriter, format+"\n", a...) 264 | os.Exit(1) 265 | } 266 | -------------------------------------------------------------------------------- /cmd/kaf/main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "io" 7 | "os" 8 | "testing" 9 | "time" 10 | 11 | "github.com/orlangure/gnomock" 12 | "github.com/orlangure/gnomock/preset/kafka" 13 | "github.com/stretchr/testify/require" 14 | ) 15 | 16 | var kafkaAddr string 17 | 18 | func TestMain(m *testing.M) { 19 | os.Exit(testMain(m)) 20 | } 21 | 22 | func testMain(m *testing.M) (code int) { 23 | c, err := gnomock.Start( 24 | kafka.Preset(kafka.WithTopics("kaf-testing", "gnomock-kafka")), 25 | gnomock.WithContainerName("kaf-kafka"), 26 | ) 27 | if err != nil { 28 | return 1 29 | } 30 | 31 | defer func() { 32 | stopErr := gnomock.Stop(c) 33 | if stopErr != nil { 34 | code = 1 35 | } 36 | }() 37 | 38 | kafkaAddr = c.Address(kafka.BrokerPort) 39 | 40 | return m.Run() 41 | } 42 | 43 | func runCmd(t *testing.T, in io.Reader, args ...string) string { 44 | ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) 45 | defer cancel() 46 | 47 | b := bytes.NewBufferString("") 48 | 49 | rootCmd.SetArgs(args) 50 | rootCmd.SetOut(b) 51 | rootCmd.SetErr(b) 52 | rootCmd.SetIn(in) 53 | 54 | require.NoError(t, rootCmd.ExecuteContext(ctx)) 55 | 56 | bs, err := io.ReadAll(b) 57 | require.NoError(t, err) 58 | 59 | return string(bs) 60 | } 61 | 62 | func runCmdWithBroker(t *testing.T, in io.Reader, args ...string) string { 63 | args = append([]string{"-b", kafkaAddr}, args...) 64 | return runCmd(t, in, args...) 
65 | } 66 | -------------------------------------------------------------------------------- /cmd/kaf/node.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "text/tabwriter" 6 | 7 | "sort" 8 | 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | func init() { 13 | rootCmd.AddCommand(nodeCommand) 14 | rootCmd.AddCommand(nodesCommand) 15 | nodeCommand.AddCommand(nodeLsCommand) 16 | nodeLsCommand.Flags().BoolVar(&noHeaderFlag, "no-headers", false, "Hide table headers") 17 | } 18 | 19 | var nodesCommand = &cobra.Command{ 20 | Use: "nodes", 21 | Short: "List nodes in a cluster", 22 | Run: nodeLsCommand.Run, 23 | } 24 | 25 | var nodeCommand = &cobra.Command{ 26 | Use: "node", 27 | Short: "Describe and List nodes", 28 | } 29 | 30 | var nodeLsCommand = &cobra.Command{ 31 | Use: "ls", 32 | Short: "List nodes in a cluster", 33 | Run: func(cmd *cobra.Command, args []string) { 34 | admin := getClusterAdmin() 35 | 36 | brokers, ctlID, err := admin.DescribeCluster() 37 | if err != nil { 38 | errorExit("Unable to describe cluster: %v\n", err) 39 | } 40 | 41 | sort.Slice(brokers, func(i, j int) bool { 42 | return brokers[i].ID() < brokers[j].ID() 43 | }) 44 | 45 | w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) 46 | if !noHeaderFlag { 47 | _, _ = fmt.Fprintf(w, "ID\tADDRESS\tCONTROLLER\t\n") 48 | } 49 | 50 | for _, broker := range brokers { 51 | _, _ = fmt.Fprintf(w, "%v\t%v\t%v\t\n", broker.ID(), broker.Addr(), broker.ID() == ctlID) 52 | } 53 | 54 | w.Flush() 55 | }, 56 | } 57 | -------------------------------------------------------------------------------- /cmd/kaf/node_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestNode(t *testing.T) { 10 | out := runCmdWithBroker(t, nil, "node", "ls") 11 | require.Contains(t, out, kafkaAddr) 12 | } 13 | -------------------------------------------------------------------------------- /cmd/kaf/nodes_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestNodes(t *testing.T) { 10 | out := runCmdWithBroker(t, nil, "nodes") 11 | require.Contains(t, out, kafkaAddr) 12 | } 13 | -------------------------------------------------------------------------------- /cmd/kaf/oauth.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "sync" 7 | "time" 8 | 9 | "github.com/IBM/sarama" 10 | aws_signer "github.com/aws/aws-msk-iam-sasl-signer-go/signer" 11 | aws_config "github.com/aws/aws-sdk-go-v2/config" 12 | "golang.org/x/oauth2" 13 | "golang.org/x/oauth2/clientcredentials" 14 | ) 15 | 16 | var ( 17 | once sync.Once 18 | tokenProv *tokenProvider 19 | refreshBuffer time.Duration = time.Second * 20 20 | tokenFetchTimeout time.Duration = time.Second * 10 21 | ) 22 | 23 | var _ sarama.AccessTokenProvider = &tokenProvider{} 24 | 25 | type tokenProvider struct { 26 | // refreshMutex is used to ensure that tokens are not refreshed concurrently. 27 | refreshMutex sync.Mutex 28 | // The time at which the token expires. 29 | expiresAt time.Time 30 | // The time at which the token should be replaced. 
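// It is set to expiresAt minus refreshBuffer, so a refresh kicks in shortly before the token actually expires (see refreshToken below).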
31 | replaceAt time.Time 32 | // The currently cached token value. 33 | currentToken string 34 | // ctx for token fetching 35 | ctx context.Context 36 | // client-credentials config used to fetch tokens from the configured token URL 37 | oauthClientCFG *clientcredentials.Config 38 | // staticToken is true when the token is fixed (config-provided or AWS MSK IAM) and never refreshed 39 | staticToken bool 40 | } 41 | 42 | // newTokenProvider initializes the provider once and returns the shared instance 43 | func newTokenProvider() *tokenProvider { 44 | once.Do(func() { 45 | cluster := currentCluster 46 | ctx := context.Background() 47 | 48 | // token either from tokenURL, static or AWS API 49 | if cluster.SASL.Mechanism == "AWS_MSK_IAM" { 50 | cfg, err := aws_config.LoadDefaultConfig(ctx) 51 | if err != nil { 52 | errorExit("Could not load AWS config: " + err.Error()) 53 | } 54 | token, _, err := aws_signer.GenerateAuthToken(ctx, cfg.Region) 55 | if err != nil { 56 | errorExit("Could not generate auth token: " + err.Error()) 57 | } 58 | tokenProv = &tokenProvider{ 59 | oauthClientCFG: &clientcredentials.Config{}, 60 | staticToken: true, 61 | currentToken: token, 62 | } 63 | } else if len(cluster.SASL.Token) != 0 { 64 | tokenProv = &tokenProvider{ 65 | oauthClientCFG: &clientcredentials.Config{}, 66 | staticToken: true, 67 | currentToken: cluster.SASL.Token, 68 | } 69 | } else { 70 | tokenProv = &tokenProvider{ 71 | oauthClientCFG: &clientcredentials.Config{ 72 | ClientID: cluster.SASL.ClientID, 73 | ClientSecret: cluster.SASL.ClientSecret, 74 | TokenURL: cluster.SASL.TokenURL, 75 | Scopes: cluster.SASL.Scopes, 76 | }, 77 | staticToken: false, 78 | } 79 | } 80 | if !tokenProv.staticToken { 81 | // create context with timeout 82 | httpClient := &http.Client{Timeout: tokenFetchTimeout} 83 | ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient) 84 | tokenProv.ctx = ctx 85 | 86 | // get first token 87 | firstToken, err := tokenProv.oauthClientCFG.Token(ctx) 88 | if err != nil { 89 | errorExit("Could not fetch OAUTH token: " + err.Error()) 90 | } 91 | tokenProv.currentToken = firstToken.AccessToken 92 | tokenProv.expiresAt = firstToken.Expiry 93 | tokenProv.replaceAt = firstToken.Expiry.Add(-refreshBuffer) 94 | } 95 | }) 96 | return tokenProv 97 | } 98 | 99 | func (tp *tokenProvider) Token() (*sarama.AccessToken, error) { 100 | 101 | if !tp.staticToken { 102 | if time.Now().After(tp.replaceAt) { 103 | if err := tp.refreshToken(); err != nil { 104 | return nil, err 105 | } 106 | 107 | } 108 | } 109 | return &sarama.AccessToken{ 110 | Token: tp.currentToken, 111 | Extensions: nil, 112 | }, nil 113 | } 114 | 115 | func (tp *tokenProvider) refreshToken() error { 116 | // Get a lock on the update 117 | tp.refreshMutex.Lock() 118 | defer tp.refreshMutex.Unlock() 119 | 120 | // Check whether another call refreshed the token while waiting for the lock to be acquired here 121 | if time.Now().Before(tp.replaceAt) { 122 | return nil 123 | } 124 | 125 | token, err := tp.oauthClientCFG.Token(tp.ctx) 126 | if err != nil { 127 | return err 128 | } 129 | // Save the token 130 | tp.currentToken = token.AccessToken 131 | tp.expiresAt = token.Expiry 132 | tp.replaceAt = token.Expiry.Add(-refreshBuffer) 133 | return nil 134 | } 135 | -------------------------------------------------------------------------------- /cmd/kaf/produce.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "encoding/base64" 7 | "fmt" 8 | "io" 9 | "os" 10 | "strings" 11 | "text/template" 12 | 13 | "time" 14 | 15 | "github.com/IBM/sarama" 16 | "github.com/Masterminds/sprig" 17 | "github.com/birdayz/kaf/pkg/partitioner" 18 | pb 
"github.com/golang/protobuf/proto" 19 | "github.com/spf13/cobra" 20 | ) 21 | 22 | var ( 23 | keyFlag string 24 | rawKeyFlag bool 25 | headerFlag []string 26 | repeatFlag int 27 | partitionerFlag string 28 | timestampFlag string 29 | partitionFlag int32 30 | bufferSizeFlag int 31 | inputModeFlag string 32 | avroSchemaID int 33 | avroKeySchemaID int 34 | templateFlag bool 35 | ) 36 | 37 | func init() { 38 | rootCmd.AddCommand(produceCmd) 39 | 40 | produceCmd.Flags().StringVarP(&keyFlag, "key", "k", "", "Key for the record. Currently only strings are supported.") 41 | produceCmd.Flags().BoolVar(&rawKeyFlag, "raw-key", false, "Treat value of --key as base64 and use its decoded raw value as key") 42 | produceCmd.Flags().StringArrayVarP(&headerFlag, "header", "H", []string{}, "Header in format <key>:<value>. May be used multiple times to add more headers.") 43 | produceCmd.Flags().IntVarP(&repeatFlag, "repeat", "n", 1, "Number of times to send each record.") 44 | 45 | produceCmd.Flags().StringSliceVar(&protoFiles, "proto-include", []string{}, "Path to proto files") 46 | produceCmd.Flags().StringSliceVar(&protoExclude, "proto-exclude", []string{}, "Proto exclusions (path prefixes)") 47 | produceCmd.Flags().StringVar(&protoType, "proto-type", "", "Fully qualified name of the proto message type. Example: com.test.SampleMessage") 48 | 49 | produceCmd.Flags().StringVar(&keyProtoType, "key-proto-type", "", "Fully qualified name of the proto key type. Example: com.test.SampleMessage") 50 | produceCmd.Flags().StringVar(&partitionerFlag, "partitioner", "", "Select partitioner: [jvm|rand|rr|hash]") 51 | produceCmd.Flags().StringVar(&timestampFlag, "timestamp", "", "Select timestamp for record") 52 | produceCmd.Flags().Int32VarP(&partitionFlag, "partition", "p", -1, "Partition to produce to") 53 | 54 | produceCmd.Flags().IntVarP(&avroSchemaID, "avro-schema-id", "", -1, "Value schema id for avro message encoding") 55 | produceCmd.Flags().IntVarP(&avroKeySchemaID, "avro-key-schema-id", "", -1, "Key schema id for avro message encoding") 56 | 57 | produceCmd.Flags().StringVarP(&inputModeFlag, "input-mode", "", "line", "Scanning input mode: [line|full]") 58 | produceCmd.Flags().IntVarP(&bufferSizeFlag, "line-length-limit", "", 0, "Line length limit in line input mode") 59 | 60 | produceCmd.Flags().BoolVar(&templateFlag, "template", false, "Run data through the Go template engine") 61 | 62 | } 63 | 64 | func readLines(reader io.Reader, out chan []byte) { 65 | scanner := bufio.NewScanner(reader) 66 | if bufferSizeFlag > 0 { 67 | scanner.Buffer(make([]byte, bufferSizeFlag), bufferSizeFlag) 68 | } 69 | 70 | for scanner.Scan() { 71 | out <- bytes.Clone(scanner.Bytes()) 72 | } 73 | close(out) 74 | 75 | if err := scanner.Err(); err != nil { 76 | errorExit("scanning input failed: %v\n", err) 77 | } 78 | } 79 | 80 | func readFull(reader io.Reader, out chan []byte) { 81 | data, err := io.ReadAll(reader) 82 | if err != nil { 83 | errorExit("Unable to read data: %v\n", err) 84 | } 85 | out <- data 86 | close(out) 87 | } 88 | 89 | var produceCmd = &cobra.Command{ 90 | Use: "produce TOPIC", 91 | Short: "Produce record. 
Reads data from stdin.", 92 | Args: cobra.ExactArgs(1), 93 | ValidArgsFunction: validTopicArgs, 94 | PreRun: setupProtoDescriptorRegistry, 95 | Run: func(cmd *cobra.Command, args []string) { 96 | cfg := getConfig() 97 | switch partitionerFlag { // "hash" needs no case: sarama's default partitioner is hash-based 98 | case "jvm": 99 | cfg.Producer.Partitioner = partitioner.NewJVMCompatiblePartitioner 100 | case "rand": 101 | cfg.Producer.Partitioner = sarama.NewRandomPartitioner 102 | case "rr": 103 | cfg.Producer.Partitioner = sarama.NewRoundRobinPartitioner 104 | } 105 | 106 | if partitionFlag != int32(-1) { 107 | cfg.Producer.Partitioner = sarama.NewManualPartitioner 108 | } 109 | 110 | producer, err := sarama.NewSyncProducer(currentCluster.Brokers, cfg) 111 | if err != nil { 112 | errorExit("Unable to create new sync producer: %v\n", err) 113 | } 114 | 115 | if avroSchemaID != -1 || avroKeySchemaID != -1 { 116 | schemaCache = getSchemaCache() 117 | if schemaCache == nil { 118 | errorExit("Could not connect to schema registry") 119 | } 120 | } 121 | 122 | out := make(chan []byte, 1) 123 | switch inputModeFlag { 124 | case "full": 125 | go readFull(inReader, out) 126 | default: 127 | go readLines(inReader, out) 128 | } 129 | 130 | var key sarama.Encoder 131 | if rawKeyFlag { 132 | keyBytes, err := base64.RawStdEncoding.DecodeString(keyFlag) 133 | if err != nil { 134 | errorExit("--raw-key is given, but value of --key is not base64") 135 | } 136 | key = sarama.ByteEncoder(keyBytes) 137 | } else { 138 | key = sarama.StringEncoder(keyFlag) 139 | } 140 | if keyProtoType != "" { 141 | if dynamicMessage := reg.MessageForType(keyProtoType); dynamicMessage != nil { 142 | err = dynamicMessage.UnmarshalJSON([]byte(keyFlag)) 143 | if err != nil { 144 | errorExit("Failed to parse key JSON as proto type %v: %v", keyProtoType, err) 145 | } 146 | 147 | encoded, err := pb.Marshal(dynamicMessage) 148 | if err != nil { 149 | errorExit("Failed to marshal proto: %v", err) 150 | } 151 | 152 | key = sarama.ByteEncoder(encoded) 153 | } else { 154 | errorExit("Failed to load key proto type") 155 | } 156 | 157 | } else if avroKeySchemaID != -1 { 158 | avroKey, err := schemaCache.EncodeMessage(avroKeySchemaID, []byte(keyFlag)) 159 | if err != nil { 160 | errorExit("Failed to encode avro key: %v", err) 161 | } 162 | key = sarama.ByteEncoder(avroKey) 163 | } 164 | 165 | var headers []sarama.RecordHeader 166 | for _, h := range headerFlag { 167 | v := strings.SplitN(h, ":", 2) 168 | if len(v) == 2 { 169 | headers = append(headers, sarama.RecordHeader{ 170 | Key: []byte(v[0]), 171 | Value: []byte(v[1]), 172 | }) 173 | } 174 | } 175 | 176 | for data := range out { 177 | 178 | for i := 0; i < repeatFlag; i++ { 179 | 180 | input := data 181 | 182 | if templateFlag { 183 | vars := map[string]interface{}{} 184 | vars["i"] = i 185 | tpl := template.New("kaf").Funcs(sprig.TxtFuncMap()) 186 | 187 | tpl, err = tpl.Parse(string(data)) 188 | if err != nil { 189 | errorExit("failed to parse go template: %v", err) 190 | } 191 | 192 | buf := bytes.NewBuffer(nil) 193 | 194 | if err := tpl.Execute(buf, vars); err != nil { 195 | errorExit("failed to execute go template: %v", err) 196 | } 197 | 198 | input = buf.Bytes() 199 | } 200 | 201 | // Encode the (possibly templated) input as proto, Avro, or raw bytes 202 | 203 | var marshaledInput []byte 204 | 205 | if protoType != "" { 206 | if dynamicMessage := reg.MessageForType(protoType); dynamicMessage != nil { 207 | err = dynamicMessage.UnmarshalJSON(input) 208 | if err != nil { 209 | errorExit("Failed to parse input JSON as proto type %v: %v", protoType, err) 210 | } 211 | 212 | encoded, err := 
pb.Marshal(dynamicMessage) 213 | if err != nil { 214 | errorExit("Failed to marshal proto: %v", err) 215 | } 216 | 217 | marshaledInput = encoded 218 | } else { 219 | errorExit("Failed to load payload proto type") 220 | } 221 | } else if avroSchemaID != -1 { 222 | avro, err := schemaCache.EncodeMessage(avroSchemaID, input) 223 | if err != nil { 224 | errorExit("Failed to encode avro value: %v", err) 225 | } 226 | marshaledInput = avro 227 | } else { 228 | marshaledInput = input 229 | } 230 | 231 | var ts time.Time 232 | t, err := time.Parse(time.RFC3339, timestampFlag) 233 | if err != nil { 234 | ts = time.Now() 235 | } else { 236 | ts = t 237 | } 238 | 239 | msg := &sarama.ProducerMessage{ 240 | Topic: args[0], 241 | Key: key, 242 | Headers: headers, 243 | Timestamp: ts, 244 | Value: sarama.ByteEncoder(marshaledInput), 245 | } 246 | if partitionFlag != -1 { 247 | msg.Partition = partitionFlag 248 | } 249 | partition, offset, err := producer.SendMessage(msg) 250 | if err != nil { 251 | fmt.Fprintf(errWriter, "Failed to send record: %v.\n", err) 252 | os.Exit(1) 253 | } 254 | 255 | fmt.Fprintf(outWriter, "Sent record to partition %v at offset %v.\n", partition, offset) 256 | } 257 | } 258 | }, 259 | } 260 | -------------------------------------------------------------------------------- /cmd/kaf/produce_consume_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestProduceConsume(t *testing.T) { 11 | msg := "this is a test" 12 | 13 | t.Run("produce a message", func(t *testing.T) { 14 | buf := bytes.NewBufferString(msg) 15 | 16 | out := runCmdWithBroker(t, buf, "produce", "gnomock-kafka") 17 | require.Contains(t, out, "Sent record") 18 | }) 19 | 20 | t.Run("consume a message", func(t *testing.T) { 21 | out := runCmdWithBroker(t, nil, "consume", "gnomock-kafka") 22 | require.Contains(t, out, msg) 23 | }) 24 | } 25 | -------------------------------------------------------------------------------- /cmd/kaf/query.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | 7 | "strings" 8 | 9 | "github.com/IBM/sarama" 10 | "github.com/spf13/cobra" 11 | ) 12 | 13 | var grepValue string 14 | 15 | func init() { 16 | rootCmd.AddCommand(queryCmd) 17 | 18 | queryCmd.Flags().StringVarP(&keyFlag, "key", "k", "", "Key to search for") 19 | queryCmd.Flags().StringSliceVar(&protoFiles, "proto-include", []string{}, "Path to proto files") 20 | queryCmd.Flags().StringSliceVar(&protoExclude, "proto-exclude", []string{}, "Proto exclusions (path prefixes)") 21 | queryCmd.Flags().StringVar(&protoType, "proto-type", "", "Fully qualified name of the proto message type. Example: com.test.SampleMessage") 22 | queryCmd.Flags().StringVar(&keyProtoType, "key-proto-type", "", "Fully qualified name of the proto key type. 
Example: com.test.SampleMessage") 23 | 24 | queryCmd.Flags().StringVar(&grepValue, "grep", "", "Grep for value") 25 | 26 | } 27 | 28 | var queryCmd = &cobra.Command{ 29 | Use: "query TOPIC", 30 | Short: "Query topic by key", 31 | Args: cobra.ExactArgs(1), 32 | ValidArgsFunction: validTopicArgs, 33 | PreRun: setupProtoDescriptorRegistry, 34 | Run: func(cmd *cobra.Command, args []string) { 35 | topic := args[0] 36 | client := getClient() 37 | 38 | consumer, err := sarama.NewConsumerFromClient(client) 39 | if err != nil { 40 | errorExit("Unable to create consumer from client: %v\n", err) 41 | } 42 | 43 | partitions, err := consumer.Partitions(topic) 44 | if err != nil { 45 | errorExit("Unable to get partitions: %v\n", err) 46 | } 47 | 48 | schemaCache = getSchemaCache() 49 | 50 | wg := sync.WaitGroup{} 51 | 52 | for _, partition := range partitions { 53 | wg.Add(1) 54 | go func(partition int32) { 55 | defer wg.Done() 56 | highWatermark, err := client.GetOffset(topic, partition, sarama.OffsetNewest) 57 | if err != nil { 58 | errorExit("Failed to get high watermark: %v", err) 59 | } 60 | 61 | if highWatermark == 0 { 62 | return 63 | } 64 | 65 | pc, err := consumer.ConsumePartition(topic, partition, sarama.OffsetOldest) 66 | if err != nil { 67 | errorExit("Unable to consume partition: %v\n", err) 68 | } 69 | 70 | for msg := range pc.Messages() { 71 | if string(msg.Key) == keyFlag { 72 | var keyTextRaw string 73 | var valueTextRaw string 74 | if protoType != "" { 75 | d, err := protoDecode(reg, msg.Value, protoType) 76 | if err != nil { 77 | fmt.Fprintf(errWriter, "Failed to decode proto value: %v\n", err) 78 | } 79 | valueTextRaw = string(d) 80 | } else { 81 | valueTextRaw = string(msg.Value) 82 | } 83 | 84 | if keyProtoType != "" { 85 | d, err := protoDecode(reg, msg.Key, keyProtoType) 86 | if err != nil { 87 | fmt.Fprintf(errWriter, "Failed to decode proto key: %v\n", err) 88 | } 89 | keyTextRaw = string(d) 90 | } else { 91 | keyTextRaw = string(msg.Key) 92 | } 93 | 94 | match := true 95 | if grepValue != "" { 96 | if !strings.Contains(valueTextRaw, grepValue) { 97 | match = false 98 | } 99 | } 100 | 101 | if match { 102 | fmt.Printf("Key: %v\n", keyTextRaw) 103 | fmt.Printf("Value: %v\n", valueTextRaw) 104 | } 105 | } 106 | // Stop once the high watermark is reached, even if the key never matched 107 | if msg.Offset >= pc.HighWaterMarkOffset()-1 { 108 | break 109 | } 110 | } 111 | }(partition) 112 | } 113 | 114 | wg.Wait() 115 | }, 116 | } 117 | -------------------------------------------------------------------------------- /cmd/kaf/scram_client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/sha256" 5 | "crypto/sha512" 6 | "hash" 7 | 8 | "github.com/xdg/scram" 9 | ) 10 | 11 | var SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() } 12 | var SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() } 13 | 14 | type XDGSCRAMClient struct { 15 | *scram.Client 16 | *scram.ClientConversation 17 | scram.HashGeneratorFcn 18 | } 19 | 20 | func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { 21 | x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) 22 | if err != nil { 23 | return err 24 | } 25 | x.ClientConversation = x.Client.NewConversation() 26 | return nil 27 | } 28 | 29 | func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { 30 | response, err = x.ClientConversation.Step(challenge) 31 | return 32 | } 33 | 34 | func (x *XDGSCRAMClient) Done() bool { 35 | return x.ClientConversation.Done() 36 | } 37 | 
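A minimal usage sketch, not part of the repository: it shows how the XDGSCRAMClient above plugs into a sarama configuration, mirroring what getConfig in kaf.go does. The function name newSCRAMConfig and the credential parameters are illustrative placeholders; sarama invokes the generator once per broker connection, so every connection gets its own SCRAM conversation state.

func newSCRAMConfig(user, pass string) *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.User = user
	cfg.Net.SASL.Password = pass
	cfg.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512)
	// A fresh client per connection keeps the SCRAM conversation state isolated.
	cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient {
		return &XDGSCRAMClient{HashGeneratorFcn: SHA512}
	}
	return cfg
}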
-------------------------------------------------------------------------------- /cmd/kaf/topic.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | "slices" 8 | "sort" 9 | "strings" 10 | "text/tabwriter" 11 | 12 | "github.com/IBM/sarama" 13 | "github.com/spf13/cobra" 14 | ) 15 | 16 | var ( 17 | partitionsFlag int32 18 | partitionAssignmentsFlag string 19 | replicasFlag int16 20 | noHeaderFlag bool 21 | compactFlag bool 22 | ) 23 | 24 | func init() { 25 | rootCmd.AddCommand(topicCmd) 26 | rootCmd.AddCommand(topicsCmd) 27 | topicCmd.AddCommand(createTopicCmd) 28 | topicCmd.AddCommand(deleteTopicCmd) 29 | topicCmd.AddCommand(lsTopicsCmd) 30 | topicCmd.AddCommand(describeTopicCmd) 31 | topicCmd.AddCommand(addConfigCmd) 32 | topicCmd.AddCommand(removeConfigCmd) 33 | topicCmd.AddCommand(topicSetConfig) 34 | topicCmd.AddCommand(updateTopicCmd) 35 | topicCmd.AddCommand(lagCmd) 36 | 37 | createTopicCmd.Flags().Int32VarP(&partitionsFlag, "partitions", "p", int32(1), "Number of partitions") 38 | createTopicCmd.Flags().Int16VarP(&replicasFlag, "replicas", "r", int16(1), "Number of replicas") 39 | createTopicCmd.Flags().BoolVar(&compactFlag, "compact", false, "Enable topic compaction") 40 | 41 | lsTopicsCmd.Flags().BoolVar(&noHeaderFlag, "no-headers", false, "Hide table headers") 42 | topicsCmd.Flags().BoolVar(&noHeaderFlag, "no-headers", false, "Hide table headers") 43 | updateTopicCmd.Flags().Int32VarP(&partitionsFlag, "partitions", "p", int32(-1), "Number of partitions") 44 | updateTopicCmd.Flags().StringVar(&partitionAssignmentsFlag, "partition-assignments", "", "Partition Assignments. Optional. If set in combination with -p, an assignment must be provided for each new partition. Example: '[[1,2,3],[1,2,3]]' (JSON Array syntax) assigns two new partitions to brokers 1,2,3. If used by itself, a reassignment must be provided for all partitions.") 45 | } 46 | 47 | var topicCmd = &cobra.Command{ 48 | Use: "topic", 49 | Short: "Create and describe topics.", 50 | } 51 | 52 | var topicsCmd = &cobra.Command{ 53 | Use: "topics", 54 | Short: "List topics", 55 | Run: lsTopicsCmd.Run, 56 | } 57 | 58 | var topicSetConfig = &cobra.Command{ 59 | Use: "set-config", 60 | Short: "set topic config. 
Requires Kafka >=2.3.0 on the broker side and kaf cluster config.", 61 | Example: "kaf topic set-config topic.name \"cleanup.policy=delete\"", 62 | Args: cobra.ExactArgs(2), 63 | Run: func(cmd *cobra.Command, args []string) { 64 | admin := getClusterAdmin() 65 | 66 | topic := args[0] 67 | 68 | splt := strings.Split(args[1], ",") 69 | configs := make(map[string]sarama.IncrementalAlterConfigsEntry) 70 | 71 | for _, kv := range splt { 72 | s := strings.Split(kv, "=") 73 | 74 | if len(s) != 2 { 75 | continue 76 | } 77 | 78 | key := s[0] 79 | value := s[1] 80 | configs[key] = sarama.IncrementalAlterConfigsEntry{ 81 | Operation: sarama.IncrementalAlterConfigsOperationSet, 82 | Value: &value, 83 | } 84 | } 85 | 86 | if len(configs) < 1 { 87 | errorExit("No valid configs found") 88 | } 89 | 90 | err := admin.IncrementalAlterConfig(sarama.TopicResource, topic, configs, false) 91 | if err != nil { 92 | errorExit("Unable to alter topic config: %v\n", err) 93 | } 94 | fmt.Printf("\xE2\x9C\x85 Updated config.\n") 95 | }, 96 | } 97 | 98 | var updateTopicCmd = &cobra.Command{ 99 | Use: "update", 100 | Short: "Update topic", 101 | Example: "kaf topic update -p 5 --partition-assignments '[[1,2,3],[1,2,3]]'", 102 | Args: cobra.ExactArgs(1), 103 | Run: func(cmd *cobra.Command, args []string) { 104 | admin := getClusterAdmin() 105 | 106 | if partitionsFlag == -1 && partitionAssignmentsFlag == "" { 107 | errorExit("Number of partitions and/or partition assignments must be given") 108 | } 109 | 110 | var assignments [][]int32 111 | if partitionAssignmentsFlag != "" { 112 | if err := json.Unmarshal([]byte(partitionAssignmentsFlag), &assignments); err != nil { 113 | errorExit("Invalid partition assignments: %v", err) 114 | } 115 | } 116 | 117 | if partitionsFlag != int32(-1) { 118 | err := admin.CreatePartitions(args[0], partitionsFlag, assignments, false) 119 | if err != nil { 120 | errorExit("Failed to create partitions: %v", err) 121 | } 122 | } else { 123 | // Needs at least Kafka version 2.4.0. 
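// Without -p, the assignments are interpreted as a full reassignment: entry i is the
// desired replica list (broker IDs) for partition i, so '[[1,2],[2,3]]' places
// partition 0 on brokers 1,2 and partition 1 on brokers 2,3.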
124 | err := admin.AlterPartitionReassignments(args[0], assignments) 125 | if err != nil { 126 | errorExit("Failed to reassign the partition assignments: %v", err) 127 | } 128 | } 129 | fmt.Printf("\xE2\x9C\x85 Updated topic!\n") 130 | }, 131 | } 132 | 133 | var lsTopicsCmd = &cobra.Command{ 134 | Use: "ls", 135 | Aliases: []string{"list"}, 136 | Short: "List topics", 137 | Args: cobra.NoArgs, 138 | Run: func(cmd *cobra.Command, args []string) { 139 | admin := getClusterAdmin() 140 | 141 | topics, err := admin.ListTopics() 142 | if err != nil { 143 | errorExit("Unable to list topics: %v\n", err) 144 | } 145 | 146 | sortedTopics := make( 147 | []struct { 148 | name string 149 | sarama.TopicDetail 150 | }, len(topics)) 151 | 152 | i := 0 153 | for name, topic := range topics { 154 | sortedTopics[i].name = name 155 | sortedTopics[i].TopicDetail = topic 156 | i++ 157 | } 158 | 159 | sort.Slice(sortedTopics, func(i int, j int) bool { 160 | return sortedTopics[i].name < sortedTopics[j].name 161 | }) 162 | 163 | w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) 164 | 165 | if !noHeaderFlag { 166 | fmt.Fprintf(w, "NAME\tPARTITIONS\tREPLICAS\t\n") 167 | } 168 | 169 | for _, topic := range sortedTopics { 170 | fmt.Fprintf(w, "%v\t%v\t%v\t\n", topic.name, topic.NumPartitions, topic.ReplicationFactor) 171 | } 172 | w.Flush() 173 | }, 174 | } 175 | 176 | var describeTopicCmd = &cobra.Command{ 177 | Use: "describe", 178 | Short: "Describe topic", 179 | Long: "Describe a topic. Default values of the configuration are omitted.", 180 | Args: cobra.ExactArgs(1), 181 | ValidArgsFunction: validTopicArgs, 182 | Run: func(cmd *cobra.Command, args []string) { 183 | admin := getClusterAdmin() 184 | 185 | topicDetails, err := admin.DescribeTopics([]string{args[0]}) 186 | if err != nil { 187 | errorExit("Unable to describe topics: %v\n", err) 188 | } 189 | 190 | if topicDetails[0].Err == sarama.ErrUnknownTopicOrPartition { 191 | fmt.Printf("Topic %v not found.\n", args[0]) 192 | return 193 | } 194 | 195 | cfg, err := admin.DescribeConfig(sarama.ConfigResource{ 196 | Type: sarama.TopicResource, 197 | Name: args[0], 198 | }) 199 | if err != nil { 200 | errorExit("Unable to describe config: %v\n", err) 201 | } 202 | 203 | var compacted bool 204 | for _, e := range cfg { 205 | if e.Name == "cleanup.policy" { 206 | for _, setting := range strings.Split(e.Value, ",") { 207 | if setting == "compact" { 208 | compacted = true 209 | } 210 | } 211 | } 212 | } 213 | 214 | detail := topicDetails[0] 215 | sort.Slice(detail.Partitions, func(i, j int) bool { return detail.Partitions[i].ID < detail.Partitions[j].ID }) 216 | 217 | w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) 218 | fmt.Fprintf(w, "Name:\t%v\t\n", detail.Name) 219 | fmt.Fprintf(w, "Internal:\t%v\t\n", detail.IsInternal) 220 | fmt.Fprintf(w, "Compacted:\t%v\t\n", compacted) 221 | fmt.Fprintf(w, "Partitions:\n") 222 | 223 | w.Flush() 224 | w.Init(outWriter, tabwriterMinWidthNested, 4, 2, tabwriterPadChar, tabwriterFlags) 225 | 226 | fmt.Fprintf(w, "\tPartition\tHigh Watermark\tLeader\tReplicas\tISR\t\n") 227 | fmt.Fprintf(w, "\t---------\t--------------\t------\t--------\t---\t\n") 228 | 229 | partitions := make([]int32, 0, len(detail.Partitions)) 230 | for _, partition := range detail.Partitions { 231 | partitions = append(partitions, partition.ID) 232 | } 233 | highWatermarks := getHighWatermarks(args[0], 
partitions) 234 | highWatermarksSum := 0 235 | 236 | for _, partition := range detail.Partitions { 237 | sortedReplicas := partition.Replicas 238 | sort.Slice(sortedReplicas, func(i, j int) bool { return sortedReplicas[i] < sortedReplicas[j] }) 239 | 240 | sortedISR := partition.Isr 241 | sort.Slice(sortedISR, func(i, j int) bool { return sortedISR[i] < sortedISR[j] }) 242 | 243 | highWatermarksSum += int(highWatermarks[partition.ID]) 244 | 245 | fmt.Fprintf(w, "\t%v\t%v\t%v\t%v\t%v\t\n", partition.ID, highWatermarks[partition.ID], partition.Leader, sortedReplicas, sortedISR) 246 | } 247 | 248 | w.Flush() 249 | 250 | fmt.Fprintf(w, "Summed HighWatermark:\t%d\n", highWatermarksSum) 251 | w.Flush() 252 | 253 | fmt.Fprintf(w, "Config:\n") 254 | fmt.Fprintf(w, "\tName\tValue\tReadOnly\tSensitive\t\n") 255 | fmt.Fprintf(w, "\t----\t-----\t--------\t---------\t\n") 256 | 257 | for _, entry := range cfg { 258 | if entry.Default { 259 | continue 260 | } 261 | fmt.Fprintf(w, "\t%v\t%v\t%v\t%v\t\n", entry.Name, entry.Value, entry.ReadOnly, entry.Sensitive) 262 | } 263 | 264 | w.Flush() 265 | }, 266 | } 267 | 268 | var createTopicCmd = &cobra.Command{ 269 | Use: "create TOPIC", 270 | Short: "Create a topic", 271 | Args: cobra.ExactArgs(1), 272 | Run: func(cmd *cobra.Command, args []string) { 273 | admin := getClusterAdmin() 274 | 275 | topicName := args[0] 276 | compact := "delete" 277 | if compactFlag { 278 | compact = "compact" 279 | } 280 | err := admin.CreateTopic(topicName, &sarama.TopicDetail{ 281 | NumPartitions: partitionsFlag, 282 | ReplicationFactor: replicasFlag, 283 | ConfigEntries: map[string]*string{ 284 | "cleanup.policy": &compact, 285 | }, 286 | }, false) 287 | if err != nil { 288 | errorExit("Could not create topic %v: %v\n", topicName, err.Error()) 289 | } else { 290 | w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) 291 | fmt.Fprintf(w, "\xE2\x9C\x85 Created topic!\n") 292 | fmt.Fprintln(w, "\tTopic Name:\t", topicName) 293 | fmt.Fprintln(w, "\tPartitions:\t", partitionsFlag) 294 | fmt.Fprintln(w, "\tReplication Factor:\t", replicasFlag) 295 | fmt.Fprintln(w, "\tCleanup Policy:\t", compact) 296 | w.Flush() 297 | } 298 | }, 299 | } 300 | 301 | var addConfigCmd = &cobra.Command{ 302 | Use: "add-config TOPIC KEY VALUE", 303 | Short: "Add config key/value pair to topic", 304 | Args: cobra.ExactArgs(3), // TODO how to unset ? support empty VALUE ? 
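// Usage sketch (key and value are hypothetical): kaf topic add-config my-topic retention.ms 86400000
// Unlike set-config's incremental API above, AlterConfig replaces the resource's entire
// non-default config set, which is why rm-config below re-submits every remaining entry.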
305 | Run: func(cmd *cobra.Command, args []string) { 306 | admin := getClusterAdmin() 307 | 308 | topic := args[0] 309 | key := args[1] 310 | value := args[2] 311 | 312 | err := admin.AlterConfig(sarama.TopicResource, topic, map[string]*string{ 313 | key: &value, 314 | }, false) 315 | if err != nil { 316 | errorExit("failed to update topic config: %v", err) 317 | } else { 318 | fmt.Printf("Added config %v=%v to topic %v.\n", key, value, topic) 319 | } 320 | }, 321 | } 322 | 323 | var removeConfigCmd = &cobra.Command{ 324 | Use: "rm-config TOPIC ATTR1,ATTR2...", 325 | Short: "Remove attributes from topic", 326 | Args: cobra.ExactArgs(2), 327 | Run: func(cmd *cobra.Command, args []string) { 328 | admin := getClusterAdmin() 329 | 330 | topic := args[0] 331 | attrsToRemove := strings.Split(args[1], ",") 332 | 333 | updatedTopicConfigs := make(map[string]*string) 334 | 335 | allTopicConfigs, err := admin.DescribeConfig(sarama.ConfigResource{ 336 | Type: sarama.TopicResource, 337 | Name: topic, 338 | }) 339 | if err != nil { 340 | errorExit("failed to describe topic config: %v", err) 341 | } 342 | 343 | for _, v := range allTopicConfigs { 344 | if !slices.Contains(attrsToRemove, v.Name) { 345 | updatedTopicConfigs[v.Name] = &v.Value 346 | } 347 | } 348 | 349 | err = admin.AlterConfig(sarama.TopicResource, topic, updatedTopicConfigs, false) 350 | if err != nil { 351 | errorExit("failed to remove attributes from topic config: %v", err) 352 | } 353 | fmt.Printf("Removed attributes %v from topic %v.\n", attrsToRemove, topic) 354 | }, 355 | } 356 | 357 | var deleteTopicCmd = &cobra.Command{ 358 | Use: "delete TOPIC", 359 | Short: "Delete a topic", 360 | Args: cobra.ExactArgs(1), 361 | ValidArgsFunction: validTopicArgs, 362 | Run: func(cmd *cobra.Command, args []string) { 363 | admin := getClusterAdmin() 364 | 365 | topicName := args[0] 366 | err := admin.DeleteTopic(topicName) 367 | if err != nil { 368 | errorExit("Could not delete topic %v: %v\n", topicName, err.Error()) 369 | } else { 370 | fmt.Fprintf(outWriter, "\xE2\x9C\x85 Deleted topic %v!\n", topicName) 371 | } 372 | }, 373 | } 374 | 375 | var lagCmd = &cobra.Command{ 376 | Use: "lag", 377 | Short: "Display the total lag for each consumer group", 378 | Args: cobra.ExactArgs(1), 379 | Run: func(cmd *cobra.Command, args []string) { 380 | topic := args[0] 381 | admin := getClusterAdmin() 382 | defer admin.Close() 383 | 384 | // Describe the topic 385 | topicDetails, err := admin.DescribeTopics([]string{topic}) 386 | if err != nil || len(topicDetails) == 0 { 387 | errorExit("Unable to describe topics: %v\n", err) 388 | } 389 | 390 | // Get the list of partitions for the topic 391 | partitions := make([]int32, 0, len(topicDetails[0].Partitions)) 392 | for _, partition := range topicDetails[0].Partitions { 393 | partitions = append(partitions, partition.ID) 394 | } 395 | highWatermarks := getHighWatermarks(topic, partitions) 396 | 397 | // List all consumer groups 398 | consumerGroups, err := admin.ListConsumerGroups() 399 | if err != nil { 400 | errorExit("Unable to list consumer groups: %v\n", err) 401 | } 402 | 403 | var groups []string 404 | for group := range consumerGroups { 405 | groups = append(groups, group) 406 | } 407 | 408 | // Describe all consumer groups 409 | groupsInfo, err := admin.DescribeConsumerGroups(groups) 410 | if err != nil { 411 | errorExit("Unable to describe consumer groups: %v\n", err) 412 | } 413 | 414 | // Calculate lag for each group 415 | lagInfo := make(map[string]int64) 416 | groupStates := 
make(map[string]string) // To store the state of each group 417 | for _, group := range groupsInfo { 418 | var sum int64 419 | show := false 420 | for _, member := range group.Members { 421 | assignment, err := member.GetMemberAssignment() 422 | if err != nil || assignment == nil { 423 | continue 424 | } 425 | 426 | metadata, err := member.GetMemberMetadata() 427 | if err != nil || metadata == nil { 428 | continue 429 | } 430 | 431 | if topicPartitions, exist := assignment.Topics[topic]; exist { 432 | show = true 433 | resp, err := admin.ListConsumerGroupOffsets(group.GroupId, map[string][]int32{topic: topicPartitions}) 434 | if err != nil { 435 | fmt.Fprintf(os.Stderr, "Error fetching offsets for group %s: %v\n", group.GroupId, err) 436 | continue 437 | } 438 | 439 | if blocks, ok := resp.Blocks[topic]; ok { 440 | for pid, block := range blocks { 441 | if hwm, ok := highWatermarks[pid]; ok { 442 | if block.Offset > hwm { 443 | fmt.Fprintf(os.Stderr, "Warning: Consumer offset (%d) is greater than high watermark (%d) for partition %d in group %s\n", block.Offset, hwm, pid, group.GroupId) 444 | } else if block.Offset < 0 { 445 | // Skip partitions with negative offsets 446 | } else { 447 | sum += hwm - block.Offset 448 | } 449 | } 450 | } 451 | } 452 | } 453 | } 454 | 455 | if show && sum >= 0 { 456 | lagInfo[group.GroupId] = sum 457 | groupStates[group.GroupId] = group.State // Store the state of the group 458 | } 459 | } 460 | 461 | // Print the lag information along with group state 462 | w := tabwriter.NewWriter(outWriter, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) 463 | if !noHeaderFlag { 464 | fmt.Fprintf(w, "GROUP ID\tSTATE\tLAG\n") 465 | } 466 | for group, lag := range lagInfo { 467 | fmt.Fprintf(w, "%v\t%v\t%v\n", group, groupStates[group], lag) 468 | } 469 | w.Flush() 470 | }, 471 | } 472 | -------------------------------------------------------------------------------- /cmd/kaf/topic_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestTopic(t *testing.T) { 12 | newTopic := fmt.Sprintf("new-topic-%d", time.Now().Unix()) 13 | 14 | t.Run("ls before new topic", func(t *testing.T) { 15 | out := runCmdWithBroker(t, nil, "topic", "ls") 16 | require.NotContains(t, out, newTopic) 17 | }) 18 | 19 | t.Run("create new topic", func(t *testing.T) { 20 | out := runCmdWithBroker(t, nil, "topic", "create", newTopic) 21 | require.Contains(t, out, "Created topic!") 22 | require.Contains(t, out, newTopic) 23 | }) 24 | 25 | t.Run("ls", func(t *testing.T) { 26 | out := runCmdWithBroker(t, nil, "topic", "ls") 27 | require.Contains(t, out, newTopic) 28 | }) 29 | 30 | t.Run("describe", func(t *testing.T) { 31 | out := runCmdWithBroker(t, nil, "topic", "describe", newTopic) 32 | require.Contains(t, out, newTopic) 33 | }) 34 | 35 | t.Run("delete", func(t *testing.T) { 36 | out := runCmdWithBroker(t, nil, "topic", "delete", newTopic) 37 | require.Contains(t, out, fmt.Sprintf("Deleted topic %s!", newTopic)) 38 | }) 39 | 40 | t.Run("ls after deleted", func(t *testing.T) { 41 | out := runCmdWithBroker(t, nil, "topic", "ls") 42 | require.NotContains(t, out, newTopic) 43 | }) 44 | } 45 | -------------------------------------------------------------------------------- /cmd/kaf/topics_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | 
import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestTopics(t *testing.T) { 10 | out := runCmdWithBroker(t, nil, "topics") 11 | require.Contains(t, out, "kaf-testing") 12 | require.Contains(t, out, "gnomock-kafka") 13 | } 14 | -------------------------------------------------------------------------------- /cmd/kaf/valid.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | func validConfigArgs(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { 8 | clusterList := make([]string, 0, len(cfg.Clusters)) 9 | for _, cluster := range cfg.Clusters { 10 | clusterList = append(clusterList, cluster.Name) 11 | } 12 | return clusterList, cobra.ShellCompDirectiveNoFileComp 13 | } 14 | 15 | func validGroupArgs(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { 16 | admin := getClusterAdmin() 17 | 18 | groups, err := admin.ListConsumerGroups() 19 | if err != nil { 20 | errorExit("Unable to list consumer groups: %v\n", err) 21 | } 22 | groupList := make([]string, 0, len(groups)) 23 | for grp := range groups { 24 | groupList = append(groupList, grp) 25 | } 26 | return groupList, cobra.ShellCompDirectiveNoFileComp 27 | } 28 | 29 | func validTopicArgs(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { 30 | admin := getClusterAdmin() 31 | 32 | topics, err := admin.ListTopics() 33 | if err != nil { 34 | errorExit("Unable to list topics: %v\n", err) 35 | } 36 | topicList := make([]string, 0, len(topics)) 37 | for topic := range topics { 38 | topicList = append(topicList, topic) 39 | } 40 | return topicList, cobra.ShellCompDirectiveNoFileComp 41 | } 42 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.2" 2 | services: 3 | zookeeper: 4 | image: confluentinc/cp-zookeeper:latest 5 | environment: 6 | ZOOKEEPER_CLIENT_PORT: 2181 7 | ZOOKEEPER_TICK_TIME: 2000 8 | kafka: 9 | image: confluentinc/cp-kafka:latest 10 | depends_on: 11 | - zookeeper 12 | ports: 13 | - 9092:9092 14 | environment: 15 | KAFKA_BROKER_ID: 1 16 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 17 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092 18 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT 19 | KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT 20 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 21 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | ## Configuration examples 2 | 3 | This folder contains various configuration examples, meant to help you compose your `~/.kaf/config` file.
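Each file shows a single entry under `clusters:`; a real `~/.kaf/config` can simply list several of them side by side. A minimal sketch (the second entry and its broker address are invented for illustration):

```yaml
clusters:
- name: local
  brokers:
  - localhost:9092
- name: staging # hypothetical; adapted from sasl_ssl.yaml below
  brokers:
  - broker-1.example.com:9092
  SASL:
    mechanism: PLAIN
    username: admin
    password: mypasswordisnotsosimple
  security-protocol: SASL_SSL
```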
4 | -------------------------------------------------------------------------------- /examples/aws_msk_iam.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: test 3 | brokers: 4 | - localhost:9092 5 | SASL: 6 | mechanism: AWS_MSK_IAM 7 | TLS: null 8 | security-protocol: SASL_SSL 9 | # set the region using the AWS_REGION envvar or saved profiles 10 | -------------------------------------------------------------------------------- /examples/basic.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: local 3 | brokers: 4 | - localhost:9092 5 | SASL: null 6 | TLS: null 7 | security-protocol: "" 8 | version: "1.0.0" 9 | -------------------------------------------------------------------------------- /examples/sasl_plaintext.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: test 3 | brokers: 4 | - localhost:9092 5 | SASL: 6 | mechanism: PLAIN 7 | username: admin 8 | password: mypasswordisnotsosimple 9 | -------------------------------------------------------------------------------- /examples/sasl_ssl.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: test 3 | brokers: 4 | - localhost:9092 5 | SASL: 6 | mechanism: PLAIN 7 | username: admin 8 | password: mypasswordisnotsosimple 9 | TLS: null 10 | security-protocol: SASL_SSL 11 | -------------------------------------------------------------------------------- /examples/sasl_ssl_custom_ca.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: test 3 | brokers: 4 | - localhost:9092 5 | SASL: 6 | mechanism: PLAIN 7 | username: admin 8 | password: mypasswordisnotsosimple 9 | TLS: 10 | cafile: /path/ca.pem 11 | security-protocol: SASL_SSL 12 | -------------------------------------------------------------------------------- /examples/sasl_ssl_insecure.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: test 3 | brokers: 4 | - localhost:9092 5 | SASL: 6 | mechanism: PLAIN 7 | username: admin 8 | password: mypasswordisnotsosimple 9 | TLS: 10 | insecure: true 11 | security-protocol: SASL_SSL 12 | -------------------------------------------------------------------------------- /examples/sasl_ssl_oauth.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: test 3 | brokers: 4 | - localhost:9092 5 | SASL: 6 | mechanism: OAUTHBEARER 7 | clientID: my_client_oauth 8 | clientSecret: my_secret_oauth 9 | tokenURL: https://some.token.endpoint.com/token 10 | scopes: 11 | - scope1 12 | - scope2 13 | TLS: 14 | insecure: true 15 | security-protocol: SASL_SSL 16 | -------------------------------------------------------------------------------- /examples/sasl_ssl_oauth_token.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: test 3 | brokers: 4 | - localhost:9092 5 | SASL: 6 | mechanism: OAUTHBEARER 7 | token: someTOKENfromYOURprovider===== 8 | TLS: 9 | insecure: true 10 | security-protocol: SASL_SSL 11 | -------------------------------------------------------------------------------- /examples/sasl_ssl_scram.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: test 3 | brokers: 4 | - localhost:9092 5 | SASL: 6 | mechanism: SCRAM-SHA-512
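# SCRAM-SHA-256 is the other common SCRAM mechanism and should be accepted here as well (an assumption; this example exercises SHA-512)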
7 | username: user 8 | password: pass 9 | TLS: 10 | cafile: /path/ca.pem 11 | security-protocol: SASL_SSL -------------------------------------------------------------------------------- /examples/sasl_v1_handshake.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: test 3 | brokers: 4 | - localhost:9092 5 | SASL: 6 | mechanism: PLAIN 7 | username: admin 8 | password: mypasswordisnotsosimple 9 | version: 1 10 | -------------------------------------------------------------------------------- /examples/schema_registry_basic_auth.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: local 3 | brokers: 4 | - localhost:9092 5 | SASL: null 6 | TLS: null 7 | security-protocol: "" 8 | version: "1.0.0" 9 | schema-registry-url: https://schema.registry.url 10 | schema-registry-credentials: 11 | username: httpbasicauthuser 12 | password: mypasswordisnotsobasic 13 | -------------------------------------------------------------------------------- /examples/ssl_keys.yaml: -------------------------------------------------------------------------------- 1 | clusters: 2 | - name: test 3 | brokers: 4 | - localhost:9092 5 | TLS: 6 | cafile: ca.pem 7 | clientfile: public.pem 8 | clientkeyfile: private.pem 9 | insecure: false 10 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/birdayz/kaf 2 | 3 | go 1.22 4 | 5 | require ( 6 | github.com/IBM/sarama v1.43.2 7 | github.com/Landoop/schema-registry v0.0.0-20190327143759-50a5701c1891 8 | github.com/Masterminds/sprig v2.22.0+incompatible 9 | github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 10 | github.com/aws/aws-sdk-go-v2/config v1.27.39 11 | github.com/golang/protobuf v1.5.4 12 | github.com/hokaccha/go-prettyjson v0.0.0-20190818114111-108c894c2c0e 13 | github.com/jhump/protoreflect v1.16.0 14 | github.com/linkedin/goavro/v2 v2.13.1 15 | github.com/magiconair/properties v1.8.7 16 | github.com/manifoldco/promptui v0.9.0 17 | github.com/mattn/go-colorable v0.1.13 18 | github.com/mitchellh/go-homedir v1.1.0 19 | github.com/orlangure/gnomock v0.28.0 20 | github.com/spf13/cobra v1.8.1 21 | github.com/stretchr/testify v1.9.0 22 | github.com/vmihailenco/msgpack/v5 v5.4.1 23 | github.com/xdg/scram v1.0.5 24 | golang.org/x/oauth2 v0.18.0 25 | gopkg.in/yaml.v2 v2.4.0 26 | ) 27 | 28 | require ( 29 | github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect 30 | github.com/Masterminds/goutils v1.1.1 // indirect 31 | github.com/Masterminds/semver v1.5.0 // indirect 32 | github.com/Microsoft/go-winio v0.5.2 // indirect 33 | github.com/aws/aws-sdk-go-v2 v1.31.0 // indirect 34 | github.com/aws/aws-sdk-go-v2/credentials v1.17.37 // indirect 35 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect 36 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 // indirect 37 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 // indirect 38 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect 39 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 // indirect 40 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 // indirect 41 | github.com/aws/aws-sdk-go-v2/service/sso v1.23.3 // indirect 42 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.3 // indirect 43 | github.com/aws/aws-sdk-go-v2/service/sts v1.31.3 // indirect 44 | 
github.com/aws/smithy-go v1.21.0 // indirect 45 | github.com/bufbuild/protocompile v0.10.0 // indirect 46 | github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect 47 | github.com/davecgh/go-spew v1.1.1 // indirect 48 | github.com/docker/distribution v2.8.2+incompatible // indirect 49 | github.com/docker/docker v24.0.9+incompatible // indirect 50 | github.com/docker/go-connections v0.4.0 // indirect 51 | github.com/docker/go-units v0.4.0 // indirect 52 | github.com/eapache/go-resiliency v1.6.0 // indirect 53 | github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect 54 | github.com/eapache/queue v1.1.0 // indirect 55 | github.com/fatih/color v1.13.0 // indirect 56 | github.com/gogo/protobuf v1.3.2 // indirect 57 | github.com/golang/snappy v0.0.4 // indirect 58 | github.com/google/uuid v1.3.0 // indirect 59 | github.com/hashicorp/errwrap v1.1.0 // indirect 60 | github.com/hashicorp/go-multierror v1.1.1 // indirect 61 | github.com/hashicorp/go-uuid v1.0.3 // indirect 62 | github.com/huandu/xstrings v1.3.2 // indirect 63 | github.com/imdario/mergo v0.3.12 // indirect 64 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 65 | github.com/jcmturner/aescts/v2 v2.0.0 // indirect 66 | github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect 67 | github.com/jcmturner/gofork v1.7.6 // indirect 68 | github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect 69 | github.com/jcmturner/rpc/v2 v2.0.3 // indirect 70 | github.com/klauspost/compress v1.17.8 // indirect 71 | github.com/kr/text v0.2.0 // indirect 72 | github.com/mattn/go-isatty v0.0.16 // indirect 73 | github.com/mitchellh/copystructure v1.2.0 // indirect 74 | github.com/mitchellh/reflectwalk v1.0.2 // indirect 75 | github.com/opencontainers/go-digest v1.0.0 // indirect 76 | github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect 77 | github.com/pierrec/lz4/v4 v4.1.21 // indirect 78 | github.com/pkg/errors v0.9.1 // indirect 79 | github.com/pmezard/go-difflib v1.0.0 // indirect 80 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect 81 | github.com/rogpeppe/go-internal v1.10.0 // indirect 82 | github.com/segmentio/kafka-go v0.4.39 // indirect 83 | github.com/spf13/pflag v1.0.5 // indirect 84 | github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect 85 | github.com/xdg/stringprep v1.0.3 // indirect 86 | go.uber.org/atomic v1.9.0 // indirect 87 | go.uber.org/multierr v1.7.0 // indirect 88 | go.uber.org/zap v1.24.0 // indirect 89 | golang.org/x/crypto v0.22.0 // indirect 90 | golang.org/x/net v0.24.0 // indirect 91 | golang.org/x/sync v0.7.0 // indirect 92 | golang.org/x/sys v0.19.0 // indirect 93 | golang.org/x/text v0.14.0 // indirect 94 | google.golang.org/appengine v1.6.7 // indirect 95 | google.golang.org/protobuf v1.33.1-0.20240408130810-98873a205002 // indirect 96 | gopkg.in/yaml.v3 v3.0.1 // indirect 97 | ) 98 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= 2 | github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= 3 | github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= 4 | github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= 5 | github.com/Landoop/schema-registry 
v0.0.0-20190327143759-50a5701c1891 h1:DeXNO7Cb5W1ofU/xPjDE1kg6JL21vYZGow54ywwTKBA= 6 | github.com/Landoop/schema-registry v0.0.0-20190327143759-50a5701c1891/go.mod h1:IwIgXaypux+daBFS0gWtSfpSD38wK3mUzVBkX3Kneoo= 7 | github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= 8 | github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= 9 | github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= 10 | github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= 11 | github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= 12 | github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= 13 | github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= 14 | github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= 15 | github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 h1:UyjtGmO0Uwl/K+zpzPwLoXzMhcN9xmnR2nrqJoBrg3c= 16 | github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0/go.mod h1:TJAXuFs2HcMib3sN5L0gUC+Q01Qvy3DemvA55WuC+iA= 17 | github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U= 18 | github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA= 19 | github.com/aws/aws-sdk-go-v2/config v1.27.39 h1:FCylu78eTGzW1ynHcongXK9YHtoXD5AiiUqq3YfJYjU= 20 | github.com/aws/aws-sdk-go-v2/config v1.27.39/go.mod h1:wczj2hbyskP4LjMKBEZwPRO1shXY+GsQleab+ZXT2ik= 21 | github.com/aws/aws-sdk-go-v2/credentials v1.17.37 h1:G2aOH01yW8X373JK419THj5QVqu9vKEwxSEsGxihoW0= 22 | github.com/aws/aws-sdk-go-v2/credentials v1.17.37/go.mod h1:0ecCjlb7htYCptRD45lXJ6aJDQac6D2NlKGpZqyTG6A= 23 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA= 24 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078= 25 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 h1:kYQ3H1u0ANr9KEKlGs/jTLrBFPo8P8NaH/w7A01NeeM= 26 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18/go.mod h1:r506HmK5JDUh9+Mw4CfGJGSSoqIiLCndAuqXuhbv67Y= 27 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 h1:Z7IdFUONvTcvS7YuhtVxN99v2cCoHRXOS4mTr0B/pUc= 28 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18/go.mod h1:DkKMmksZVVyat+Y+r1dEOgJEfUeA7UngIHWeKsi0yNc= 29 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= 30 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= 31 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 h1:QFASJGfT8wMXtuP3D5CRmMjARHv9ZmzFUMJznHDOY3w= 32 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5/go.mod h1:QdZ3OmoIjSX+8D1OPAzPxDfjXASbBMDsz9qvtyIhtik= 33 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 h1:Xbwbmk44URTiHNx6PNo0ujDE6ERlsCKJD3u1zfnzAPg= 34 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20/go.mod h1:oAfOFzUB14ltPZj1rWwRc3d/6OgD76R8KlvU3EqM9Fg= 35 | github.com/aws/aws-sdk-go-v2/service/sso v1.23.3 h1:rs4JCczF805+FDv2tRhZ1NU0RB2H6ryAvsWPanAr72Y= 36 | github.com/aws/aws-sdk-go-v2/service/sso v1.23.3/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY= 37 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.3 h1:S7EPdMVZod8BGKQQPTBK+FcX9g7bKR7c4+HxWqHP7Vg= 38 | 
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.3/go.mod h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E= 39 | github.com/aws/aws-sdk-go-v2/service/sts v1.31.3 h1:VzudTFrDCIDakXtemR7l6Qzt2+JYsVqo2MxBPt5k8T8= 40 | github.com/aws/aws-sdk-go-v2/service/sts v1.31.3/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI= 41 | github.com/aws/smithy-go v1.21.0 h1:H7L8dtDRk0P1Qm6y0ji7MCYMQObJ5R9CRpyPhRUkLYA= 42 | github.com/aws/smithy-go v1.21.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= 43 | github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= 44 | github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 45 | github.com/bufbuild/protocompile v0.10.0 h1:+jW/wnLMLxaCEG8AX9lD0bQ5v9h1RUiMKOBOT5ll9dM= 46 | github.com/bufbuild/protocompile v0.10.0/go.mod h1:G9qQIQo0xZ6Uyj6CMNz0saGmx2so+KONo8/KrELABiY= 47 | github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= 48 | github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= 49 | github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= 50 | github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= 51 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= 52 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= 53 | github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 54 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 55 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 56 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 57 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 58 | github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= 59 | github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= 60 | github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= 61 | github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 62 | github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= 63 | github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= 64 | github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= 65 | github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 66 | github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= 67 | github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= 68 | github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= 69 | github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= 70 | github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= 71 | github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= 72 | github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= 73 | github.com/fatih/color v1.13.0/go.mod 
h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= 74 | github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= 75 | github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= 76 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 77 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 78 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 79 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 80 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 81 | github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 82 | github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= 83 | github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 84 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 85 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 86 | github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= 87 | github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 88 | github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= 89 | github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= 90 | github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 91 | github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 92 | github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 93 | github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 94 | github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= 95 | github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 96 | github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= 97 | github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 98 | github.com/hokaccha/go-prettyjson v0.0.0-20190818114111-108c894c2c0e h1:0aewS5NTyxftZHSnFaJmWE5oCCrj4DyEXkAiMa1iZJM= 99 | github.com/hokaccha/go-prettyjson v0.0.0-20190818114111-108c894c2c0e/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI= 100 | github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= 101 | github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= 102 | github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= 103 | github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= 104 | github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 105 | github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 106 | github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= 107 | github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= 108 | github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= 109 | github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= 110 | github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= 111 
| github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= 112 | github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= 113 | github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= 114 | github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= 115 | github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= 116 | github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= 117 | github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= 118 | github.com/jhump/protoreflect v1.16.0 h1:54fZg+49widqXYQ0b+usAFHbMkBGR4PpXrsHc8+TBDg= 119 | github.com/jhump/protoreflect v1.16.0/go.mod h1:oYPd7nPvcBw/5wlDfm/AVmU9zH9BgqGCI469pGxfj/8= 120 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 121 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 122 | github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= 123 | github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= 124 | github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= 125 | github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= 126 | github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= 127 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 128 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 129 | github.com/linkedin/goavro/v2 v2.13.1 h1:4qZ5M0QzQFDRqccsroJlgOJznqAS/TpdvXg55h429+I= 130 | github.com/linkedin/goavro/v2 v2.13.1/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk= 131 | github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= 132 | github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= 133 | github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= 134 | github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= 135 | github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= 136 | github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 137 | github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 138 | github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 139 | github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 140 | github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= 141 | github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 142 | github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= 143 | github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= 144 | github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= 145 | github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 146 | github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= 147 | github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 148 | github.com/moby/term 
v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= 149 | github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= 150 | github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= 151 | github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= 152 | github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= 153 | github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= 154 | github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= 155 | github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= 156 | github.com/orlangure/gnomock v0.28.0 h1:3xlGullCJxjWjWGjEXUzvGH1tP6nXL0HY/lHt9w8oC8= 157 | github.com/orlangure/gnomock v0.28.0/go.mod h1:mPcZ4UaVkWrN5pdOkkNWtaWwiTA/4KMME9pH/IHg5Gc= 158 | github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 159 | github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= 160 | github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 161 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 162 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 163 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 164 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 165 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= 166 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= 167 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 168 | github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 169 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 170 | github.com/segmentio/kafka-go v0.4.39 h1:75smaomhvkYRwtuOwqLsdhgCG30B82NsbdkdDfFbvrw= 171 | github.com/segmentio/kafka-go v0.4.39/go.mod h1:T0MLgygYvmqmBvC+s8aCcbVNfJN4znVne5j0Pzowp/Q= 172 | github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= 173 | github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 174 | github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= 175 | github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= 176 | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= 177 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 178 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 179 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 180 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 181 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 182 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 183 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 184 | github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 185 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 186 | github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 187 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 188 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 189 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 190 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 191 | github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= 192 | github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= 193 | github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= 194 | github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= 195 | github.com/xdg/scram v1.0.5 h1:TuS0RFmt5Is5qm9Tm2SoD89OPqe4IRiFtyFY4iwWXsw= 196 | github.com/xdg/scram v1.0.5/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= 197 | github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= 198 | github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= 199 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 200 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 201 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 202 | go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 203 | go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= 204 | go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 205 | go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= 206 | go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= 207 | go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= 208 | go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= 209 | go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= 210 | go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= 211 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 212 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 213 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 214 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 215 | golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= 216 | golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= 217 | golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= 218 | golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= 219 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 220 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 221 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 222 | 
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 223 | golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= 224 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 225 | golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 226 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 227 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 228 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 229 | golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 230 | golang.org/x/net v0.0.0-20220706163947-c90051bbdb60/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 231 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 232 | golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 233 | golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 234 | golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= 235 | golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= 236 | golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= 237 | golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= 238 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 239 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 240 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 241 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 242 | golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= 243 | golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 244 | golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 245 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 246 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 247 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 248 | golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 249 | golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 250 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 251 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 252 | golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 253 | golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 254 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 255 | golang.org/x/sys 
v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 256 | golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 257 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 258 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 259 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 260 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 261 | golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= 262 | golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 263 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 264 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 265 | golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= 266 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 267 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 268 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 269 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 270 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 271 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 272 | golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= 273 | golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 274 | golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= 275 | golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 276 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 277 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 278 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 279 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 280 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 281 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 282 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 283 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 284 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 285 | google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= 286 | google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 287 | google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= 288 | google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= 289 | google.golang.org/grpc v1.61.0 
h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= 290 | google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= 291 | google.golang.org/protobuf v1.33.1-0.20240408130810-98873a205002 h1:V7Da7qt0MkY3noVANIMVBk28nOnijADeOR3i5Hcvpj4= 292 | google.golang.org/protobuf v1.33.1-0.20240408130810-98873a205002/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= 293 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 294 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 295 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 296 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 297 | gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 298 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 299 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 300 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 301 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 302 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 303 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 304 | gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= 305 | gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= 306 | -------------------------------------------------------------------------------- /godownloader.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | # Code generated by godownloader on 2021-12-09T12:34:28Z. DO NOT EDIT. 
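# Typical usage, as a sketch (flags per the standard godownloader template):
#   ./godownloader.sh -b /usr/local/bin [tag]
# The script resolves the requested (or latest) GitHub release, downloads the
# archive matching the current OS/arch, verifies its sha256 against the release
# checksum file, and unpacks the kaf binary into bindir.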
4 | # 5 | 6 | usage() { 7 | this=$1 8 | cat </dev/null 129 | } 130 | echoerr() { 131 | echo "$@" 1>&2 132 | } 133 | log_prefix() { 134 | echo "$0" 135 | } 136 | _logp=6 137 | log_set_priority() { 138 | _logp="$1" 139 | } 140 | log_priority() { 141 | if test -z "$1"; then 142 | echo "$_logp" 143 | return 144 | fi 145 | [ "$1" -le "$_logp" ] 146 | } 147 | log_tag() { 148 | case $1 in 149 | 0) echo "emerg" ;; 150 | 1) echo "alert" ;; 151 | 2) echo "crit" ;; 152 | 3) echo "err" ;; 153 | 4) echo "warning" ;; 154 | 5) echo "notice" ;; 155 | 6) echo "info" ;; 156 | 7) echo "debug" ;; 157 | *) echo "$1" ;; 158 | esac 159 | } 160 | log_debug() { 161 | log_priority 7 || return 0 162 | echoerr "$(log_prefix)" "$(log_tag 7)" "$@" 163 | } 164 | log_info() { 165 | log_priority 6 || return 0 166 | echoerr "$(log_prefix)" "$(log_tag 6)" "$@" 167 | } 168 | log_err() { 169 | log_priority 3 || return 0 170 | echoerr "$(log_prefix)" "$(log_tag 3)" "$@" 171 | } 172 | log_crit() { 173 | log_priority 2 || return 0 174 | echoerr "$(log_prefix)" "$(log_tag 2)" "$@" 175 | } 176 | uname_os() { 177 | os=$(uname -s | tr '[:upper:]' '[:lower:]') 178 | case "$os" in 179 | cygwin_nt*) os="windows" ;; 180 | mingw*) os="windows" ;; 181 | msys_nt*) os="windows" ;; 182 | esac 183 | echo "$os" 184 | } 185 | uname_arch() { 186 | arch=$(uname -m) 187 | case $arch in 188 | x86_64) arch="amd64" ;; 189 | x86) arch="386" ;; 190 | i686) arch="386" ;; 191 | i386) arch="386" ;; 192 | aarch64) arch="arm64" ;; 193 | armv5*) arch="armv5" ;; 194 | armv6*) arch="armv6" ;; 195 | armv7*) arch="armv7" ;; 196 | esac 197 | echo ${arch} 198 | } 199 | uname_os_check() { 200 | os=$(uname_os) 201 | case "$os" in 202 | darwin) return 0 ;; 203 | dragonfly) return 0 ;; 204 | freebsd) return 0 ;; 205 | linux) return 0 ;; 206 | android) return 0 ;; 207 | nacl) return 0 ;; 208 | netbsd) return 0 ;; 209 | openbsd) return 0 ;; 210 | plan9) return 0 ;; 211 | solaris) return 0 ;; 212 | windows) return 0 ;; 213 | esac 214 | log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib" 215 | return 1 216 | } 217 | uname_arch_check() { 218 | arch=$(uname_arch) 219 | case "$arch" in 220 | 386) return 0 ;; 221 | amd64) return 0 ;; 222 | arm64) return 0 ;; 223 | armv5) return 0 ;; 224 | armv6) return 0 ;; 225 | armv7) return 0 ;; 226 | ppc64) return 0 ;; 227 | ppc64le) return 0 ;; 228 | mips) return 0 ;; 229 | mipsle) return 0 ;; 230 | mips64) return 0 ;; 231 | mips64le) return 0 ;; 232 | s390x) return 0 ;; 233 | amd64p32) return 0 ;; 234 | esac 235 | log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. 
Please file bug report at https://github.com/client9/shlib" 236 | return 1 237 | } 238 | untar() { 239 | tarball=$1 240 | case "${tarball}" in 241 | *.tar.gz | *.tgz) tar --no-same-owner -xzf "${tarball}" ;; 242 | *.tar) tar --no-same-owner -xf "${tarball}" ;; 243 | *.zip) unzip "${tarball}" ;; 244 | *) 245 | log_err "untar unknown archive format for ${tarball}" 246 | return 1 247 | ;; 248 | esac 249 | } 250 | http_download_curl() { 251 | local_file=$1 252 | source_url=$2 253 | header=$3 254 | if [ -z "$header" ]; then 255 | code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url") 256 | else 257 | code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url") 258 | fi 259 | if [ "$code" != "200" ]; then 260 | log_debug "http_download_curl received HTTP status $code" 261 | return 1 262 | fi 263 | return 0 264 | } 265 | http_download_wget() { 266 | local_file=$1 267 | source_url=$2 268 | header=$3 269 | if [ -z "$header" ]; then 270 | wget -q -O "$local_file" "$source_url" 271 | else 272 | wget -q --header "$header" -O "$local_file" "$source_url" 273 | fi 274 | } 275 | http_download() { 276 | log_debug "http_download $2" 277 | if is_command curl; then 278 | http_download_curl "$@" 279 | return 280 | elif is_command wget; then 281 | http_download_wget "$@" 282 | return 283 | fi 284 | log_crit "http_download unable to find wget or curl" 285 | return 1 286 | } 287 | http_copy() { 288 | tmp=$(mktemp) 289 | http_download "${tmp}" "$1" "$2" || return 1 290 | body=$(cat "$tmp") 291 | rm -f "${tmp}" 292 | echo "$body" 293 | } 294 | github_release() { 295 | owner_repo=$1 296 | version=$2 297 | test -z "$version" && version="latest" 298 | giturl="https://github.com/${owner_repo}/releases/${version}" 299 | json=$(http_copy "$giturl" "Accept:application/json") 300 | test -z "$json" && return 1 301 | version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//') 302 | test -z "$version" && return 1 303 | echo "$version" 304 | } 305 | hash_sha256() { 306 | TARGET=${1:-/dev/stdin} 307 | if is_command gsha256sum; then 308 | hash=$(gsha256sum "$TARGET") || return 1 309 | echo "$hash" | cut -d ' ' -f 1 310 | elif is_command sha256sum; then 311 | hash=$(sha256sum "$TARGET") || return 1 312 | echo "$hash" | cut -d ' ' -f 1 313 | elif is_command shasum; then 314 | hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1 315 | echo "$hash" | cut -d ' ' -f 1 316 | elif is_command openssl; then 317 | hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1 318 | echo "$hash" | cut -d ' ' -f a 319 | else 320 | log_crit "hash_sha256 unable to find command to compute sha-256 hash" 321 | return 1 322 | fi 323 | } 324 | hash_sha256_verify() { 325 | TARGET=$1 326 | checksums=$2 327 | if [ -z "$checksums" ]; then 328 | log_err "hash_sha256_verify checksum file not specified in arg2" 329 | return 1 330 | fi 331 | BASENAME=${TARGET##*/} 332 | want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1) 333 | if [ -z "$want" ]; then 334 | log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'" 335 | return 1 336 | fi 337 | got=$(hash_sha256 "$TARGET") 338 | if [ "$want" != "$got" ]; then 339 | log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got" 340 | return 1 341 | fi 342 | } 343 | cat /dev/null <> r) 70 | k *= m 71 | h *= m 72 | h ^= k 73 | } 74 | 75 | switch length % 4 { 76 | case 3: 77 | h ^= int32(data[(length & ^3)+2]&0xff) << 16 78 | fallthrough 79 | case 2: 80 | h ^= 
int32(data[(length & ^3)+1]&0xff) << 8 81 | fallthrough 82 | case 1: 83 | h ^= int32(data[length & ^3] & 0xff) 84 | h *= m 85 | } 86 | 87 | h ^= int32(uint32(h) >> 13) 88 | h *= m 89 | h ^= int32(uint32(h) >> 15) 90 | 91 | return h 92 | } 93 | 94 | // toPositive converts i to positive number as per the original implementation in the JVM clients for Kafka. 95 | // See the original implementation: https://github.com/apache/kafka/blob/1.0.0/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L741 96 | func toPositive(i int32) int32 { 97 | return i & 0x7fffffff 98 | } 99 | -------------------------------------------------------------------------------- /pkg/proto/proto.go: -------------------------------------------------------------------------------- 1 | package proto 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | 7 | "strings" 8 | 9 | "github.com/jhump/protoreflect/desc" 10 | "github.com/jhump/protoreflect/desc/protoparse" 11 | "github.com/jhump/protoreflect/dynamic" 12 | ) 13 | 14 | type DescriptorRegistry struct { 15 | descriptors []*desc.FileDescriptor 16 | } 17 | 18 | func NewDescriptorRegistry(importPaths []string, exclusions []string) (*DescriptorRegistry, error) { 19 | p := &protoparse.Parser{ 20 | ImportPaths: importPaths, 21 | } 22 | 23 | var protoFiles []string 24 | 25 | for _, importPath := range importPaths { 26 | err := filepath.Walk(importPath, func(path string, info os.FileInfo, err error) error { 27 | if info != nil && !info.IsDir() && strings.HasSuffix(path, ".proto") { 28 | protoFiles = append(protoFiles, path) 29 | } 30 | 31 | return nil 32 | }) 33 | if err != nil { 34 | return nil, err 35 | } 36 | 37 | } 38 | 39 | resolved, err := protoparse.ResolveFilenames(importPaths, protoFiles...) 40 | if err != nil { 41 | return nil, err 42 | } 43 | 44 | var deduped []string 45 | for _, i := range resolved { 46 | 47 | var exclusionFound bool 48 | for _, exclusion := range exclusions { 49 | if strings.HasPrefix(i, exclusion) { 50 | exclusionFound = true 51 | break 52 | } 53 | } 54 | 55 | if !exclusionFound { 56 | deduped = append(deduped, i) 57 | } 58 | } 59 | 60 | descs, err := p.ParseFiles(deduped...) 61 | if err != nil { 62 | return nil, err 63 | } 64 | 65 | return &DescriptorRegistry{descriptors: descs}, nil 66 | } 67 | 68 | func (d *DescriptorRegistry) MessageForType(_type string) *dynamic.Message { 69 | for _, descriptor := range d.descriptors { 70 | if messageDescriptor := descriptor.FindMessage(_type); messageDescriptor != nil { 71 | return dynamic.NewMessage(messageDescriptor) 72 | } 73 | } 74 | return nil 75 | } 76 | -------------------------------------------------------------------------------- /pkg/streams/decoder.go: -------------------------------------------------------------------------------- 1 | package streams 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "math" 7 | ) 8 | 9 | var errInvalidArrayLength = errors.New("invalid array length") 10 | var errInvalidByteSliceLength = errors.New("invalid byteslice length") 11 | 12 | //var errInvalidByteSliceLengthType = errors.New("invalid byteslice length type") 13 | var errInvalidStringLength = errors.New("invalid string length") 14 | 15 | //var errInvalidSubsetSize = errors.New("invalid subset size") 16 | var errVarintOverflow = errors.New("varint overflow") 17 | var errInvalidBool = errors.New("invalid bool") 18 | 19 | // ErrInsufficientData is returned when decoding and the packet is truncated. 
This can be expected 20 | // when requesting messages, since as an optimization the server is allowed to return a partial message at the end 21 | // of the message set. 22 | var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") 23 | 24 | type realDecoder struct { 25 | raw []byte 26 | off int 27 | } 28 | 29 | func NewDecoder(raw []byte) PacketDecoder { 30 | return &realDecoder{ 31 | raw: raw, 32 | } 33 | } 34 | 35 | type PacketDecoder interface { 36 | // Primitives 37 | getInt8() (int8, error) 38 | getInt16() (int16, error) 39 | getInt32() (int32, error) 40 | getInt64() (int64, error) 41 | getVarint() (int64, error) 42 | getArrayLength() (int, error) 43 | getBool() (bool, error) 44 | 45 | // Collections 46 | getBytes() ([]byte, error) 47 | getVarintBytes() ([]byte, error) 48 | getRawBytes(length int) ([]byte, error) 49 | getString() (string, error) 50 | getNullableString() (*string, error) 51 | getInt32Array() ([]int32, error) 52 | getInt64Array() ([]int64, error) 53 | getStringArray() ([]string, error) 54 | 55 | // Subsets 56 | remaining() int 57 | getSubset(length int) (PacketDecoder, error) 58 | peek(offset, length int) (PacketDecoder, error) // similar to getSubset, but it doesn't advance the offset 59 | 60 | } 61 | 62 | // primitives 63 | 64 | func (rd *realDecoder) getInt8() (int8, error) { 65 | if rd.remaining() < 1 { 66 | rd.off = len(rd.raw) 67 | return -1, ErrInsufficientData 68 | } 69 | tmp := int8(rd.raw[rd.off]) 70 | rd.off++ 71 | return tmp, nil 72 | } 73 | 74 | func (rd *realDecoder) getInt16() (int16, error) { 75 | if rd.remaining() < 2 { 76 | rd.off = len(rd.raw) 77 | return -1, ErrInsufficientData 78 | } 79 | tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) 80 | rd.off += 2 81 | return tmp, nil 82 | } 83 | 84 | func (rd *realDecoder) getInt32() (int32, error) { 85 | if rd.remaining() < 4 { 86 | rd.off = len(rd.raw) 87 | return -1, ErrInsufficientData 88 | } 89 | tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) 90 | rd.off += 4 91 | return tmp, nil 92 | } 93 | 94 | func (rd *realDecoder) getInt64() (int64, error) { 95 | if rd.remaining() < 8 { 96 | rd.off = len(rd.raw) 97 | return -1, ErrInsufficientData 98 | } 99 | tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) 100 | rd.off += 8 101 | return tmp, nil 102 | } 103 | 104 | func (rd *realDecoder) getVarint() (int64, error) { 105 | tmp, n := binary.Varint(rd.raw[rd.off:]) 106 | if n == 0 { 107 | rd.off = len(rd.raw) 108 | return -1, ErrInsufficientData 109 | } 110 | if n < 0 { 111 | rd.off -= n 112 | return -1, errVarintOverflow 113 | } 114 | rd.off += n 115 | return tmp, nil 116 | } 117 | 118 | func (rd *realDecoder) getArrayLength() (int, error) { 119 | if rd.remaining() < 4 { 120 | rd.off = len(rd.raw) 121 | return -1, ErrInsufficientData 122 | } 123 | tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))) 124 | rd.off += 4 125 | if tmp > rd.remaining() { 126 | rd.off = len(rd.raw) 127 | return -1, ErrInsufficientData 128 | } else if tmp > 2*math.MaxUint16 { 129 | return -1, errInvalidArrayLength 130 | } 131 | return tmp, nil 132 | } 133 | 134 | func (rd *realDecoder) getBool() (bool, error) { 135 | b, err := rd.getInt8() 136 | if err != nil || b == 0 { 137 | return false, err 138 | } 139 | if b != 1 { 140 | return false, errInvalidBool 141 | } 142 | return true, nil 143 | } 144 | 145 | // collections 146 | 147 | func (rd *realDecoder) getBytes() ([]byte, error) { 148 | tmp, err := rd.getInt32() 149 | if err != nil { 150 | return nil, err 151 | } 152 | 
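// In the Kafka wire protocol a byte-slice length of -1 encodes a null value,
// which is why the next check returns a nil slice with no error instead of
// rejecting the negative length.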
if tmp == -1 { 153 | return nil, nil 154 | } 155 | 156 | return rd.getRawBytes(int(tmp)) 157 | } 158 | 159 | func (rd *realDecoder) getVarintBytes() ([]byte, error) { 160 | tmp, err := rd.getVarint() 161 | if err != nil { 162 | return nil, err 163 | } 164 | if tmp == -1 { 165 | return nil, nil 166 | } 167 | 168 | return rd.getRawBytes(int(tmp)) 169 | } 170 | 171 | func (rd *realDecoder) getStringLength() (int, error) { 172 | length, err := rd.getInt16() 173 | if err != nil { 174 | return 0, err 175 | } 176 | 177 | n := int(length) 178 | 179 | switch { 180 | case n < -1: 181 | return 0, errInvalidStringLength 182 | case n > rd.remaining(): 183 | rd.off = len(rd.raw) 184 | return 0, ErrInsufficientData 185 | } 186 | 187 | return n, nil 188 | } 189 | 190 | func (rd *realDecoder) getString() (string, error) { 191 | n, err := rd.getStringLength() 192 | if err != nil || n == -1 { 193 | return "", err 194 | } 195 | 196 | tmpStr := string(rd.raw[rd.off : rd.off+n]) 197 | rd.off += n 198 | return tmpStr, nil 199 | } 200 | 201 | func (rd *realDecoder) getNullableString() (*string, error) { 202 | n, err := rd.getStringLength() 203 | if err != nil || n == -1 { 204 | return nil, err 205 | } 206 | 207 | tmpStr := string(rd.raw[rd.off : rd.off+n]) 208 | rd.off += n 209 | return &tmpStr, err 210 | } 211 | 212 | func (rd *realDecoder) getInt32Array() ([]int32, error) { 213 | if rd.remaining() < 4 { 214 | rd.off = len(rd.raw) 215 | return nil, ErrInsufficientData 216 | } 217 | n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) 218 | rd.off += 4 219 | 220 | if rd.remaining() < 4*n { 221 | rd.off = len(rd.raw) 222 | return nil, ErrInsufficientData 223 | } 224 | 225 | if n == 0 { 226 | return nil, nil 227 | } 228 | 229 | if n < 0 { 230 | return nil, errInvalidArrayLength 231 | } 232 | 233 | ret := make([]int32, n) 234 | for i := range ret { 235 | ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) 236 | rd.off += 4 237 | } 238 | return ret, nil 239 | } 240 | 241 | func (rd *realDecoder) getInt64Array() ([]int64, error) { 242 | if rd.remaining() < 4 { 243 | rd.off = len(rd.raw) 244 | return nil, ErrInsufficientData 245 | } 246 | n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) 247 | rd.off += 4 248 | 249 | if rd.remaining() < 8*n { 250 | rd.off = len(rd.raw) 251 | return nil, ErrInsufficientData 252 | } 253 | 254 | if n == 0 { 255 | return nil, nil 256 | } 257 | 258 | if n < 0 { 259 | return nil, errInvalidArrayLength 260 | } 261 | 262 | ret := make([]int64, n) 263 | for i := range ret { 264 | ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) 265 | rd.off += 8 266 | } 267 | return ret, nil 268 | } 269 | 270 | func (rd *realDecoder) getStringArray() ([]string, error) { 271 | if rd.remaining() < 4 { 272 | rd.off = len(rd.raw) 273 | return nil, ErrInsufficientData 274 | } 275 | n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) 276 | rd.off += 4 277 | 278 | if n == 0 { 279 | return nil, nil 280 | } 281 | 282 | if n < 0 { 283 | return nil, errInvalidArrayLength 284 | } 285 | 286 | ret := make([]string, n) 287 | for i := range ret { 288 | str, err := rd.getString() 289 | if err != nil { 290 | return nil, err 291 | } 292 | 293 | ret[i] = str 294 | } 295 | return ret, nil 296 | } 297 | 298 | // subsets 299 | 300 | func (rd *realDecoder) remaining() int { 301 | return len(rd.raw) - rd.off 302 | } 303 | 304 | func (rd *realDecoder) getSubset(length int) (PacketDecoder, error) { 305 | buf, err := rd.getRawBytes(length) 306 | if err != nil { 307 | return nil, err 308 | } 309 | return &realDecoder{raw: buf}, 
nil 310 | } 311 | 312 | func (rd *realDecoder) getRawBytes(length int) ([]byte, error) { 313 | if length < 0 { 314 | return nil, errInvalidByteSliceLength 315 | } else if length > rd.remaining() { 316 | rd.off = len(rd.raw) 317 | return nil, ErrInsufficientData 318 | } 319 | 320 | start := rd.off 321 | rd.off += length 322 | return rd.raw[start:rd.off], nil 323 | } 324 | 325 | func (rd *realDecoder) peek(offset, length int) (PacketDecoder, error) { 326 | if rd.remaining() < offset+length { 327 | return nil, ErrInsufficientData 328 | } 329 | off := rd.off + offset 330 | return &realDecoder{raw: rd.raw[off : off+length]}, nil 331 | } 332 | -------------------------------------------------------------------------------- /pkg/streams/subscription_info.go: -------------------------------------------------------------------------------- 1 | package streams 2 | 3 | type SubscriptionInfo struct { 4 | Version int32 5 | UUID []byte // 16-byte UUID 6 | ProcessID string 7 | PrevTasks []TaskID 8 | StandbyTasks []TaskID 9 | UserEndpoint string 10 | } 11 | 12 | // Support version 1+2 13 | func (s *SubscriptionInfo) Decode(pd PacketDecoder) (err error) { 14 | s.Version, err = pd.getInt32() 15 | if err != nil { 16 | return err 17 | } 18 | 19 | s.UUID, err = pd.getRawBytes(16) 20 | if err != nil { 21 | return err 22 | } 23 | 24 | numPrevs, err := pd.getInt32() 25 | if err != nil { 26 | return err 27 | } 28 | 29 | for i := 0; i < int(numPrevs); i++ { 30 | t := TaskID{} 31 | 32 | t.TopicGroupID, err = pd.getInt32() 33 | if err != nil { 34 | return err 35 | } 36 | 37 | t.Partition, err = pd.getInt32() 38 | if err != nil { 39 | return err 40 | } 41 | 42 | s.PrevTasks = append(s.PrevTasks, t) 43 | } 44 | 45 | numStandby, err := pd.getInt32() 46 | if err != nil { 47 | return err 48 | } 49 | 50 | for i := 0; i < int(numStandby); i++ { 51 | t := TaskID{} 52 | 53 | t.TopicGroupID, err = pd.getInt32() 54 | if err != nil { 55 | return err 56 | } 57 | 58 | t.Partition, err = pd.getInt32() 59 | if err != nil { 60 | return err 61 | } 62 | 63 | s.StandbyTasks = append(s.StandbyTasks, t) 64 | } 65 | 66 | userEndpointBytes, err := pd.getBytes() 67 | if err != nil { 68 | return err 69 | } 70 | 71 | s.UserEndpoint = string(userEndpointBytes) 72 | 73 | return nil 74 | } 75 | 76 | type TaskID struct { 77 | TopicGroupID int32 78 | Partition int32 79 | } 80 | --------------------------------------------------------------------------------
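A minimal usage sketch for the decoder and SubscriptionInfo types above. It assumes the repository's module path is github.com/birdayz/kaf; rawUserData is a hypothetical placeholder for the member-metadata bytes a real caller would obtain (for example, from a consumer group description), and an empty or truncated payload surfaces as ErrInsufficientData.

package main

import (
	"fmt"
	"log"

	"github.com/birdayz/kaf/pkg/streams"
)

func main() {
	// Placeholder for the user-data bytes of a Kafka Streams group member;
	// a real caller would pass the metadata fetched from the broker.
	var rawUserData []byte

	info := streams.SubscriptionInfo{}
	if err := info.Decode(streams.NewDecoder(rawUserData)); err != nil {
		// An empty or truncated payload fails with ErrInsufficientData.
		log.Fatal(err)
	}

	fmt.Printf("version=%d uuid=%x prev=%d standby=%d endpoint=%q\n",
		info.Version, info.UUID, len(info.PrevTasks),
		len(info.StandbyTasks), info.UserEndpoint)
}

Note that Decode takes the PacketDecoder interface rather than a raw byte slice, so the same SubscriptionInfo parsing logic works against any decoder implementation, including subsets produced by getSubset or peek.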