├── .dockerignore ├── .github └── workflows │ └── ci.yml ├── .gitignore ├── CODEOWNERS ├── Dockerfile ├── LICENSE ├── Makefile ├── NOTICE ├── README.md ├── cmd └── topicctl │ ├── main.go │ └── subcmd │ ├── apply.go │ ├── bootstrap.go │ ├── check.go │ ├── create.go │ ├── delete.go │ ├── get.go │ ├── rebalance.go │ ├── repl.go │ ├── reset.go │ ├── root.go │ ├── shared.go │ ├── tail.go │ └── tester.go ├── docker-compose-auth.yml ├── docker-compose.yml ├── examples ├── auth │ ├── acls │ │ └── acl-default.yaml │ ├── certs │ │ ├── ca.crt │ │ ├── ca.key │ │ ├── client.crt │ │ ├── client.key │ │ ├── kafka.keystore.jks │ │ └── kafka.truststore.jks │ ├── cluster.yaml │ └── topics │ │ └── topic-default.yaml ├── local-cluster │ ├── cluster.yaml │ └── topics │ │ ├── topic-default.yaml │ │ ├── topic-in-rack.yaml │ │ ├── topic-static-in-rack.yaml │ │ └── topic-static.yaml └── msk │ ├── cluster.yaml │ └── topics │ └── topic-default.yaml ├── go.mod ├── go.sum ├── pkg ├── acl │ ├── acl.go │ └── acl_test.go ├── admin │ ├── brokerclient.go │ ├── brokerclient_test.go │ ├── client.go │ ├── connector.go │ ├── format.go │ ├── support.go │ ├── throttles.go │ ├── throttles_test.go │ ├── types.go │ ├── types_test.go │ ├── zkclient.go │ └── zkclient_test.go ├── apply │ ├── apply.go │ ├── apply_test.go │ ├── assigners │ │ ├── assigner.go │ │ ├── balanced_leader_test.go │ │ ├── balancer_leader.go │ │ ├── cross_rack.go │ │ ├── cross_rack_test.go │ │ ├── evaluate.go │ │ ├── evaluate_test.go │ │ ├── single_rack.go │ │ ├── single_rack_test.go │ │ ├── static.go │ │ ├── static_single_rack.go │ │ ├── static_single_rack_test.go │ │ ├── static_test.go │ │ └── testing.go │ ├── extenders │ │ ├── balanced.go │ │ ├── balanced_test.go │ │ ├── extender.go │ │ ├── static.go │ │ ├── static_test.go │ │ └── testing.go │ ├── format.go │ ├── pickers │ │ ├── cluster_use.go │ │ ├── cluster_use_test.go │ │ ├── lowest_index.go │ │ ├── lowest_index_test.go │ │ ├── picker.go │ │ ├── randomize_test.go │ │ ├── 
randomized.go │ │ └── testing.go │ └── rebalancers │ │ ├── frequency.go │ │ ├── frequency_test.go │ │ ├── rebalancer.go │ │ └── testing.go ├── check │ ├── check.go │ ├── check_test.go │ ├── format.go │ └── result.go ├── cli │ ├── cli.go │ ├── command.go │ ├── command_test.go │ └── repl.go ├── config │ ├── acl.go │ ├── acl_test.go │ ├── cluster.go │ ├── cluster_test.go │ ├── load.go │ ├── load_test.go │ ├── meta.go │ ├── meta_test.go │ ├── settings.go │ ├── settings_test.go │ ├── testdata │ │ └── test-cluster │ │ │ ├── acls │ │ │ ├── acl-test-invalid.yaml │ │ │ ├── acl-test-multi.yaml │ │ │ ├── acl-test-no-match.yaml │ │ │ └── acl-test.yaml │ │ │ ├── cluster-extra-fields.yaml │ │ │ ├── cluster-invalid.yaml │ │ │ ├── cluster.yaml │ │ │ └── topics │ │ │ ├── topic-test-invalid.yaml │ │ │ ├── topic-test-multi.yaml │ │ │ ├── topic-test-no-match.yaml │ │ │ └── topic-test.yaml │ ├── topic.go │ └── topic_test.go ├── create │ ├── acl.go │ └── acl_test.go ├── groups │ ├── format.go │ ├── groups.go │ ├── groups_test.go │ └── types.go ├── messages │ ├── bounds.go │ ├── bounds_test.go │ ├── format.go │ ├── tail.go │ └── tail_test.go ├── util │ ├── confirm.go │ ├── durations.go │ ├── durations_test.go │ ├── error.go │ ├── maps.go │ ├── progress.go │ ├── slices.go │ ├── strings.go │ ├── strings_test.go │ ├── terminal.go │ └── testing.go ├── version │ └── version.go └── zk │ ├── client.go │ ├── client_test.go │ ├── lock.go │ ├── logger.go │ └── testing_util.go └── scripts └── set_up_net_alias.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | build 2 | Dockerfile 3 | .buildkite 4 | .circleci 5 | .git 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 
11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | vendor/ 16 | 17 | build/ 18 | 19 | .vscode 20 | 21 | # Emacs backups 22 | *~ -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Order is important; the last matching pattern takes the most precedence. 2 | 3 | # These owners will be the default owners for everything in the repo. 4 | * @segmentio/data-platform 5 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=$BUILDPLATFORM golang:1.24.0 as builder 2 | ENV SRC github.com/segmentio/topicctl 3 | ENV CGO_ENABLED=0 4 | 5 | ARG VERSION 6 | RUN test -n "${VERSION}" 7 | 8 | COPY . /go/src/${SRC} 9 | 10 | ARG TARGETOS TARGETARCH 11 | RUN cd /go/src/${SRC} && \ 12 | GOOS=$TARGETOS GOARCH=$TARGETARCH make topicctl VERSION=${VERSION} 13 | 14 | FROM scratch 15 | 16 | COPY --from=builder \ 17 | /go/src/github.com/segmentio/topicctl/build/topicctl \ 18 | /bin/topicctl 19 | ENTRYPOINT ["/bin/topicctl"] 20 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Segment.io, Inc. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ifndef VERSION 2 | VERSION := $(shell git describe --tags --always --dirty="-dev") 3 | endif 4 | 5 | PKG := ./cmd/topicctl 6 | BIN := build/topicctl 7 | 8 | LDFLAGS := -ldflags='-X "main.version=$(VERSION)"' 9 | 10 | .PHONY: topicctl 11 | topicctl: 12 | go build -o $(BIN) $(LDFLAGS) $(PKG) 13 | 14 | .PHONY: install 15 | install: 16 | go install $(LDFLAGS) $(PKG) 17 | 18 | .PHONY: vet 19 | vet: 20 | go vet ./... 21 | 22 | .PHONY: test 23 | test: vet 24 | go test -count 1 -p 1 ./... 25 | 26 | .PHONY: test-v2 27 | test-v2: vet 28 | KAFKA_TOPICS_TEST_BROKER_ADMIN=1 go test -count 1 -p 1 ./... 
29 | 30 | .PHONY: clean 31 | clean: 32 | rm -Rf build 33 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | topicctl 2 | Copyright 2020 Segment.io, Inc. 3 | Released under the MIT License. See LICENSE for license terms and conditions. 4 | -------------------------------------------------------------------------------- /cmd/topicctl/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/segmentio/topicctl/cmd/topicctl/subcmd" 5 | ) 6 | 7 | var ( 8 | // Version is the version of this binary. Overridden as part of the build process. 9 | Version = "dev" 10 | ) 11 | 12 | func main() { 13 | subcmd.Execute(Version) 14 | } 15 | -------------------------------------------------------------------------------- /cmd/topicctl/subcmd/bootstrap.go: -------------------------------------------------------------------------------- 1 | package subcmd 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/segmentio/topicctl/pkg/cli" 7 | "github.com/segmentio/topicctl/pkg/config" 8 | log "github.com/sirupsen/logrus" 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | var bootstrapCmd = &cobra.Command{ 13 | Use: "bootstrap [topics]", 14 | Short: "bootstrap topic configs from existing topic(s) in a cluster", 15 | RunE: bootstrapRun, 16 | } 17 | 18 | type bootstrapCmdConfig struct { 19 | matchRegexp string 20 | excludeRegexp string 21 | outputDir string 22 | overwrite bool 23 | 24 | allowInternalTopics bool 25 | 26 | shared sharedOptions 27 | } 28 | 29 | var bootstrapConfig bootstrapCmdConfig 30 | 31 | func init() { 32 | bootstrapCmd.Flags().StringVar( 33 | &bootstrapConfig.matchRegexp, 34 | "match", 35 | ".*", 36 | "Match regexp", 37 | ) 38 | bootstrapCmd.Flags().StringVar( 39 | &bootstrapConfig.excludeRegexp, 40 | "exclude", 41 | ".^", 42 | "Exclude regexp", 43 | ) 44 | 
bootstrapCmd.Flags().StringVarP( 45 | &bootstrapConfig.outputDir, 46 | "output", 47 | "o", 48 | "", 49 | "Output directory", 50 | ) 51 | bootstrapCmd.Flags().BoolVar( 52 | &bootstrapConfig.overwrite, 53 | "overwrite", 54 | false, 55 | "Overwrite existing configs in output directory", 56 | ) 57 | bootstrapCmd.Flags().BoolVar( 58 | &bootstrapConfig.allowInternalTopics, 59 | "allow-internal-topics", 60 | false, 61 | "Include topics that start with __ (typically these are internal topics)") 62 | 63 | addSharedConfigOnlyFlags(bootstrapCmd, &bootstrapConfig.shared) 64 | bootstrapCmd.MarkFlagRequired("cluster-config") 65 | RootCmd.AddCommand(bootstrapCmd) 66 | } 67 | 68 | func bootstrapRun(cmd *cobra.Command, args []string) error { 69 | ctx, cancel := context.WithCancel(context.Background()) 70 | defer cancel() 71 | 72 | clusterConfig, err := config.LoadClusterFile( 73 | bootstrapConfig.shared.clusterConfig, 74 | bootstrapConfig.shared.expandEnv, 75 | ) 76 | if err != nil { 77 | return err 78 | } 79 | adminClient, err := clusterConfig.NewAdminClient( 80 | ctx, 81 | nil, 82 | config.AdminClientOpts{ 83 | ReadOnly: true, 84 | UsernameOverride: bootstrapConfig.shared.saslUsername, 85 | PasswordOverride: bootstrapConfig.shared.saslPassword, 86 | SecretsManagerArnOverride: bootstrapConfig.shared.saslSecretsManagerArn, 87 | }, 88 | ) 89 | if err != nil { 90 | return err 91 | } 92 | 93 | cliRunner := cli.NewCLIRunner(adminClient, log.Infof, false) 94 | return cliRunner.BootstrapTopics( 95 | ctx, 96 | args, 97 | clusterConfig, 98 | bootstrapConfig.matchRegexp, 99 | bootstrapConfig.excludeRegexp, 100 | bootstrapConfig.outputDir, 101 | bootstrapConfig.overwrite, 102 | bootstrapConfig.allowInternalTopics, 103 | ) 104 | } 105 | -------------------------------------------------------------------------------- /cmd/topicctl/subcmd/check.go: -------------------------------------------------------------------------------- 1 | package subcmd 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 
"os" 7 | "path/filepath" 8 | 9 | "github.com/segmentio/topicctl/pkg/admin" 10 | "github.com/segmentio/topicctl/pkg/check" 11 | "github.com/segmentio/topicctl/pkg/cli" 12 | "github.com/segmentio/topicctl/pkg/config" 13 | log "github.com/sirupsen/logrus" 14 | "github.com/spf13/cobra" 15 | ) 16 | 17 | var checkCmd = &cobra.Command{ 18 | Use: "check [topic configs]", 19 | Short: "check that configs are valid and (optionally) match cluster state", 20 | RunE: checkRun, 21 | } 22 | 23 | type checkCmdConfig struct { 24 | checkLeaders bool 25 | pathPrefix string 26 | validateOnly bool 27 | 28 | shared sharedOptions 29 | } 30 | 31 | var checkConfig checkCmdConfig 32 | 33 | func init() { 34 | checkCmd.Flags().StringVar( 35 | &checkConfig.pathPrefix, 36 | "path-prefix", 37 | os.Getenv("TOPICCTL_APPLY_PATH_PREFIX"), 38 | "Prefix for topic config paths", 39 | ) 40 | checkCmd.Flags().BoolVar( 41 | &checkConfig.checkLeaders, 42 | "check-leaders", 43 | false, 44 | "Check leaders", 45 | ) 46 | checkCmd.Flags().BoolVar( 47 | &checkConfig.validateOnly, 48 | "validate-only", 49 | false, 50 | "Validate configs only, without connecting to cluster", 51 | ) 52 | 53 | addSharedConfigOnlyFlags(checkCmd, &checkConfig.shared) 54 | RootCmd.AddCommand(checkCmd) 55 | } 56 | 57 | func checkRun(cmd *cobra.Command, args []string) error { 58 | ctx := context.Background() 59 | 60 | // Keep a cache of the admin clients with the cluster config path as the key 61 | adminClients := map[string]admin.Client{} 62 | 63 | defer func() { 64 | for _, adminClient := range adminClients { 65 | adminClient.Close() 66 | } 67 | }() 68 | 69 | matchCount := 0 70 | okCount := 0 71 | 72 | for _, arg := range args { 73 | if checkConfig.pathPrefix != "" && !filepath.IsAbs(arg) { 74 | arg = filepath.Join(checkConfig.pathPrefix, arg) 75 | } 76 | 77 | matches, err := filepath.Glob(arg) 78 | if err != nil { 79 | return err 80 | } 81 | 82 | for _, match := range matches { 83 | matchCount++ 84 | 85 | ok, err := 
checkTopicFile(ctx, match, adminClients) 86 | if err != nil { 87 | return err 88 | } 89 | 90 | if ok { 91 | okCount++ 92 | } 93 | } 94 | } 95 | 96 | if matchCount == 0 { 97 | return fmt.Errorf("No topic configs match the provided args (%+v)", args) 98 | } else if matchCount > okCount { 99 | return fmt.Errorf( 100 | "Check failed for %d/%d topic configs", 101 | matchCount-okCount, 102 | matchCount, 103 | ) 104 | } 105 | 106 | return nil 107 | } 108 | 109 | func checkTopicFile( 110 | ctx context.Context, 111 | topicConfigPath string, 112 | adminClients map[string]admin.Client, 113 | ) (bool, error) { 114 | clusterConfigPath, err := clusterConfigForTopicCheck(topicConfigPath) 115 | if err != nil { 116 | return false, err 117 | } 118 | 119 | clusterConfig, err := config.LoadClusterFile(clusterConfigPath, checkConfig.shared.expandEnv) 120 | if err != nil { 121 | return false, err 122 | } 123 | 124 | topicConfigs, err := config.LoadTopicsFile(topicConfigPath) 125 | if err != nil { 126 | return false, err 127 | } 128 | 129 | var adminClient admin.Client 130 | 131 | numRacks := -1 132 | 133 | if !checkConfig.validateOnly { 134 | var ok bool 135 | adminClient, ok = adminClients[clusterConfigPath] 136 | if !ok { 137 | adminClient, err = clusterConfig.NewAdminClient( 138 | ctx, 139 | nil, 140 | config.AdminClientOpts{ 141 | ReadOnly: true, 142 | UsernameOverride: checkConfig.shared.saslUsername, 143 | PasswordOverride: checkConfig.shared.saslPassword, 144 | SecretsManagerArnOverride: checkConfig.shared.saslSecretsManagerArn, 145 | }, 146 | ) 147 | if err != nil { 148 | return false, err 149 | } 150 | adminClients[clusterConfigPath] = adminClient 151 | numRacks, err = countRacks(ctx, adminClient) 152 | if err != nil { 153 | return false, err 154 | } 155 | } 156 | } 157 | 158 | cliRunner := cli.NewCLIRunner(adminClient, log.Infof, false) 159 | 160 | for _, topicConfig := range topicConfigs { 161 | topicConfig.SetDefaults() 162 | log.Debugf( 163 | "Processing topic %s in config 
%s with cluster config %s", 164 | topicConfig.Meta.Name, 165 | topicConfigPath, 166 | clusterConfigPath, 167 | ) 168 | 169 | topicCheckConfig := check.CheckConfig{ 170 | AdminClient: adminClient, 171 | CheckLeaders: checkConfig.checkLeaders, 172 | ClusterConfig: clusterConfig, 173 | NumRacks: numRacks, 174 | TopicConfig: topicConfig, 175 | ValidateOnly: checkConfig.validateOnly, 176 | } 177 | result, err := cliRunner.CheckTopic( 178 | ctx, 179 | topicCheckConfig, 180 | ) 181 | if !result || err != nil { 182 | return result, err 183 | } 184 | } 185 | 186 | return true, nil 187 | } 188 | 189 | func clusterConfigForTopicCheck(topicConfigPath string) (string, error) { 190 | if checkConfig.shared.clusterConfig != "" { 191 | return checkConfig.shared.clusterConfig, nil 192 | } 193 | 194 | return filepath.Abs( 195 | filepath.Join( 196 | filepath.Dir(topicConfigPath), 197 | "..", 198 | "cluster.yaml", 199 | ), 200 | ) 201 | } 202 | 203 | func countRacks(ctx context.Context, c admin.Client) (int, error) { 204 | ids, err := c.GetBrokerIDs(ctx) 205 | if err != nil { 206 | return 0, err 207 | } 208 | brokers, err := c.GetBrokers(ctx, ids) 209 | if err != nil { 210 | return 0, err 211 | } 212 | racks := make(map[string]struct{}, len(brokers)) 213 | for i := range brokers { 214 | racks[brokers[i].Rack] = struct{}{} 215 | } 216 | return len(racks), nil 217 | } 218 | -------------------------------------------------------------------------------- /cmd/topicctl/subcmd/create.go: -------------------------------------------------------------------------------- 1 | package subcmd 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "os/signal" 8 | "path/filepath" 9 | "syscall" 10 | 11 | "github.com/segmentio/topicctl/pkg/acl" 12 | "github.com/segmentio/topicctl/pkg/admin" 13 | "github.com/segmentio/topicctl/pkg/cli" 14 | "github.com/segmentio/topicctl/pkg/config" 15 | log "github.com/sirupsen/logrus" 16 | "github.com/spf13/cobra" 17 | ) 18 | 19 | var createCmd = &cobra.Command{ 20 | 
Use: "create [resource type]", 21 | Short: "creates one or more resources", 22 | PersistentPreRunE: createPreRun, 23 | } 24 | 25 | type createCmdConfig struct { 26 | dryRun bool 27 | pathPrefix string 28 | skipConfirm bool 29 | 30 | shared sharedOptions 31 | } 32 | 33 | var createConfig createCmdConfig 34 | 35 | func init() { 36 | createCmd.PersistentFlags().BoolVar( 37 | &createConfig.dryRun, 38 | "dry-run", 39 | false, 40 | "Do a dry-run", 41 | ) 42 | createCmd.PersistentFlags().StringVar( 43 | &createConfig.pathPrefix, 44 | "path-prefix", 45 | os.Getenv("TOPICCTL_ACL_PATH_PREFIX"), 46 | "Prefix for ACL config paths", 47 | ) 48 | createCmd.PersistentFlags().BoolVar( 49 | &createConfig.skipConfirm, 50 | "skip-confirm", 51 | false, 52 | "Skip confirmation prompts during creation process", 53 | ) 54 | 55 | addSharedFlags(createCmd, &createConfig.shared) 56 | createCmd.AddCommand( 57 | createACLsCmd(), 58 | ) 59 | RootCmd.AddCommand(createCmd) 60 | } 61 | 62 | func createPreRun(cmd *cobra.Command, args []string) error { 63 | if err := RootCmd.PersistentPreRunE(cmd, args); err != nil { 64 | return err 65 | } 66 | return createConfig.shared.validate() 67 | } 68 | 69 | func createACLsCmd() *cobra.Command { 70 | cmd := &cobra.Command{ 71 | Use: "acls [acl configs]", 72 | Short: "creates ACLs from configuration files", 73 | Args: cobra.MinimumNArgs(1), 74 | RunE: createACLRun, 75 | PreRunE: createPreRun, 76 | } 77 | 78 | return cmd 79 | } 80 | 81 | func createACLRun(cmd *cobra.Command, args []string) error { 82 | ctx, cancel := context.WithCancel(context.Background()) 83 | defer cancel() 84 | 85 | sigChan := make(chan os.Signal, 1) 86 | signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM) 87 | go func() { 88 | <-sigChan 89 | cancel() 90 | }() 91 | 92 | // Keep a cache of the admin clients with the cluster config path as the key 93 | adminClients := map[string]admin.Client{} 94 | 95 | defer func() { 96 | for _, adminClient := range adminClients { 97 | adminClient.Close() 
98 | } 99 | }() 100 | 101 | matchCount := 0 102 | 103 | for _, arg := range args { 104 | if createConfig.pathPrefix != "" && !filepath.IsAbs(arg) { 105 | arg = filepath.Join(createConfig.pathPrefix, arg) 106 | } 107 | 108 | matches, err := filepath.Glob(arg) 109 | if err != nil { 110 | return err 111 | } 112 | 113 | for _, match := range matches { 114 | matchCount++ 115 | if err := createACL(ctx, match, adminClients); err != nil { 116 | return err 117 | } 118 | } 119 | } 120 | 121 | if matchCount == 0 { 122 | return fmt.Errorf("No ACL configs match the provided args (%+v)", args) 123 | } 124 | 125 | return nil 126 | } 127 | 128 | func createACL( 129 | ctx context.Context, 130 | aclConfigPath string, 131 | adminClients map[string]admin.Client, 132 | ) error { 133 | clusterConfigPath, err := clusterConfigForACLCreate(aclConfigPath) 134 | if err != nil { 135 | return err 136 | } 137 | 138 | aclConfigs, err := config.LoadACLsFile(aclConfigPath) 139 | if err != nil { 140 | return err 141 | } 142 | 143 | clusterConfig, err := config.LoadClusterFile(clusterConfigPath, createConfig.shared.expandEnv) 144 | if err != nil { 145 | return err 146 | } 147 | 148 | adminClient, ok := adminClients[clusterConfigPath] 149 | if !ok { 150 | adminClient, err = clusterConfig.NewAdminClient( 151 | ctx, 152 | nil, 153 | config.AdminClientOpts{ 154 | ReadOnly: createConfig.dryRun, 155 | UsernameOverride: createConfig.shared.saslUsername, 156 | PasswordOverride: createConfig.shared.saslPassword, 157 | SecretsManagerArnOverride: createConfig.shared.saslSecretsManagerArn, 158 | }, 159 | ) 160 | if err != nil { 161 | return err 162 | } 163 | adminClients[clusterConfigPath] = adminClient 164 | } 165 | 166 | cliRunner := cli.NewCLIRunner(adminClient, log.Infof, false) 167 | 168 | for _, aclConfig := range aclConfigs { 169 | aclConfig.SetDefaults() 170 | log.Infof( 171 | "Processing ACL %s in config %s with cluster config %s", 172 | aclConfig.Meta.Name, 173 | aclConfigPath, 174 | 
clusterConfigPath, 175 | ) 176 | 177 | aclAdminConfig := acl.ACLAdminConfig{ 178 | DryRun: createConfig.dryRun, 179 | SkipConfirm: createConfig.skipConfirm, 180 | ACLConfig: aclConfig, 181 | ClusterConfig: clusterConfig, 182 | } 183 | 184 | if err := cliRunner.CreateACL(ctx, aclAdminConfig); err != nil { 185 | return err 186 | } 187 | } 188 | 189 | return nil 190 | } 191 | 192 | func clusterConfigForACLCreate(aclConfigPath string) (string, error) { 193 | if createConfig.shared.clusterConfig != "" { 194 | return createConfig.shared.clusterConfig, nil 195 | } 196 | 197 | return filepath.Abs( 198 | filepath.Join( 199 | filepath.Dir(aclConfigPath), 200 | "..", 201 | "cluster.yaml", 202 | ), 203 | ) 204 | } 205 | -------------------------------------------------------------------------------- /cmd/topicctl/subcmd/delete.go: -------------------------------------------------------------------------------- 1 | package subcmd 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | 7 | "github.com/aws/aws-sdk-go/aws/session" 8 | "github.com/segmentio/kafka-go" 9 | "github.com/segmentio/topicctl/pkg/acl" 10 | "github.com/segmentio/topicctl/pkg/admin" 11 | "github.com/segmentio/topicctl/pkg/cli" 12 | log "github.com/sirupsen/logrus" 13 | "github.com/spf13/cobra" 14 | ) 15 | 16 | var deleteCmd = &cobra.Command{ 17 | Use: "delete [resource type]", 18 | Short: "delete instances of a particular type", 19 | Long: strings.Join( 20 | []string{ 21 | "Deletes instances of a particular type.", 22 | }, 23 | "\n", 24 | ), 25 | PersistentPreRunE: deletePreRun, 26 | } 27 | 28 | type deleteCmdConfig struct { 29 | dryRun bool 30 | 31 | shared sharedOptions 32 | } 33 | 34 | var deleteConfig deleteCmdConfig 35 | 36 | func init() { 37 | deleteCmd.PersistentFlags().BoolVar( 38 | &deleteConfig.dryRun, 39 | "dry-run", 40 | false, 41 | "Do a dry-run", 42 | ) 43 | 44 | addSharedFlags(deleteCmd, &deleteConfig.shared) 45 | deleteCmd.AddCommand( 46 | deleteACLCmd(), 47 | ) 48 | RootCmd.AddCommand(deleteCmd) 49 
| } 50 | 51 | func deletePreRun(cmd *cobra.Command, args []string) error { 52 | return deleteConfig.shared.validate() 53 | } 54 | 55 | var deleteACLsConfig = aclsCmdConfig{ 56 | // This was added in a later version of Kafka, so we provide a default 57 | // value to avoid breaking existing users by making this required. 58 | resourcePatternType: admin.PatternType(kafka.PatternTypeAny), 59 | } 60 | 61 | func deleteACLCmd() *cobra.Command { 62 | cmd := &cobra.Command{ 63 | Use: "acls [flags]", 64 | Short: "Delete ACLs. Requires providing flags to target ACLs for deletion.", 65 | Args: cobra.NoArgs, 66 | Example: `Delete read acls for topic my-topic, user 'User:default', and host '*' 67 | $ topicctl delete acls --resource-type topic --resource-pattern-type literal --resource-name my-topic --principal 'User:default' --host '*' --operation read --permission-type allow 68 | `, 69 | RunE: func(cmd *cobra.Command, args []string) error { 70 | ctx := context.Background() 71 | sess := session.Must(session.NewSession()) 72 | 73 | adminClient, err := deleteConfig.shared.getAdminClient(ctx, sess, deleteConfig.dryRun) 74 | if err != nil { 75 | return err 76 | } 77 | defer adminClient.Close() 78 | 79 | cliRunner := cli.NewCLIRunner(adminClient, log.Infof, !noSpinner) 80 | 81 | filter := kafka.DeleteACLsFilter{ 82 | ResourceTypeFilter: kafka.ResourceType(deleteACLsConfig.resourceType), 83 | ResourceNameFilter: deleteACLsConfig.resourceNameFilter, 84 | ResourcePatternTypeFilter: kafka.PatternType(deleteACLsConfig.resourcePatternType), 85 | PrincipalFilter: deleteACLsConfig.principalFilter, 86 | HostFilter: deleteACLsConfig.hostFilter, 87 | Operation: kafka.ACLOperationType(deleteACLsConfig.operationType), 88 | PermissionType: kafka.ACLPermissionType(deleteACLsConfig.permissionType), 89 | } 90 | 91 | aclAdminConfig := acl.ACLAdminConfig{ 92 | // Omit fields we don't need for deletes 93 | DryRun: deleteConfig.dryRun, 94 | // Deletes cannot be skipped 95 | SkipConfirm: false, 96 | } 97 
| 98 | return cliRunner.DeleteACL(ctx, aclAdminConfig, filter) 99 | }, 100 | } 101 | cmd.Flags().StringVar( 102 | &deleteACLsConfig.hostFilter, 103 | "host", 104 | "", 105 | `The host to filter on. (e.g. 198.51.100.0) (Required)`, 106 | ) 107 | cmd.MarkFlagRequired("host") 108 | 109 | cmd.Flags().Var( 110 | &deleteACLsConfig.operationType, 111 | "operation", 112 | `The operation that is being allowed or denied to filter on. allowed: [any, all, read, write, create, delete, alter, describe, clusteraction, describeconfigs, alterconfigs, idempotentwrite] (Required)`, 113 | ) 114 | cmd.MarkFlagRequired("operation") 115 | 116 | cmd.Flags().Var( 117 | &deleteACLsConfig.permissionType, 118 | "permission-type", 119 | `The permission type to filter on. allowed: [any, allow, deny] (Required)`, 120 | ) 121 | cmd.MarkFlagRequired("permission-type") 122 | 123 | cmd.Flags().StringVar( 124 | &deleteACLsConfig.principalFilter, 125 | "principal", 126 | "", 127 | `The principal to filter on in principalType:name format (e.g. User:alice). (Required)`, 128 | ) 129 | cmd.MarkFlagRequired("principal") 130 | 131 | cmd.Flags().StringVar( 132 | &deleteACLsConfig.resourceNameFilter, 133 | "resource-name", 134 | "", 135 | `The resource name to filter on. (e.g. my-topic) (Required)`, 136 | ) 137 | cmd.MarkFlagRequired("resource-name") 138 | 139 | cmd.Flags().Var( 140 | &deleteACLsConfig.resourcePatternType, 141 | "resource-pattern-type", 142 | `The type of the resource pattern or filter. allowed: [any, match, literal, prefixed]. "any" will match any pattern type (literal or prefixed), but will match the resource name exactly, where as "match" will perform pattern matching to list all acls that affect the supplied resource(s).`, 143 | ) 144 | 145 | cmd.Flags().Var( 146 | &deleteACLsConfig.resourceType, 147 | "resource-type", 148 | `The type of resource to filter on. 
allowed: [any, topic, group, cluster, transactionalid, delegationtoken] (Required)`, 149 | ) 150 | cmd.MarkFlagRequired("resource-type") 151 | return cmd 152 | } 153 | -------------------------------------------------------------------------------- /cmd/topicctl/subcmd/repl.go: -------------------------------------------------------------------------------- 1 | package subcmd 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/aws/aws-sdk-go/aws/session" 7 | "github.com/segmentio/topicctl/pkg/cli" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | var replCmd = &cobra.Command{ 12 | Use: "repl", 13 | Short: "repl allows interactively running commands against a cluster", 14 | PreRunE: replPreRun, 15 | RunE: replRun, 16 | } 17 | 18 | type replCmdConfig struct { 19 | shared sharedOptions 20 | } 21 | 22 | var replConfig replCmdConfig 23 | 24 | func init() { 25 | addSharedFlags(replCmd, &replConfig.shared) 26 | RootCmd.AddCommand(replCmd) 27 | } 28 | 29 | func replPreRun(cmd *cobra.Command, args []string) error { 30 | return replConfig.shared.validate() 31 | } 32 | 33 | func replRun(cmd *cobra.Command, args []string) error { 34 | ctx := context.Background() 35 | sess := session.Must(session.NewSession()) 36 | 37 | adminClient, err := replConfig.shared.getAdminClient(ctx, sess, true) 38 | if err != nil { 39 | return err 40 | } 41 | defer adminClient.Close() 42 | 43 | repl, err := cli.NewRepl(ctx, adminClient) 44 | if err != nil { 45 | return err 46 | } 47 | 48 | repl.Run() 49 | return nil 50 | } 51 | -------------------------------------------------------------------------------- /cmd/topicctl/subcmd/root.go: -------------------------------------------------------------------------------- 1 | package subcmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/segmentio/topicctl/pkg/version" 8 | log "github.com/sirupsen/logrus" 9 | "github.com/spf13/cobra" 10 | prefixed "github.com/x-cray/logrus-prefixed-formatter" 11 | ) 12 | 13 | var debug bool 14 | var noSpinner bool 15 | 
16 | // RootCmd is the cobra CLI root command. 17 | var RootCmd = &cobra.Command{ 18 | Use: "topicctl", 19 | Short: "topicctl runs topic workflows", 20 | SilenceUsage: true, 21 | SilenceErrors: true, 22 | PersistentPreRunE: preRun, 23 | } 24 | 25 | func init() { 26 | log.SetFormatter(&prefixed.TextFormatter{ 27 | TimestampFormat: "2006-01-02 15:04:05", 28 | FullTimestamp: true, 29 | }) 30 | 31 | RootCmd.PersistentFlags().BoolVar( 32 | &debug, 33 | "debug", 34 | false, 35 | "enable debug logging", 36 | ) 37 | RootCmd.PersistentFlags().BoolVar( 38 | &noSpinner, 39 | "no-spinner", 40 | false, 41 | "disable all UI spinners", 42 | ) 43 | } 44 | 45 | // Execute runs topicctl. 46 | func Execute(versionRef string) { 47 | RootCmd.Version = fmt.Sprintf("v%s (ref:%s)", version.Version, versionRef) 48 | 49 | if err := RootCmd.Execute(); err != nil { 50 | log.Errorf("%+v", err) 51 | os.Exit(1) 52 | } 53 | } 54 | 55 | func preRun(cmd *cobra.Command, args []string) error { 56 | if debug { 57 | log.SetLevel(log.DebugLevel) 58 | } 59 | return nil 60 | } 61 | -------------------------------------------------------------------------------- /cmd/topicctl/subcmd/tail.go: -------------------------------------------------------------------------------- 1 | package subcmd 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "os/signal" 7 | "strconv" 8 | "syscall" 9 | 10 | "github.com/segmentio/kafka-go" 11 | "github.com/segmentio/topicctl/pkg/cli" 12 | log "github.com/sirupsen/logrus" 13 | "github.com/spf13/cobra" 14 | ) 15 | 16 | var tailCmd = &cobra.Command{ 17 | Use: "tail [topic name]", 18 | Short: "tail events in a topic", 19 | Args: cobra.MinimumNArgs(1), 20 | PreRunE: tailPreRun, 21 | RunE: tailRun, 22 | } 23 | 24 | type tailCmdConfig struct { 25 | offset int64 26 | partitions []int 27 | raw bool 28 | headers bool 29 | 30 | shared sharedOptions 31 | } 32 | 33 | var tailConfig tailCmdConfig 34 | 35 | func init() { 36 | tailCmd.Flags().Int64Var( 37 | &tailConfig.offset, 38 | "offset", 39 | 
// stringsToInts converts a slice of base-10 strings into a slice of ints.
// Each element must fit in 32 bits; the first unparsable element aborts the
// conversion with that element's parse error.
func stringsToInts(strs []string) ([]int, error) {
	ints := make([]int, 0, len(strs))

	for _, s := range strs {
		parsed, err := strconv.ParseInt(s, 10, 32)
		if err != nil {
			return nil, err
		}
		ints = append(ints, int(parsed))
	}

	return ints, nil
}
import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/segmentio/kafka-go"
	"github.com/segmentio/topicctl/pkg/util"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
)

// testerCmd is the "tester" subcommand, which produces or consumes test
// events against a cluster (mode selected via the --mode flag).
var testerCmd = &cobra.Command{
	Use:     "tester",
	Short:   "tester reads or writes test events to a cluster",
	PreRunE: testerPreRun,
	RunE:    testerRun,
}

// testerCmdConfig holds the flag values for the tester subcommand.
type testerCmdConfig struct {
	// mode selects the tester behavior: "reader" or "writer".
	mode string
	// readConsumer is the consumer group ID used in reader mode.
	readConsumer string
	// topic is the topic to read from or write to (required).
	topic string
	// writeRate is the approximate number of messages written per second
	// in writer mode.
	writeRate int

	// shared carries the cluster-connection options common to subcommands.
	shared sharedOptions
}

// testerConfig is populated by cobra flag parsing before the command runs.
var testerConfig testerCmdConfig

// init registers the tester subcommand and its flags on the root command.
func init() {
	testerCmd.Flags().StringVar(
		&testerConfig.mode,
		"mode",
		"writer",
		"Tester mode (one of 'reader', 'writer')",
	)
	testerCmd.Flags().StringVar(
		&testerConfig.readConsumer,
		"read-consumer",
		"test-consumer",
		"Consumer group ID for reads; if blank, no consumer group is set",
	)
	testerCmd.Flags().StringVar(
		&testerConfig.topic,
		"topic",
		"",
		"Topic to write to",
	)
	testerCmd.Flags().IntVar(
		&testerConfig.writeRate,
		"write-rate",
		5,
		"Approximate number of messages to write per sec",
	)

	// Error intentionally unchecked: MarkFlagRequired only fails if the
	// flag name does not exist, which would be a programming error here.
	testerCmd.MarkFlagRequired("topic")
	addSharedFlags(testerCmd, &testerConfig.shared)
	RootCmd.AddCommand(testerCmd)
}

// testerPreRun validates the shared cluster-connection options before the
// tester command executes.
func testerPreRun(cmd *cobra.Command, args []string) error {
	return testerConfig.shared.validate()
}

// testerRun dispatches to the reader or writer loop based on --mode. The
// loop runs until SIGINT/SIGTERM cancels the context.
func testerRun(cmd *cobra.Command, args []string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Cancel the context (stopping the read/write loop) on interrupt.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-sigChan
		cancel()
	}()

	switch testerConfig.mode {
	case "reader":
		return runTestReader(ctx)
	case "writer":
		return runTestWriter(ctx)
	default:
return fmt.Errorf("Mode must be set to either 'reader' or 'writer'") 89 | } 90 | } 91 | 92 | func runTestReader(ctx context.Context) error { 93 | adminClient, err := testerConfig.shared.getAdminClient(ctx, nil, true) 94 | if err != nil { 95 | return err 96 | } 97 | defer adminClient.Close() 98 | connector := adminClient.GetConnector() 99 | 100 | log.Infof( 101 | "This will read test messages from the '%s' topic in %s using the consumer group ID '%s'", 102 | testerConfig.topic, 103 | connector.Config.BrokerAddr, 104 | testerConfig.readConsumer, 105 | ) 106 | 107 | ok, _ := util.Confirm("OK to continue?", false) 108 | if !ok { 109 | return errors.New("Stopping because of user response") 110 | } 111 | 112 | reader := kafka.NewReader( 113 | kafka.ReaderConfig{ 114 | Brokers: []string{connector.Config.BrokerAddr}, 115 | GroupID: testerConfig.readConsumer, 116 | Dialer: connector.Dialer, 117 | Topic: testerConfig.topic, 118 | MinBytes: 10e3, // 10KB 119 | MaxBytes: 10e6, // 10MB 120 | StartOffset: kafka.LastOffset, 121 | }, 122 | ) 123 | 124 | log.Info("Starting read loop") 125 | 126 | for { 127 | message, err := reader.ReadMessage(ctx) 128 | if err != nil { 129 | return err 130 | } 131 | log.Infof( 132 | "Message at partition %d, offset %d: %s=%s", 133 | message.Partition, 134 | message.Offset, 135 | string(message.Key), 136 | string(message.Value), 137 | ) 138 | } 139 | } 140 | 141 | func runTestWriter(ctx context.Context) error { 142 | adminClient, err := testerConfig.shared.getAdminClient(ctx, nil, true) 143 | if err != nil { 144 | return err 145 | } 146 | defer adminClient.Close() 147 | connector := adminClient.GetConnector() 148 | 149 | log.Infof( 150 | "This will write test messages to the '%s' topic in %s at a rate of %d/sec.", 151 | testerConfig.topic, 152 | connector.Config.BrokerAddr, 153 | testerConfig.writeRate, 154 | ) 155 | 156 | ok, _ := util.Confirm("OK to continue?", false) 157 | if !ok { 158 | return errors.New("Stopping because of user response") 159 
| } 160 | 161 | batchSize := 5 162 | 163 | writer := kafka.NewWriter( 164 | kafka.WriterConfig{ 165 | Brokers: []string{connector.Config.BrokerAddr}, 166 | Dialer: connector.Dialer, 167 | Topic: testerConfig.topic, 168 | Balancer: &kafka.LeastBytes{}, 169 | Async: false, 170 | BatchSize: batchSize, 171 | BatchTimeout: 1 * time.Nanosecond, 172 | }, 173 | ) 174 | defer writer.Close() 175 | 176 | index := 0 177 | tickDuration := time.Duration(1000.0/float64(testerConfig.writeRate/batchSize)) * time.Millisecond 178 | sendTicker := time.NewTicker(tickDuration) 179 | logTicker := time.NewTicker(5 * time.Second) 180 | 181 | log.Info("Starting write loop") 182 | 183 | for { 184 | select { 185 | case <-ctx.Done(): 186 | return nil 187 | case <-sendTicker.C: 188 | msgs := []kafka.Message{} 189 | 190 | for i := 0; i < 5; i++ { 191 | msgs = append(msgs, kafka.Message{ 192 | Key: []byte(fmt.Sprintf("msg_%d", index)), 193 | Value: []byte(fmt.Sprintf("Contents of test message %d", index)), 194 | }) 195 | index++ 196 | } 197 | err := writer.WriteMessages( 198 | ctx, 199 | msgs..., 200 | ) 201 | if err != nil { 202 | return err 203 | } 204 | case <-logTicker.C: 205 | log.Infof("%d messages sent", index) 206 | } 207 | } 208 | } 209 | -------------------------------------------------------------------------------- /docker-compose-auth.yml: -------------------------------------------------------------------------------- 1 | # By default, this docker-compose setup uses Kafka 2.7.0. This version can 2 | # be overwritten by setting the KAFKA_IMAGE_TAG environment variable. 3 | # 4 | # See https://hub.docker.com/r/bitnami/kafka/tags for the complete list. 5 | # 6 | # This config sets up a simple, single-node cluster that's equipped to use SSL/TLS and/or SASL. 7 | # It exposes access on four separate ports: 8 | # 9 | # 1. 9092: plaintext, no SASL 10 | # 2. 9093: SSL, no SASL 11 | # 3. 9094: SASL over plaintext 12 | # 4. 
9095: SASL over SSL 13 | # 14 | # See examples/auth for the associated cluster configs and certs. 15 | version: '3' 16 | 17 | services: 18 | zookeeper: 19 | container_name: zookeeper 20 | hostname: zookeeper 21 | image: bitnami/zookeeper:latest 22 | ports: 23 | - "2181:2181" 24 | environment: 25 | ALLOW_ANONYMOUS_LOGIN: yes 26 | 27 | kafka: 28 | container_name: kafka 29 | hostname: kafka 30 | image: bitnami/kafka:${KAFKA_IMAGE_TAG:-2.7.0} 31 | depends_on: 32 | - zookeeper 33 | restart: on-failure:3 34 | ports: 35 | - 9092:9092 36 | - 9093:9093 37 | - 9094:9094 38 | - 9095:9095 39 | environment: 40 | KAFKA_CFG_BROKER_ID: 1 41 | KAFKA_CFG_BROKER_RACK: zone1 42 | KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 43 | KAFKA_CFG_MESSAGE_MAX_BYTES: 200000000 44 | KAFKA_CFG_LISTENERS: "PLAINTEXT://:9092,SSL://:9093,SASL_PLAINTEXT://:9094,SASL_SSL://:9095" 45 | KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://localhost:9092,SSL://localhost:9093,SASL_PLAINTEXT://localhost:9094,SASL_SSL://localhost:9095" 46 | KAFKA_CFG_SASL_ENABLED_MECHANISMS: "PLAIN,SCRAM-SHA-256,SCRAM-SHA-512" 47 | KAFKA_CFG_AUTHORIZER_CLASS_NAME: "kafka.security.auth.SimpleAclAuthorizer" 48 | 49 | KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true" 50 | 51 | KAFKA_CFG_SSL_KEYSTORE_LOCATION: /opt/bitnami/kafka/config/certs/kafka.keystore.jks 52 | KAFKA_CFG_SSL_KEYSTORE_PASSWORD: test123 53 | 54 | KAFKA_CFG_SSL_TRUSTSTORE_LOCATION: /opt/bitnami/kafka/config/certs/kafka.truststore.jks 55 | KAFKA_CFG_SSL_TRUSTSTORE_PASSWORD: test123 56 | 57 | 58 | KAFKA_CFG_SSL_CLIENT_AUTH: none 59 | KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: "" 60 | KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" 61 | ALLOW_PLAINTEXT_LISTENER: "yes" 62 | entrypoint: 63 | - "/bin/bash" 64 | - "-c" 65 | - echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule 
required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/bitnami/kafka/config/kafka_jaas.conf; /opt/bitnami/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config "SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]" --entity-type users --entity-name adminscram; exec /entrypoint.sh /run.sh 66 | volumes: 67 | - /var/run/docker.sock:/var/run/docker.sock 68 | - ./examples/auth/certs:/opt/bitnami/kafka/config/certs 69 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # By default, this docker-compose setup uses Kafka 2.7.0. This version can 2 | # be overwritten by setting the KAFKA_IMAGE_TAG environment variable. 3 | # 4 | # See https://hub.docker.com/r/bitnami/kafka/tags for the complete list. 5 | version: '3' 6 | services: 7 | zookeeper: 8 | container_name: zookeeper 9 | hostname: zookeeper 10 | image: bitnami/zookeeper:latest 11 | ports: 12 | - "2181:2181" 13 | environment: 14 | ALLOW_ANONYMOUS_LOGIN: yes 15 | 16 | # Zone 1 brokers 17 | kafka1: 18 | container_name: kafka1 19 | hostname: 169.254.123.123 20 | image: bitnami/kafka:${KAFKA_IMAGE_TAG:-2.7.0} 21 | ports: 22 | - "9092:9092" 23 | environment: 24 | KAFKA_CFG_BROKER_ID: 1 25 | KAFKA_CFG_BROKER_RACK: zone1 26 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 27 | 28 | ALLOW_PLAINTEXT_LISTENER: yes 29 | KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 30 | KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://169.254.123.123:9092 31 | restart: on-failure 32 | depends_on: 33 | - zookeeper 34 | 35 | kafka2: 36 | container_name: kafka2 37 | hostname: 169.254.123.123 38 | image: bitnami/kafka:${KAFKA_IMAGE_TAG:-2.7.0} 39 | ports: 40 | - "9093:9092" 41 | environment: 42 | KAFKA_CFG_BROKER_ID: 2 43 | KAFKA_CFG_BROKER_RACK: zone1 44 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 45 | 46 | ALLOW_PLAINTEXT_LISTENER: 
yes 47 | KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 48 | KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://169.254.123.123:9093 49 | restart: on-failure 50 | depends_on: 51 | - zookeeper 52 | 53 | # Zone 2 brokers 54 | kafka3: 55 | container_name: kafka3 56 | hostname: 169.254.123.123 57 | image: bitnami/kafka:${KAFKA_IMAGE_TAG:-2.7.0} 58 | ports: 59 | - "9094:9092" 60 | environment: 61 | KAFKA_CFG_BROKER_ID: 3 62 | KAFKA_CFG_BROKER_RACK: zone2 63 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 64 | 65 | ALLOW_PLAINTEXT_LISTENER: yes 66 | KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 67 | KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://169.254.123.123:9094 68 | restart: on-failure 69 | depends_on: 70 | - zookeeper 71 | 72 | kafka4: 73 | container_name: kafka4 74 | hostname: 169.254.123.123 75 | image: bitnami/kafka:${KAFKA_IMAGE_TAG:-2.7.0} 76 | ports: 77 | - "9095:9092" 78 | environment: 79 | KAFKA_CFG_BROKER_ID: 4 80 | KAFKA_CFG_BROKER_RACK: zone2 81 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 82 | 83 | ALLOW_PLAINTEXT_LISTENER: yes 84 | KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 85 | KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://169.254.123.123:9095 86 | restart: on-failure 87 | depends_on: 88 | - zookeeper 89 | 90 | # Zone 3 brokers 91 | kafka5: 92 | container_name: kafka5 93 | hostname: 169.254.123.123 94 | image: bitnami/kafka:${KAFKA_IMAGE_TAG:-2.7.0} 95 | ports: 96 | - "9096:9092" 97 | environment: 98 | KAFKA_CFG_BROKER_ID: 5 99 | KAFKA_CFG_BROKER_RACK: zone3 100 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 101 | 102 | ALLOW_PLAINTEXT_LISTENER: yes 103 | KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 104 | KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://169.254.123.123:9096 105 | restart: on-failure 106 | depends_on: 107 | - zookeeper 108 | 109 | kafka6: 110 | container_name: kafka6 111 | hostname: 169.254.123.123 112 | image: bitnami/kafka:${KAFKA_IMAGE_TAG:-2.7.0} 113 | ports: 114 | - "9097:9092" 115 | environment: 116 | KAFKA_CFG_BROKER_ID: 6 117 | KAFKA_CFG_BROKER_RACK: zone3 118 | 
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 119 | 120 | ALLOW_PLAINTEXT_LISTENER: yes 121 | KAFKA_CFG_LISTENERS: PLAINTEXT://:9092 122 | KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://169.254.123.123:9097 123 | restart: on-failure 124 | depends_on: 125 | - zookeeper 126 | -------------------------------------------------------------------------------- /examples/auth/acls/acl-default.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: acl-default 3 | cluster: local-cluster-auth 4 | environment: local-env 5 | region: local-region 6 | description: | 7 | This is a default ACL for the local cluster. 8 | It grants read and describe access to the topic `my-topic` and read access to the group `my-group` 9 | to the user `default`. 10 | 11 | spec: 12 | acls: 13 | - resource: 14 | type: topic 15 | name: my-topic 16 | patternType: literal 17 | principal: 'User:default' 18 | host: '*' 19 | permission: allow 20 | operations: 21 | - Read 22 | - Describe 23 | - resource: 24 | type: group 25 | name: my-group 26 | patternType: prefixed 27 | principal: 'User:default' 28 | host: '*' 29 | permission: allow 30 | operations: 31 | - Read 32 | -------------------------------------------------------------------------------- /examples/auth/certs/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDKDCCAhACCQCxcVKD5FwhXjANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJV 3 | UzELMAkGA1UECAwCQ0ExCzAJBgNVBAcMAlNGMRAwDgYDVQQKDAdTZWdtZW50MRsw 4 | GQYDVQQDDBJ1c2VyLnNlZ21lbnQubG9jYWwwHhcNMjAxMTI0MDI1ODIyWhcNMzAx 5 | MTIyMDI1ODIyWjBWMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExCzAJBgNVBAcM 6 | AlNGMRAwDgYDVQQKDAdTZWdtZW50MRswGQYDVQQDDBJ1c2VyLnNlZ21lbnQubG9j 7 | YWwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC4ayiwKN/iS06RJwOL 8 | Bey75INaq92gsPT8yOI2/u43hqO7wiC6AHnf1nais/4P1zUuS6WZeA5rUsJPzhKC 9 | N6fYFNkEMA5ui7LoEjJqD6o4Bw1cxWvQ/+Y9GwDOdK6T/q/ZSu9W7TB/Lgi0dT3C 10 | 
SNfr/KnBDwsSUjEV7WP84qfbikUInPx7doFTm/pa6J44LvvdLH3qBqdiWdjKP/K3 11 | 9mzADbNAQeLReGBEouaVBdIccDVoNfcG1/f+DDcupCjzFMUM8Hu5991a7Z7f1A1T 12 | FalZmbk+THiP/4lliQgKfvhfVryVmq8YVsPPEiyy9biF/qtrJKFMbxsNn2eqP9ED 13 | ZRCnAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAHnHNiOQ02HfJXcLVAAJFMrfe2a8 14 | cfoD58oB0xuXEjHAHqR4iwp01S8fssNw82RdIxymPD8nfOkyRnd5g+gYgTOJsTL9 15 | ed3UGIH8dSel367z5dxYL3fVGZttqUutDH60TtUVc+qS1W+Vt6pEexETDo5PYj/D 16 | Ho4f3YoORg+w4V0ZUCX3mgNYxyJULuPLBeYn4bXnplCcxG9rZgxMtIOhMnLlYdtn 17 | nJbiruRmReJk+Ifkx//38YkYFjxWuztdWmF3nVotyUNCAV0cb5E/0w8nLwrR8JZr 18 | ychNKuBcy92RJ7XYFMytI4PqisXiQ/VP5R0tpjVxLMAonF7hWj7n+5LSM8I= 19 | -----END CERTIFICATE----- 20 | -------------------------------------------------------------------------------- /examples/auth/certs/ca.key: -------------------------------------------------------------------------------- 1 | -----BEGIN ENCRYPTED PRIVATE KEY----- 2 | MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQIDqzKTLtzeZcCAggA 3 | MB0GCWCGSAFlAwQBKgQQkYvJZrDaFnldNi2Wr53f9wSCBND9mXn1ii3nsPieqy07 4 | 7Gl3iZ0N18jRJfqsbAcbj31Vktcp+dhMLFbz51iFBTqq7xESUFrZEaGSpwqGrLV7 5 | NrvkDgq2khqQGh1dlCbYYyBfq2vyxgpeISxrHCNUtfsBpT6x/r8lLjxrjD2m/PjR 6 | J80ZSfV1KpaSEP4HPR01mC3EwF+wdwK3NYJWkk0PcKKZVhXVFg6v3x6RJhnU7jW1 7 | aKnrT/GWdrhSqTZBvrrW4+x0wr/Iygw/MxuO70k4fpYWfrcLIEs9Ia5RVpFS1lXz 8 | +DRBLZnjsyv7BXSeDRBEWcx1pb5CGuwgQ4yRx2xK2lRReX6lIE53YLD/1nK8xSzS 9 | 5JorKefkwsHhqgfRrzZu6aLMe7f7PJmrfQ9L4dLc27MC7HIrBnA2RZLfHkp9hGOP 10 | 73H5U6s6OhvFC81ECh3+ejHVk+Zb6Rx6Xi/tOPoPNLNiEXaLP9jpCo1uLqnvRdpG 11 | PyjpuP0goOb6u/1HU7Nfxqyxf4LVPb0DTwnooXPveJr9czhuboYjhCk48cheTYy1 12 | kiCym2aKQv62ZxiCVVcHuoCXTbRZKAUtfYG3CQBn3a2Y0I2r1gODeMokpNHuSA9L 13 | htuOuXx5SYKWxSeGsT0FePddHOBWRTOw3OB9fj95/vHVs7Tkiw6Uj9Sr/Q7rtm2O 14 | KuLBw9QvZKSzpJlZ3nKaylWSXJp6zOx32UsrK+XQhL3UCbS2BkZxA5s1VVIg9p4+ 15 | c0gqflIDvoBHM4elLZ/X2AV0lsdfNvyPx+wmI4nW5sazPbeIXY/MzOv7KwSeZkea 16 | phGufA0ioEQUlyJ+P5iDOsaD1uGH7rdyIOGWnaP6jiGAkQ6IoorLSMvzdCHysuZp 17 | 
ZziOfLL8ydhxpkma4VK1PYHFNlk72T/qeHDRr110osvMiE2DuMQ9RPbLR4hrrEDM 18 | WUx4fgnpCqDDx2/1YScXioMY4BwEHliS7hZlv27ODWRO23YdQSB/e//yh9VBQnGS 19 | dFw1dmtu1XDe+78l/YbuSzE3ppYO6VAMRiFgrWUubZgS5qmzAHn0i68ERnlK7j9s 20 | jQYGltuHetrMnW7aBsV+TU3QsOqWrF9hL1geydwKezXGYkEURY69zLrlVkNxhFtQ 21 | VJNRo9YKiFrdE/94qrhjyNZOwmq/3vs2+XEUGhCCp+Pfxtg8jJNSuEoXKWaaTwtS 22 | fOae6wtbeydQG6iVYz7Mdm/Sc2xKRm3aVz4mkCxcfdyKmP8W0Yiw+6GniNR1Ryfu 23 | gyr2LN44MaPH5fazYVPapocm1xK4HDVAHjFLzfzWbYde8fzf21LWWad5pwsClimL 24 | 4GfkFp0KGfajlcDMquJWn8ILU41unXo3Q/ikV3w8HEr6KydqRYXDctcm6WAx/8NP 25 | 8YXP9NEcq8FCKqjGlenjSwo1YfUJnmDEglpX8gCdYzEe0n6aEq2TfY8AsZ/rzls6 26 | BxRcnR7U3zR2maDr4gAYo7WR2pGe9WbgT+arwtBValmDI5C2sAu83MB6ubGA/lDR 27 | M+DhLy69s4QeKI/X35bUVY3Qjm09Zxj4NOgVZwWV6mEAyMMLyKRF+NPnBdMSxQRe 28 | u6Jpt4Ig1RUXMRNSv3u86W/PFmS7gAQEBMYL2rVrSeF8Vlb/uVPC31fUFDmFfQ3x 29 | nPWzjMvCjivfzVGp5vo/Sd6OLw== 30 | -----END ENCRYPTED PRIVATE KEY----- 31 | -------------------------------------------------------------------------------- /examples/auth/certs/client.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDNzCCAh8CCQCBQTsa+CfoGTANBgkqhkiG9w0BAQUFADBWMQswCQYDVQQGEwJV 3 | UzELMAkGA1UECAwCQ0ExCzAJBgNVBAcMAlNGMRAwDgYDVQQKDAdTZWdtZW50MRsw 4 | GQYDVQQDDBJ1c2VyLnNlZ21lbnQubG9jYWwwHhcNMjAxMTI0MDI1OTQxWhcNMzAx 5 | MTIyMDI1OTQxWjBlMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExCzAJBgNVBAcT 6 | AlNGMRAwDgYDVQQKEwdTZWdtZW50MRAwDgYDVQQLEwdTZWdtZW50MRgwFgYDVQQD 7 | Ew9CZW5qYW1pbiBZb2xrZW4wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB 8 | AQCdQR9sA4Rv7sPzBGO5FMTSq7ULxvCEIONE2zppv7DqOCmz6Lq2//7tzCeSNt7p 9 | vh5HH9tpwIN5b8kwoppMKGOVPTmfdbTXXwsSP3JPZJnPSFdoElBh/qnhoKhF6tLD 10 | um3fZFbZ66KintcG5/9PgluanOr4WuakW3YBs9SLwEY28ZvkeBcwfSZENFh74dr6 11 | alkfn+u+0zhyRVvbT6A0gr2zeRUb35UAS5Vlgc5zI65v+d7TT7OaIHkL8rnrsMft 12 | DqBlPhTzlfGz64ipFgFQuTHuFWg5jenwKuLaDq2tMawRLaM0ZZCAu7Tmlmq429VD 13 | CP1wEq1DKUWQ7lR1me9Cft+3AgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAIntZGmd 14 | 
1tSxOfgqGz3vQSqoYhC9tsEWbwGnez7c2WLJE3vR6pHB06iWqegf78DAWkz1ZI+d 15 | 1OPYfqPw63sOZYsHIoug5xR8QtU8G3NaT1H7Vc7GiIkU+PIn0V7DzBSqEZWuoTz3 16 | XUbH25O5ynMKyGR6zirRZLDL1lw1dDKeqbaUPt/QxuY1S6Pl+36C2DOBDWqJJWQJ 17 | rtxJ4zFA+ZJEK1EKIJF7ufM0qfCCnKTnvo/4SLGItPrmp/xUFl5T4ises/uLqrY4 18 | fWbpV8hbuPo5f50AD6S1Iw7H1ZVKyHazBIYaC0QS8Vk4vp5I6J7OnuR+jhv0y0vr 19 | Bd/jWLKcAv0MzjQ= 20 | -----END CERTIFICATE----- 21 | -------------------------------------------------------------------------------- /examples/auth/certs/client.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCdQR9sA4Rv7sPz 3 | BGO5FMTSq7ULxvCEIONE2zppv7DqOCmz6Lq2//7tzCeSNt7pvh5HH9tpwIN5b8kw 4 | oppMKGOVPTmfdbTXXwsSP3JPZJnPSFdoElBh/qnhoKhF6tLDum3fZFbZ66KintcG 5 | 5/9PgluanOr4WuakW3YBs9SLwEY28ZvkeBcwfSZENFh74dr6alkfn+u+0zhyRVvb 6 | T6A0gr2zeRUb35UAS5Vlgc5zI65v+d7TT7OaIHkL8rnrsMftDqBlPhTzlfGz64ip 7 | FgFQuTHuFWg5jenwKuLaDq2tMawRLaM0ZZCAu7Tmlmq429VDCP1wEq1DKUWQ7lR1 8 | me9Cft+3AgMBAAECggEADqdR4UPWpIOQWOXw0P9hc+wyO723DejupKz1HYOSXdEL 9 | +crXE1R5kfkzOsnILenccm5CiPE6jydejRyp2iztUqvY4cYbKvKdWn71DPbn6kvo 10 | cTc7rFYJyI+q/pDqQPjvYiC8gyQVDKhWizs1LFiOZrL2plv6IBixv2jdhoRNRrNI 11 | 65N6Z9wHTNztcJyKDKT6Z4pgBDYSWzUIAGVfw2vhYwDavgiSTAOksGc15rmamc0+ 12 | ZYaDpwfbFEI06jBIJEBzcfdp/0FtNQSTL0FDkXdQyBhc6IxNHIifL5dDvhMo0Aks 13 | dcnlBf13DVskLe7lVxOPuXoljC6Us1WkWJ2VWJ6RYQKBgQDyvxqInnqVS2AK8t+X 14 | q7w4yccv+2exPM5gs9Bht9C/e/OPb39bSAdjLOJOsTAkkBQfTyrawEqUGFsq5y1B 15 | U4SljPJchXfL82pefXfIHCtqhv8ieJb2Tby50PQzeY2+pw8/0xgbVjTAr4JKRXO0 16 | hyMEOj6ElKFnjHlpqSuCl5hGsQKBgQCl1xX1uBimphJJ3Zo74UUop87GeFbWl4oL 17 | pwqI3DOy5KMrbRtWqU3EI8BkjgjHJLF3d4evVAIoBG54fZFjHSPgq3163WUuU6ZW 18 | p1gvHTWyTNbuu8IFqqhbE/vHUZMfX7ksuBooFOsY0Wqq0poVgDWriFS5SfYek43f 19 | WAZiAvn25wKBgFAd3qYEmDS6AeLbMgye86pSfllJwnlutjaYYkg+ILlyMXq/s+ru 20 | pPGImNCcDmWi3+FNgbldCcBDIaPRVNBgvkDdeggrTNSVbB/vjR8QnQu1rnM0Fa8J 21 | 
DSbO3io3Dh9Eh/Xqt+Qd2Z9WzcuxjHSivV3h00xyuaqxZEkJOoEJg4qhAoGADOND 22 | JJ5S5BiB0VW0V7Tw7/Dig8/0R6btJmyrx+j854kXGRfYiQqNLZHtsKLNEdTLKdKT 23 | K8/mfv+hKiHv+3jXQe1xyeuMomYDxjYpBzhI5PtNtK3IrTIO9Uz/QwUW3thMhqoj 24 | 9jtx7bLQjEfji4o0IYlttByIUOX8n3+yt0kt7b8CgYEA0NMkFD39NBYv4Ec5fx7y 25 | bNcObdI35cyw5xt2uSqC/2fcqLeD8FKlsS9b9JkkhYvEEe1z/3vMuO7erpQuh5j6 26 | P9gO/TcaZOk9DACoI2hEfUlW4SX2rLPbPB0ZG61PStjeyf1uy78jCVQYjM7N+WBE 27 | IY2Y+bXLOjn24ooplcIlZsw= 28 | -----END PRIVATE KEY----- 29 | 30 | -------------------------------------------------------------------------------- /examples/auth/certs/kafka.keystore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/topicctl/d2988ed9e8adbc43caeb15930fafb337d01df449/examples/auth/certs/kafka.keystore.jks -------------------------------------------------------------------------------- /examples/auth/certs/kafka.truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/segmentio/topicctl/d2988ed9e8adbc43caeb15930fafb337d01df449/examples/auth/certs/kafka.truststore.jks -------------------------------------------------------------------------------- /examples/auth/cluster.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: local-cluster-auth 3 | environment: local-env 4 | region: local-region 5 | description: | 6 | Test cluster that uses SSL/TLS and SASL to securely connect to brokers. Can be run 7 | against compose setup defined in docker-compose-auth.yml in the repo root. 8 | 9 | spec: 10 | bootstrapAddrs: 11 | # To use just TLS without SASL, switch to port 9093 and disable SASL in the config below. 12 | # To use just SASL without TLS, switch to port 9094 and disabled TLS in the config below. 
13 | - localhost:9095 14 | tls: 15 | enabled: true 16 | caCertPath: certs/ca.crt 17 | certPath: certs/client.crt 18 | keyPath: certs/client.key 19 | skipVerify: true 20 | sasl: 21 | enabled: true 22 | mechanism: SCRAM-SHA-512 23 | 24 | # As an alternative to storing these in plain text in the config (probably not super-secure), 25 | # these can also be set via: 26 | # 27 | # 1. The --sasl-username and --sasl-password command-line flags, 28 | # 2. The TOPICCTL_SASL_USERNAME and TOPICCTL_SASL_PASSWORD environment variables, or 29 | # 3. Putting placeholder strings in the config and running with the --expand-env flag as 30 | # described in the README. 31 | # 32 | username: adminscram 33 | password: admin-secret-512 34 | # 35 | # Another alternative is to omit these values and use secretsManagerArn to reference 36 | # an AWS Secrets Manager secret containing the username and password. More information 37 | # can be found in the README. An example can be seen below. 38 | # secretsManagerArn: arn:aws:secretsmanager:us-west-2:1000000000:secret:AmazonMSK_kafka-admin-wEiwjV 39 | # 40 | # This can also be set via: 41 | # 42 | # 1. The --sasl-secrets-manager-arn command-line flag, 43 | # 2. The TOPICCTL_SASL_SECRETS_MANAGER_ARN environment variable, or 44 | # 3. Putting a placeholder string in the config and running with the --expand-env flag as 45 | -------------------------------------------------------------------------------- /examples/auth/topics/topic-default.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: topic-default 3 | cluster: local-cluster-auth 4 | environment: local-env 5 | region: local-region 6 | description: | 7 | Topic that uses default (any) strategy for assigning partition brokers. 
8 | 9 | spec: 10 | partitions: 3 11 | replicationFactor: 1 12 | retentionMinutes: 100 13 | placement: 14 | strategy: any 15 | settings: 16 | cleanup.policy: delete 17 | max.message.bytes: 5542880 18 | -------------------------------------------------------------------------------- /examples/local-cluster/cluster.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: local-cluster 3 | environment: local-env 4 | region: local-region 5 | description: | 6 | Test cluster that uses plaintext access to brokers. Can be run against compose setup defined 7 | in docker-compose.yml in the repo root. 8 | 9 | spec: 10 | bootstrapAddrs: 11 | - localhost:9092 12 | 13 | # Uncomment these lines to access cluster via ZooKeeper instead of broker APIs (required 14 | # for older cluster versions). 15 | # 16 | # zkAddrs: 17 | # - localhost:2181 18 | # zkLockPath: /topicctl/locks 19 | -------------------------------------------------------------------------------- /examples/local-cluster/topics/topic-default.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: topic-default 3 | cluster: local-cluster 4 | environment: local-env 5 | region: local-region 6 | description: | 7 | Topic that uses default (any) strategy for assigning partition brokers. 8 | 9 | spec: 10 | partitions: 3 11 | replicationFactor: 2 12 | retentionMinutes: 100 13 | placement: 14 | strategy: in-rack 15 | settings: 16 | cleanup.policy: delete 17 | max.message.bytes: 5542880 18 | -------------------------------------------------------------------------------- /examples/local-cluster/topics/topic-in-rack.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: topic-in-rack3 3 | cluster: local-cluster 4 | environment: local-env 5 | region: local-region 6 | description: | 7 | Topic that uses in-rack strategy for assigning brokers. 
8 | 9 | spec: 10 | partitions: 9 11 | replicationFactor: 2 12 | retentionMinutes: 100 13 | placement: 14 | strategy: in-rack 15 | 16 | -------------------------------------------------------------------------------- /examples/local-cluster/topics/topic-static-in-rack.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: topic-static-in-rack 3 | cluster: local-cluster 4 | environment: local-env 5 | region: local-region 6 | description: | 7 | Topic that uses static-in-rack strategy for assigning brokers. 8 | 9 | spec: 10 | partitions: 9 11 | replicationFactor: 2 12 | retentionMinutes: 100 13 | placement: 14 | strategy: static-in-rack 15 | staticRackAssignments: 16 | - zone1 17 | - zone1 18 | - zone1 19 | - zone2 20 | - zone2 21 | - zone2 22 | - zone3 23 | - zone3 24 | - zone3 25 | 26 | -------------------------------------------------------------------------------- /examples/local-cluster/topics/topic-static.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: topic-static 3 | cluster: local-cluster 4 | environment: local-env 5 | region: local-region 6 | description: | 7 | Topic that uses static broker assignments. 8 | 9 | spec: 10 | partitions: 10 11 | replicationFactor: 2 12 | retentionMinutes: 290 13 | placement: 14 | strategy: static 15 | staticAssignments: 16 | - [3, 4] 17 | - [5, 6] 18 | - [2, 1] 19 | - [2, 3] 20 | - [5, 1] 21 | - [2, 1] 22 | - [1, 3] 23 | - [2, 4] 24 | - [1, 3] 25 | - [2, 4] 26 | -------------------------------------------------------------------------------- /examples/msk/cluster.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: msk-cluster 3 | environment: aws-env 4 | region: aws-region 5 | description: | 6 | Example of config for AWS MSK cluster with IAM authentication enabled. 
7 | 8 | spec: 9 | bootstrapAddrs: 10 | # These are dummy placeholders; replace them with the broker addresses for your MSK cluster. 11 | - "b-1.my-cluster.kafka.aws-region.amazonaws.com:9098" 12 | - "b-2.my-cluster.kafka.aws-region.amazonaws.com:9098" 13 | - "b-3.my-cluster.kafka.aws-region.amazonaws.com:9098" 14 | 15 | tls: 16 | # TLS is enabled on the IAM-authenticated broker endpoints by default 17 | enabled: true 18 | sasl: 19 | # No credentials are set here; instead, they'll be pulled from 20 | # the environment, a shared credentials file, a shared configuration file, or the EC2 metadata 21 | # service as described here: https://docs.aws.amazon.com/sdk-for-go/api/aws/session/. 22 | enabled: true 23 | mechanism: AWS-MSK-IAM 24 | -------------------------------------------------------------------------------- /examples/msk/topics/topic-default.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: topic-default 3 | cluster: msk-cluster 4 | environment: aws-env 5 | region: aws-region 6 | description: | 7 | Topic that uses default (any) strategy for assigning partition brokers. 
8 | 9 | spec: 10 | partitions: 3 11 | replicationFactor: 3 12 | retentionMinutes: 100 13 | placement: 14 | strategy: any 15 | settings: 16 | cleanup.policy: delete 17 | max.message.bytes: 5542880 18 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/segmentio/topicctl 2 | 3 | go 1.24.0 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go v1.49.12 7 | github.com/briandowns/spinner v1.19.0 8 | github.com/c-bata/go-prompt v0.2.3 9 | github.com/fatih/color v1.13.0 10 | github.com/ghodss/yaml v1.0.0 11 | github.com/hashicorp/go-multierror v1.1.1 12 | github.com/olekukonko/tablewriter v0.0.5 13 | github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da 14 | github.com/segmentio/kafka-go v0.4.48 15 | github.com/segmentio/kafka-go/sasl/aws_msk_iam v0.0.0-20220211180808-78889264d070 16 | github.com/sirupsen/logrus v1.9.0 17 | github.com/spf13/cobra v1.5.0 18 | github.com/stretchr/testify v1.8.0 19 | github.com/x-cray/logrus-prefixed-formatter v0.5.2 20 | golang.org/x/crypto v0.37.0 21 | ) 22 | 23 | require ( 24 | github.com/davecgh/go-spew v1.1.1 // indirect 25 | github.com/hashicorp/errwrap v1.0.0 // indirect 26 | github.com/inconshreveable/mousetrap v1.0.0 // indirect 27 | github.com/jmespath/go-jmespath v0.4.0 // indirect 28 | github.com/klauspost/compress v1.15.9 // indirect 29 | github.com/mattn/go-colorable v0.1.9 // indirect 30 | github.com/mattn/go-isatty v0.0.14 // indirect 31 | github.com/mattn/go-runewidth v0.0.9 // indirect 32 | github.com/mattn/go-tty v0.0.3 // indirect 33 | github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect 34 | github.com/onsi/ginkgo v1.6.0 // indirect 35 | github.com/onsi/gomega v1.5.0 // indirect 36 | github.com/pierrec/lz4/v4 v4.1.15 // indirect 37 | github.com/pkg/term v0.0.0-20200520122047-c3ffed290a03 // indirect 38 | github.com/pmezard/go-difflib v1.0.0 // indirect 39 | 
github.com/spf13/pflag v1.0.5 // indirect 40 | github.com/xdg-go/pbkdf2 v1.0.0 // indirect 41 | github.com/xdg-go/scram v1.1.2 // indirect 42 | github.com/xdg-go/stringprep v1.0.4 // indirect 43 | golang.org/x/sys v0.32.0 // indirect 44 | golang.org/x/term v0.31.0 // indirect 45 | golang.org/x/text v0.24.0 // indirect 46 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect 47 | gopkg.in/yaml.v2 v2.4.0 // indirect 48 | gopkg.in/yaml.v3 v3.0.1 // indirect 49 | ) 50 | -------------------------------------------------------------------------------- /pkg/admin/client.go: -------------------------------------------------------------------------------- 1 | package admin 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/segmentio/kafka-go" 7 | "github.com/segmentio/topicctl/pkg/zk" 8 | ) 9 | 10 | // Client is an interface for interacting with a cluster for administrative tasks. 11 | type Client interface { 12 | // GetClusterID gets the ID of the cluster. 13 | GetClusterID(ctx context.Context) (string, error) 14 | 15 | // GetBrokers gets information about all brokers in the cluster. 16 | GetBrokers(ctx context.Context, ids []int) ([]BrokerInfo, error) 17 | 18 | // GetControllerID get the active controller broker ID in the cluster. 19 | GetControllerID(ctx context.Context) (int, error) 20 | 21 | // GetBrokerIDs get the IDs of all brokers in the cluster. 22 | GetBrokerIDs(ctx context.Context) ([]int, error) 23 | 24 | // GetConnector gets the Connector instance for this cluster. 25 | GetConnector() *Connector 26 | 27 | // GetTopics gets full information about each topic in the cluster. 28 | GetTopics( 29 | ctx context.Context, 30 | names []string, 31 | detailed bool, 32 | ) ([]TopicInfo, error) 33 | 34 | // GetTopicNames gets just the names of each topic in the cluster. 35 | GetTopicNames(ctx context.Context) ([]string, error) 36 | 37 | // GetTopic gets the details of a single topic in the cluster. 
38 | GetTopic( 39 | ctx context.Context, 40 | name string, 41 | detailed bool, 42 | ) (TopicInfo, error) 43 | 44 | // GetACLs gets full information about each ACL in the cluster. 45 | GetACLs( 46 | ctx context.Context, 47 | filter kafka.ACLFilter, 48 | ) ([]ACLInfo, error) 49 | 50 | // GetAllTopicsMetadata performs kafka-go metadata call to get topic information 51 | GetAllTopicsMetadata(ctx context.Context) (*kafka.MetadataResponse, error) 52 | 53 | // GetUsers gets information about users in the cluster. 54 | GetUsers( 55 | ctx context.Context, 56 | names []string, 57 | ) ([]UserInfo, error) 58 | 59 | // UpdateTopicConfig updates the configuration for the argument topic. It returns the config 60 | // keys that were updated. 61 | UpdateTopicConfig( 62 | ctx context.Context, 63 | name string, 64 | configEntries []kafka.ConfigEntry, 65 | overwrite bool, 66 | ) ([]string, error) 67 | 68 | // UpdateBrokerConfig updates the configuration for the argument broker. It returns the config 69 | // keys that were updated. 70 | UpdateBrokerConfig( 71 | ctx context.Context, 72 | id int, 73 | configEntries []kafka.ConfigEntry, 74 | overwrite bool, 75 | ) ([]string, error) 76 | 77 | // CreateTopic creates a topic in the cluster. 78 | CreateTopic( 79 | ctx context.Context, 80 | config kafka.TopicConfig, 81 | ) error 82 | 83 | // CreateACLs creates ACLs in the cluster. 84 | CreateACLs( 85 | ctx context.Context, 86 | acls []kafka.ACLEntry, 87 | ) error 88 | 89 | // DeleteACLs deletes ACLs in the cluster. 90 | DeleteACLs( 91 | ctx context.Context, 92 | filters []kafka.DeleteACLsFilter, 93 | ) (*kafka.DeleteACLsResponse, error) 94 | 95 | // UpsertUser creates or updates an user in zookeeper. 96 | UpsertUser( 97 | ctx context.Context, 98 | user kafka.UserScramCredentialsUpsertion, 99 | ) error 100 | 101 | // AssignPartitions sets the replica broker IDs for one or more partitions in a topic. 
102 | AssignPartitions( 103 | ctx context.Context, 104 | topic string, 105 | assignments []PartitionAssignment, 106 | ) error 107 | 108 | // AddPartitions extends a topic by adding one or more new partitions to it. 109 | AddPartitions( 110 | ctx context.Context, 111 | topic string, 112 | newAssignments []PartitionAssignment, 113 | ) error 114 | 115 | // RunLeaderElection triggers a leader election for one or more partitions in a topic. 116 | RunLeaderElection( 117 | ctx context.Context, 118 | topic string, 119 | partitions []int, 120 | ) error 121 | 122 | // AcquireLock acquires a lock that can be used to prevent simultaneous changes to a topic. 123 | AcquireLock(ctx context.Context, path string) (zk.Lock, error) 124 | 125 | // LockHeld returns whether a lock is currently held for the given path. 126 | LockHeld(ctx context.Context, path string) (bool, error) 127 | 128 | // GetSupportedFeatures gets the features supported by the cluster for this client. 129 | GetSupportedFeatures() SupportedFeatures 130 | 131 | // Close closes the client. 132 | Close() error 133 | } 134 | -------------------------------------------------------------------------------- /pkg/admin/support.go: -------------------------------------------------------------------------------- 1 | package admin 2 | 3 | // SupportedFeatures provides a summary of what an admin client supports. 4 | type SupportedFeatures struct { 5 | // Reads indicates whether the client supports reading basic cluster information 6 | // (metadata, configs, etc.). 7 | Reads bool 8 | 9 | // Applies indicates whether the client supports the functionality required for applying 10 | // (e.g., changing configs, electing leaders, etc.). 11 | Applies bool 12 | 13 | // Locks indicates whether the client supports locking. 14 | Locks bool 15 | 16 | // DynamicBrokerConfigs indicates whether the client can return dynamic broker configs 17 | // like leader.replication.throttled.rate. 
18 | DynamicBrokerConfigs bool 19 | 20 | // ACLs indicates whether the client supports access control levels. 21 | ACLs bool 22 | 23 | // Users indicates whether the client supports SASL Users. 24 | Users bool 25 | } 26 | -------------------------------------------------------------------------------- /pkg/apply/assigners/assigner.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "github.com/segmentio/topicctl/pkg/admin" 5 | ) 6 | 7 | // Assigner is an interface for structs that figure out how to 8 | // reassign replicas in existing topic partitions. 9 | type Assigner interface { 10 | Assign( 11 | topic string, 12 | currAssignments []admin.PartitionAssignment, 13 | ) ([]admin.PartitionAssignment, error) 14 | } 15 | -------------------------------------------------------------------------------- /pkg/apply/assigners/balanced_leader_test.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/topicctl/pkg/admin" 7 | "github.com/segmentio/topicctl/pkg/apply/pickers" 8 | "github.com/segmentio/topicctl/pkg/config" 9 | ) 10 | 11 | func TestBalancedLeaderAssigner(t *testing.T) { 12 | brokers := testBrokers(12, 3) 13 | assigner := NewBalancedLeaderAssigner(brokers, pickers.NewLowestIndexPicker()) 14 | checker := func(result []admin.PartitionAssignment) bool { 15 | ok, _ := EvaluateAssignments( 16 | result, 17 | brokers, 18 | config.TopicPlacementConfig{ 19 | Strategy: config.PlacementStrategyBalancedLeaders, 20 | }, 21 | ) 22 | return ok 23 | } 24 | 25 | testCases := []assignerTestCase{ 26 | { 27 | description: "Already balanced", 28 | curr: [][]int{ 29 | {1, 2, 3}, 30 | {2, 4, 5}, 31 | {3, 6, 7}, 32 | }, 33 | expected: [][]int{ 34 | {1, 2, 3}, 35 | {2, 4, 5}, 36 | {3, 6, 7}, 37 | }, 38 | checker: checker, 39 | }, 40 | { 41 | description: "Single swap", 42 | curr: [][]int{ 43 | {1, 2, 
3}, 44 | {2, 4, 5}, 45 | {5, 6, 7}, 46 | }, 47 | expected: [][]int{ 48 | {1, 2, 3}, 49 | {2, 4, 5}, 50 | // Single swap 51 | {6, 5, 7}, 52 | }, 53 | checker: checker, 54 | }, 55 | { 56 | description: "Multiple swaps", 57 | curr: [][]int{ 58 | {1, 2, 3}, 59 | {2, 3, 4}, 60 | {3, 4, 5}, 61 | {1, 2, 3}, 62 | {2, 3, 4}, 63 | {3, 4, 5}, 64 | {1, 2, 3}, 65 | {2, 3, 4}, 66 | {3, 4, 5}, 67 | // Rack 2 is overrepresented as a leader 68 | {2, 5, 6}, 69 | {2, 3, 4}, 70 | {2, 4, 5}, 71 | }, 72 | expected: [][]int{ 73 | {1, 2, 3}, 74 | // Swap here to get a rack1 leader 75 | {4, 3, 2}, 76 | {3, 4, 5}, 77 | {1, 2, 3}, 78 | // Swap here to get a rack3 leader 79 | {3, 2, 4}, 80 | {3, 4, 5}, 81 | {1, 2, 3}, 82 | {2, 3, 4}, 83 | {3, 4, 5}, 84 | {2, 5, 6}, 85 | {2, 3, 4}, 86 | {2, 4, 5}, 87 | }, 88 | checker: checker, 89 | }, 90 | { 91 | description: "Swap not possible", 92 | curr: [][]int{ 93 | {1, 2, 3}, 94 | // Overrepresented leader racks with no swap possible (want rack3) 95 | {2, 5, 1}, 96 | {2, 5, 1}, 97 | {1, 2, 3}, 98 | {2, 5, 1}, 99 | {3, 4, 5}, 100 | }, 101 | expected: [][]int{ 102 | {1, 2, 3}, 103 | // Pick the first rack3 broker that hasn't already been a leader 104 | {6, 5, 1}, 105 | {2, 5, 1}, 106 | {1, 2, 3}, 107 | {2, 5, 1}, 108 | {3, 4, 5}, 109 | }, 110 | checker: checker, 111 | }, 112 | } 113 | 114 | for _, testCase := range testCases { 115 | testCase.evaluate(t, assigner) 116 | } 117 | } 118 | 119 | func TestBalancedLeaderAssignerRandomized(t *testing.T) { 120 | brokers := testBrokers(12, 3) 121 | assigner := NewBalancedLeaderAssigner(brokers, pickers.NewRandomizedPicker()) 122 | checker := func(result []admin.PartitionAssignment) bool { 123 | ok, _ := EvaluateAssignments( 124 | result, 125 | brokers, 126 | config.TopicPlacementConfig{ 127 | Strategy: config.PlacementStrategyBalancedLeaders, 128 | }, 129 | ) 130 | return ok 131 | } 132 | 133 | testCases := []assignerTestCase{ 134 | { 135 | description: "Single swap", 136 | curr: [][]int{ 137 | {1, 2, 3}, 138 | {2, 
4, 5}, 139 | {5, 6, 7}, 140 | }, 141 | expected: [][]int{ 142 | {1, 2, 3}, 143 | {2, 4, 5}, 144 | // Single swap 145 | {6, 5, 7}, 146 | }, 147 | checker: checker, 148 | }, 149 | { 150 | description: "Multiple swaps", 151 | curr: [][]int{ 152 | {1, 2, 3}, 153 | {2, 3, 4}, 154 | {3, 4, 5}, 155 | {1, 2, 3}, 156 | {2, 3, 4}, 157 | {3, 4, 5}, 158 | {1, 2, 3}, 159 | {2, 3, 4}, 160 | {3, 4, 5}, 161 | // Rack 2 is overrepresented as a leader 162 | {2, 5, 6}, 163 | {2, 3, 4}, 164 | {2, 4, 5}, 165 | }, 166 | expected: [][]int{ 167 | {1, 2, 3}, 168 | // Swap here to get a rack1 leader 169 | {4, 3, 2}, 170 | {3, 4, 5}, 171 | {1, 2, 3}, 172 | // Swap here to get a rack3 leader 173 | {3, 2, 4}, 174 | {3, 4, 5}, 175 | {1, 2, 3}, 176 | {2, 3, 4}, 177 | {3, 4, 5}, 178 | {2, 5, 6}, 179 | {2, 3, 4}, 180 | {2, 4, 5}, 181 | }, 182 | checker: checker, 183 | }, 184 | { 185 | description: "Swap not possible", 186 | curr: [][]int{ 187 | {1, 2, 3}, 188 | // Overrepresented leader racks with no swap possible (want rack3) 189 | {2, 5, 1}, 190 | {2, 5, 1}, 191 | {1, 2, 3}, 192 | {2, 5, 1}, 193 | {3, 4, 5}, 194 | }, 195 | expected: [][]int{ 196 | {1, 2, 3}, 197 | // Pick random rack3 broker that hasn't already been a leader 198 | {9, 5, 1}, 199 | {2, 5, 1}, 200 | {1, 2, 3}, 201 | {2, 5, 1}, 202 | {3, 4, 5}, 203 | }, 204 | checker: checker, 205 | }, 206 | } 207 | 208 | for _, testCase := range testCases { 209 | testCase.evaluate(t, assigner) 210 | } 211 | } 212 | -------------------------------------------------------------------------------- /pkg/apply/assigners/balancer_leader.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | 7 | "github.com/segmentio/topicctl/pkg/admin" 8 | "github.com/segmentio/topicctl/pkg/apply/pickers" 9 | ) 10 | 11 | // BalancedLeaderAssigner is an Assigner that tries to ensure that the leaders 12 | // of each partition are balanced across all of the broker racks. 
The general 13 | // goals are to do this in a way that: 14 | // 15 | // 1. Does in-partition leader swaps whenever possible as opposed to changing 16 | // the set of brokers in a partition 17 | // 2. Prevents particular brokers from being overrepresented among the leaders 18 | // 19 | // The algorithm currently used is as follows: 20 | // 21 | // while not balanced: 22 | // find racks with fewest and most leaders (i.e., the overrepresented and underrepresented) 23 | // improve balance by doing a single leader replacement: 24 | // use the picker to rank the partitions that have an overrepresented leader 25 | // for each leader: 26 | // for each partition with the leader: 27 | // swap the leader with one of its followers if possible, then stop 28 | // otherwise, use the picker to replace the leader in the top-ranked partition with 29 | // a new broker from the target rack 30 | type BalancedLeaderAssigner struct { 31 | brokers []admin.BrokerInfo 32 | racks []string 33 | brokerRacks map[int]string 34 | brokersPerRack map[string][]int 35 | picker pickers.Picker 36 | } 37 | 38 | var _ Assigner = (*BalancedLeaderAssigner)(nil) 39 | 40 | // NewBalancedLeaderAssigner creates and returns a BalancedLeaderAssigner instance. 41 | func NewBalancedLeaderAssigner( 42 | brokers []admin.BrokerInfo, 43 | picker pickers.Picker, 44 | ) *BalancedLeaderAssigner { 45 | return &BalancedLeaderAssigner{ 46 | brokers: brokers, 47 | picker: picker, 48 | racks: admin.DistinctRacks(brokers), 49 | brokerRacks: admin.BrokerRacks(brokers), 50 | brokersPerRack: admin.BrokersPerRack(brokers), 51 | } 52 | } 53 | 54 | // Assign returns a new partition assignment according to the assigner-specific logic. 
55 | func (b *BalancedLeaderAssigner) Assign( 56 | topic string, 57 | curr []admin.PartitionAssignment, 58 | ) ([]admin.PartitionAssignment, error) { 59 | if err := admin.CheckAssignments(curr); err != nil { 60 | return nil, err 61 | } 62 | if len(curr)%len(b.racks) != 0 { 63 | return nil, 64 | fmt.Errorf( 65 | "Cannot balance leaders because the partition count is not a multiple of the number of racks", 66 | ) 67 | } 68 | 69 | // First, copy current into desired 70 | desired := admin.CopyAssignments(curr) 71 | 72 | count := 0 73 | 74 | for { 75 | count++ 76 | minRack, maxRack := b.minMaxRacks(desired) 77 | if minRack == maxRack { 78 | break 79 | } 80 | 81 | err := b.replaceLeader(topic, count, desired, maxRack, minRack) 82 | if err != nil { 83 | return desired, err 84 | } 85 | 86 | if count > 1000 { 87 | return nil, errors.New("Too many loops") 88 | } 89 | } 90 | 91 | return desired, nil 92 | } 93 | 94 | // minMaxRacks returns the racks with the fewest and most leaders. This 95 | // is used as an input into the next step, where leaders are swapped around. 
96 | func (b *BalancedLeaderAssigner) minMaxRacks( 97 | curr []admin.PartitionAssignment, 98 | ) (string, string) { 99 | leaderRackCounts := map[string]int{} 100 | 101 | for _, rack := range b.racks { 102 | leaderRackCounts[rack] = 0 103 | } 104 | 105 | for _, assignment := range curr { 106 | leader := assignment.Replicas[0] 107 | 108 | leaderRack := b.brokerRacks[leader] 109 | leaderRackCounts[leaderRack]++ 110 | } 111 | 112 | var minRack, maxRack string 113 | var minCount, maxCount int 114 | 115 | // Don't iterate over map to ensure ordering is consistent 116 | for _, rack := range b.racks { 117 | count := leaderRackCounts[rack] 118 | 119 | if minRack == "" { 120 | minRack = rack 121 | maxRack = rack 122 | minCount = count 123 | maxCount = count 124 | } else { 125 | if count < minCount { 126 | minRack = rack 127 | minCount = count 128 | } 129 | if count > maxCount { 130 | maxRack = rack 131 | maxCount = count 132 | } 133 | } 134 | } 135 | 136 | // If everything is balanced, make sure min and max racks are the same 137 | if minCount == maxCount { 138 | maxRack = minRack 139 | } 140 | 141 | return minRack, maxRack 142 | } 143 | 144 | func (b *BalancedLeaderAssigner) replaceLeader( 145 | topic string, 146 | count int, 147 | curr []admin.PartitionAssignment, 148 | fromRack string, 149 | toRack string, 150 | ) error { 151 | // First get the partitions that have a leader in the from rack 152 | fromPartitions := []int{} 153 | 154 | for _, assignment := range curr { 155 | leader := assignment.Replicas[0] 156 | 157 | if b.brokerRacks[leader] == fromRack { 158 | fromPartitions = append(fromPartitions, assignment.ID) 159 | } 160 | } 161 | 162 | b.picker.SortRemovals( 163 | topic, 164 | fromPartitions, 165 | curr, 166 | 0, 167 | ) 168 | 169 | // Go through all of the leaders in descending order. 
170 | for _, partition := range fromPartitions { 171 | assignment := curr[partition] 172 | leader := assignment.Replicas[0] 173 | 174 | for r, replica := range assignment.Replicas[1:] { 175 | if b.brokerRacks[replica] == toRack { 176 | // We can swap within the same partition. Do this 177 | // and exit. 178 | curr[partition].Replicas[0] = replica 179 | curr[partition].Replicas[r+1] = leader 180 | return nil 181 | } 182 | } 183 | } 184 | 185 | // We could not do an in-partition swap, so use the picker to replace the leader 186 | // of the highest ranked partition with a leader in the target rack. 187 | return b.picker.PickNew( 188 | topic, 189 | b.brokersPerRack[toRack], 190 | curr, 191 | fromPartitions[0], 192 | 0, 193 | ) 194 | } 195 | -------------------------------------------------------------------------------- /pkg/apply/assigners/cross_rack.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "fmt" 5 | "github.com/segmentio/topicctl/pkg/admin" 6 | "github.com/segmentio/topicctl/pkg/apply/pickers" 7 | "sort" 8 | ) 9 | 10 | // CrossRackAssigner is an assigner that ensures that the replicas of each 11 | // partition are on different racks than each other. The algorithm is: 12 | // 13 | // https://segment.atlassian.net/browse/DRES-922?focusedCommentId=237288 14 | // 15 | // for each partition: 16 | // for each non-leader replica: 17 | // if replica is in same rack as leader: 18 | // change replica to a placeholder (-1) 19 | // 20 | // then: 21 | // 22 | // for each partition: 23 | // for each non-leader replica: 24 | // if replica is set to placeholder: 25 | // use picker to replace it with a broker in a different rack than the leader and any other replicas 26 | // 27 | // Note that this assigner doesn't make any leader changes. Thus, the assignments 28 | // need to already be leader balanced before we make the changes with this assigner. 
29 | type CrossRackAssigner struct { 30 | brokers []admin.BrokerInfo 31 | brokerRacks map[int]string 32 | brokersPerRack map[string][]int 33 | picker pickers.Picker 34 | } 35 | 36 | var _ Assigner = (*CrossRackAssigner)(nil) 37 | 38 | // NewCrossRackAssigner creates and returns a CrossRackAssigner instance. 39 | func NewCrossRackAssigner( 40 | brokers []admin.BrokerInfo, 41 | picker pickers.Picker, 42 | ) *CrossRackAssigner { 43 | return &CrossRackAssigner{ 44 | brokers: brokers, 45 | brokerRacks: admin.BrokerRacks(brokers), 46 | brokersPerRack: admin.BrokersPerRack(brokers), 47 | picker: picker, 48 | } 49 | } 50 | 51 | // Assign returns a new partition assignment according to the assigner-specific logic. 52 | func (s *CrossRackAssigner) Assign( 53 | topic string, 54 | curr []admin.PartitionAssignment, 55 | ) ([]admin.PartitionAssignment, error) { 56 | if err := admin.CheckAssignments(curr); err != nil { 57 | return nil, err 58 | } 59 | 60 | // Check to make sure that the number of racks is >= number of replicas. 61 | // Otherwise, we won't be able to find a feasible assignment. 
62 | if len(s.brokersPerRack) < len(curr[0].Replicas) { 63 | return nil, fmt.Errorf("Do not have enough racks for cross-rack placement") 64 | } 65 | 66 | desired := admin.CopyAssignments(curr) 67 | 68 | // First, null-out any replicas that are in the wrong rack, and record the racks we keep 69 | usedRacksPerPartition := make([]map[string]bool, len(curr)) 70 | for index, assignment := range desired { 71 | usedRacks := make(map[string]bool, len(curr)) 72 | for r, replica := range assignment.Replicas { 73 | replicaRack := s.brokerRacks[replica] 74 | if _, used := usedRacks[replicaRack]; used { 75 | // Rack has already been seen, null it out since we need to replace it 76 | desired[index].Replicas[r] = -1 77 | } else { 78 | // First time using this rack for this partition 79 | usedRacks[replicaRack] = true 80 | } 81 | } 82 | usedRacksPerPartition[index] = usedRacks 83 | } 84 | 85 | // Which racks did we not use yet? 86 | availableRacksPerPartition := make([][]string, 0, len(curr)) 87 | for _, usedRacks := range usedRacksPerPartition { 88 | availableRacks := make(map[string]bool) 89 | for _, rack := range s.brokerRacks { 90 | if _, used := usedRacks[rack]; !used { 91 | availableRacks[rack] = true 92 | } 93 | } 94 | sortedRacks := make([]string, 0, len(availableRacks)) 95 | for r := range availableRacks { 96 | sortedRacks = append(sortedRacks, r) 97 | } 98 | sort.Strings(sortedRacks) 99 | availableRacksPerPartition = append(availableRacksPerPartition, sortedRacks) 100 | } 101 | 102 | // Then, go back and replace all of the marked replicas with replicas from available racks 103 | for index, assignment := range desired { 104 | for r, replica := range assignment.Replicas { 105 | if replica == -1 { 106 | // Pop the 1st rack off and pick one of the brokers in that rack 107 | targetRack, remainingRacks := availableRacksPerPartition[index][0], availableRacksPerPartition[index][1:] 108 | availableRacksPerPartition[index] = remainingRacks 109 | targetBrokers := 
s.brokersPerRack[targetRack] 110 | err := s.picker.PickNew( 111 | topic, 112 | targetBrokers, 113 | desired, 114 | index, 115 | r, 116 | ) 117 | if err != nil { 118 | return nil, err 119 | } 120 | } 121 | } 122 | } 123 | 124 | return desired, nil 125 | } 126 | -------------------------------------------------------------------------------- /pkg/apply/assigners/cross_rack_test.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/segmentio/topicctl/pkg/admin" 8 | "github.com/segmentio/topicctl/pkg/apply/pickers" 9 | "github.com/segmentio/topicctl/pkg/config" 10 | ) 11 | 12 | func TestCrossRackAssignerThreeReplicas(t *testing.T) { 13 | brokers := testBrokers(12, 3) 14 | assigner := NewCrossRackAssigner(brokers, pickers.NewLowestIndexPicker()) 15 | checker := func(result []admin.PartitionAssignment) bool { 16 | ok, _ := EvaluateAssignments( 17 | result, 18 | brokers, 19 | config.TopicPlacementConfig{ 20 | Strategy: config.PlacementStrategyCrossRack, 21 | }, 22 | ) 23 | return ok 24 | } 25 | 26 | testCases := []assignerTestCase{ 27 | { 28 | description: "Already cross rack", 29 | curr: [][]int{ 30 | {1, 2, 3}, 31 | {4, 5, 6}, 32 | {7, 8, 9}, 33 | }, 34 | expected: [][]int{ 35 | {1, 2, 3}, 36 | {4, 5, 6}, 37 | {7, 8, 9}, 38 | }, 39 | checker: checker, 40 | }, 41 | { 42 | description: "Single change", 43 | curr: [][]int{ 44 | {1, 2, 3}, 45 | {4, 5, 6}, 46 | {7, 4, 9}, 47 | }, 48 | expected: [][]int{ 49 | {1, 2, 3}, 50 | {4, 5, 6}, 51 | {7, 8, 9}, 52 | }, 53 | checker: checker, 54 | }, 55 | { 56 | description: "Multiple changes", 57 | curr: [][]int{ 58 | {1, 4, 7}, 59 | {2, 5, 8}, 60 | {9, 3, 11}, 61 | {8, 4, 11}, 62 | {10, 4, 11}, 63 | {5, 4, 11}, 64 | {7, 4, 11}, 65 | {3, 4, 11}, 66 | {12, 4, 11}, 67 | }, 68 | expected: [][]int{ 69 | {1, 2, 3}, 70 | {2, 1, 6}, 71 | // 2nd position should be replaced with different rack than other 2 replicas 72 | // 3rd 
position is a valid rack already, should be left alone 73 | {9, 7, 11}, 74 | // 2nd & 3rd both were invalid racks 75 | {8, 4, 9}, 76 | // 3rd was a valid rack 77 | {10, 3, 11}, 78 | {5, 4, 12}, 79 | {7, 6, 11}, 80 | {3, 4, 11}, 81 | {12, 4, 11}, 82 | }, 83 | checker: checker, 84 | }, 85 | { 86 | description: "Changes with all replicas", 87 | curr: [][]int{ 88 | {1, 4, 7}, 89 | {2, 5, 8}, 90 | {3, 6, 9}, 91 | {4, 7, 10}, 92 | {5, 8, 11}, 93 | {6, 9, 12}, 94 | {7, 10, 1}, 95 | {8, 11, 2}, 96 | {9, 12, 3}, 97 | {10, 1, 4}, 98 | {11, 2, 5}, 99 | {12, 3, 6}, 100 | }, 101 | expected: [][]int{ 102 | {1, 2, 3}, 103 | {2, 1, 6}, 104 | {3, 4, 2}, 105 | {4, 5, 9}, 106 | {5, 7, 12}, 107 | {6, 10, 5}, 108 | {7, 8, 3}, 109 | {8, 1, 6}, 110 | {9, 4, 8}, 111 | {10, 11, 9}, 112 | {11, 7, 12}, 113 | {12, 10, 11}, 114 | }, 115 | checker: checker, 116 | }, 117 | } 118 | 119 | for _, testCase := range testCases { 120 | testCase.evaluate(t, assigner) 121 | } 122 | } 123 | 124 | func TestCrossRackAssignerThreeReplicasRandomized(t *testing.T) { 125 | brokers := testBrokers(12, 3) 126 | assigner := NewCrossRackAssigner(brokers, pickers.NewRandomizedPicker()) 127 | checker := func(result []admin.PartitionAssignment) bool { 128 | ok, _ := EvaluateAssignments( 129 | result, 130 | brokers, 131 | config.TopicPlacementConfig{ 132 | Strategy: config.PlacementStrategyCrossRack, 133 | }, 134 | ) 135 | return ok 136 | } 137 | 138 | testCases := []assignerTestCase{ 139 | { 140 | description: "Already cross rack", 141 | curr: [][]int{ 142 | {1, 2, 3}, 143 | {4, 5, 6}, 144 | {7, 8, 9}, 145 | }, 146 | expected: [][]int{ 147 | {1, 2, 3}, 148 | {4, 5, 6}, 149 | {7, 8, 9}, 150 | }, 151 | checker: checker, 152 | }, 153 | { 154 | description: "Single change", 155 | curr: [][]int{ 156 | {1, 2, 3}, 157 | {4, 10, 6}, 158 | {7, 10, 9}, 159 | }, 160 | expected: [][]int{ 161 | {1, 2, 3}, 162 | {4, 11, 6}, 163 | {7, 5, 9}, 164 | }, 165 | checker: checker, 166 | }, 167 | { 168 | description: "Changes with all 
replicas", 169 | curr: [][]int{ 170 | {1, 4, 7}, 171 | {2, 5, 8}, 172 | {3, 6, 9}, 173 | {4, 7, 10}, 174 | {5, 8, 11}, 175 | {6, 9, 12}, 176 | {7, 10, 1}, 177 | {8, 11, 2}, 178 | {9, 12, 3}, 179 | {10, 1, 4}, 180 | {11, 2, 5}, 181 | {12, 3, 6}, 182 | }, 183 | expected: [][]int{ 184 | {1, 2, 6}, 185 | {2, 10, 9}, 186 | {3, 1, 8}, 187 | {4, 5, 3}, 188 | {5, 4, 12}, 189 | {6, 7, 11}, 190 | {7, 8, 9}, 191 | {8, 10, 12}, 192 | {9, 1, 5}, 193 | {10, 11, 3}, 194 | {11, 4, 6}, 195 | {12, 7, 2}, 196 | }, 197 | checker: checker, 198 | }, 199 | } 200 | 201 | for _, testCase := range testCases { 202 | testCase.evaluate(t, assigner) 203 | } 204 | } 205 | 206 | func TestCrossRackAssignerTwoReplicas(t *testing.T) { 207 | brokers := testBrokers(6, 3) 208 | assigner := NewCrossRackAssigner(brokers, pickers.NewLowestIndexPicker()) 209 | checker := func(result []admin.PartitionAssignment) bool { 210 | ok, _ := EvaluateAssignments( 211 | result, 212 | brokers, 213 | config.TopicPlacementConfig{ 214 | Strategy: config.PlacementStrategyCrossRack, 215 | }, 216 | ) 217 | return ok 218 | } 219 | 220 | testCases := []assignerTestCase{ 221 | { 222 | description: "Already cross rack", 223 | curr: [][]int{ 224 | {1, 2}, 225 | {3, 4}, 226 | {5, 6}, 227 | }, 228 | expected: [][]int{ 229 | {1, 2}, 230 | {3, 4}, 231 | {5, 6}, 232 | }, 233 | checker: checker, 234 | }, 235 | { 236 | description: "Error due to more replicas than racks", 237 | curr: [][]int{ 238 | {1, 2, 3, 4}, 239 | {5, 6, 1, 2}, 240 | }, 241 | err: errors.New("more replicas than racks"), 242 | }, 243 | } 244 | 245 | for _, testCase := range testCases { 246 | testCase.evaluate(t, assigner) 247 | } 248 | } -------------------------------------------------------------------------------- /pkg/apply/assigners/evaluate.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | 7 | "github.com/segmentio/topicctl/pkg/admin" 8 | 
"github.com/segmentio/topicctl/pkg/config" 9 | ) 10 | 11 | // EvaluateAssignments determines whether the given assignments are consistent 12 | // with the provided placement strategy. 13 | func EvaluateAssignments( 14 | assignments []admin.PartitionAssignment, 15 | brokers []admin.BrokerInfo, 16 | placementConfig config.TopicPlacementConfig, 17 | ) (bool, error) { 18 | if err := admin.CheckAssignments(assignments); err != nil { 19 | return false, err 20 | } 21 | 22 | minRacks, maxRacks, leaderRackCounts := minMaxRacks(assignments, brokers) 23 | balanced := balancedLeaders(leaderRackCounts) 24 | 25 | switch placementConfig.Strategy { 26 | case config.PlacementStrategyAny: 27 | return true, nil 28 | case config.PlacementStrategyStatic: 29 | replicas, err := admin.AssignmentsToReplicas(assignments) 30 | if err != nil { 31 | return false, err 32 | } 33 | return reflect.DeepEqual( 34 | replicas, 35 | placementConfig.StaticAssignments, 36 | ), nil 37 | case config.PlacementStrategyStaticInRack: 38 | if !(minRacks == 1 && maxRacks == 1) { 39 | return false, nil 40 | } 41 | if len(placementConfig.StaticRackAssignments) != len(assignments) { 42 | return false, nil 43 | } 44 | 45 | brokerRacks := admin.BrokerRacks(brokers) 46 | 47 | for a, assignment := range assignments { 48 | partitionRack := brokerRacks[assignment.Replicas[0]] 49 | expectedRack := placementConfig.StaticRackAssignments[a] 50 | if partitionRack != expectedRack { 51 | return false, nil 52 | } 53 | } 54 | 55 | return true, nil 56 | case config.PlacementStrategyBalancedLeaders: 57 | return balanced, nil 58 | case config.PlacementStrategyInRack: 59 | return minRacks == 1 && maxRacks == 1, nil 60 | case config.PlacementStrategyCrossRack: 61 | brokerRacks := admin.BrokerRacks(brokers) 62 | for _, assignment := range assignments { 63 | if len(assignment.Replicas) != len(assignment.DistinctRacks(brokerRacks)) { 64 | return false, nil 65 | } 66 | } 67 | return true, nil 68 | default: 69 | return false, fmt.Errorf( 
70 | "Unrecognized placementStrategy: %s", 71 | placementConfig.Strategy, 72 | ) 73 | } 74 | } 75 | 76 | func balancedLeaders(leaderRackCounts map[string]int) bool { 77 | var minCount, maxCount int 78 | first := true 79 | 80 | for _, count := range leaderRackCounts { 81 | if first { 82 | minCount = count 83 | maxCount = count 84 | first = false 85 | } else { 86 | if count < minCount { 87 | minCount = count 88 | } 89 | if count > maxCount { 90 | maxCount = count 91 | } 92 | } 93 | } 94 | 95 | return minCount == maxCount 96 | } 97 | 98 | func minMaxRacks( 99 | assignments []admin.PartitionAssignment, 100 | brokers []admin.BrokerInfo, 101 | ) (int, int, map[string]int) { 102 | brokerRacks := admin.BrokerRacks(brokers) 103 | racks := admin.DistinctRacks(brokers) 104 | 105 | leaderRackCounts := map[string]int{} 106 | 107 | for _, rack := range racks { 108 | leaderRackCounts[rack] = 0 109 | } 110 | 111 | var minRacks, maxRacks int 112 | 113 | for a, assignment := range assignments { 114 | leader := assignment.Replicas[0] 115 | leaderRackCounts[brokerRacks[leader]]++ 116 | 117 | racks := len(assignment.DistinctRacks(brokerRacks)) 118 | 119 | if a == 0 { 120 | minRacks = racks 121 | maxRacks = racks 122 | } else { 123 | if racks < minRacks { 124 | minRacks = racks 125 | } 126 | if racks > maxRacks { 127 | maxRacks = racks 128 | } 129 | } 130 | } 131 | 132 | return minRacks, maxRacks, leaderRackCounts 133 | } 134 | -------------------------------------------------------------------------------- /pkg/apply/assigners/evaluate_test.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/topicctl/pkg/admin" 7 | "github.com/segmentio/topicctl/pkg/config" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestEvaluateAssignmentsNonStatic(t *testing.T) { 12 | brokers := testBrokers(12, 3) 13 | 14 | type evaluateTestCase struct { 15 | replicaSlices [][]int 16 | 
expectedResults map[config.PlacementStrategy]bool 17 | expectedErr map[config.PlacementStrategy]bool 18 | } 19 | 20 | testCases := []evaluateTestCase{ 21 | { 22 | replicaSlices: [][]int{ 23 | {1, 2, 3}, 24 | {4, 5, 6}, 25 | {6, 7, 8}, 26 | }, 27 | expectedResults: map[config.PlacementStrategy]bool{ 28 | config.PlacementStrategyAny: true, 29 | config.PlacementStrategyStatic: false, 30 | config.PlacementStrategyStaticInRack: false, 31 | config.PlacementStrategyBalancedLeaders: false, 32 | config.PlacementStrategyInRack: false, 33 | }, 34 | }, 35 | { 36 | // Matches static assignments set in test run loop below 37 | replicaSlices: [][]int{ 38 | {1, 2, 3}, 39 | {4, 5, 6}, 40 | {6, 7, 1}, 41 | }, 42 | expectedResults: map[config.PlacementStrategy]bool{ 43 | config.PlacementStrategyAny: true, 44 | config.PlacementStrategyStatic: true, 45 | config.PlacementStrategyStaticInRack: false, 46 | config.PlacementStrategyBalancedLeaders: false, 47 | config.PlacementStrategyInRack: false, 48 | }, 49 | }, 50 | { 51 | // Matches static assignments set in test run loop below 52 | replicaSlices: [][]int{ 53 | {1, 2, 3}, 54 | {4, 5, 6}, 55 | {6, 7, 1}, 56 | }, 57 | expectedResults: map[config.PlacementStrategy]bool{ 58 | config.PlacementStrategyAny: true, 59 | config.PlacementStrategyStatic: true, 60 | config.PlacementStrategyStaticInRack: false, 61 | config.PlacementStrategyBalancedLeaders: false, 62 | config.PlacementStrategyInRack: false, 63 | }, 64 | }, 65 | { 66 | // Matches static racks set in test run loop below 67 | replicaSlices: [][]int{ 68 | {1, 4, 7}, 69 | {2, 5, 8}, 70 | {3, 6, 9}, 71 | {4, 7, 10}, 72 | }, 73 | expectedResults: map[config.PlacementStrategy]bool{ 74 | config.PlacementStrategyAny: true, 75 | config.PlacementStrategyStatic: false, 76 | config.PlacementStrategyStaticInRack: true, 77 | config.PlacementStrategyBalancedLeaders: false, 78 | config.PlacementStrategyInRack: true, 79 | }, 80 | }, 81 | { 82 | replicaSlices: [][]int{ 83 | {1, 4, 6}, 84 | {1, 4, 6}, 85 
| {1, 4, 6}, 86 | }, 87 | expectedResults: map[config.PlacementStrategy]bool{ 88 | config.PlacementStrategyAny: true, 89 | config.PlacementStrategyStatic: false, 90 | config.PlacementStrategyStaticInRack: false, 91 | config.PlacementStrategyBalancedLeaders: false, 92 | config.PlacementStrategyInRack: false, 93 | }, 94 | }, 95 | { 96 | replicaSlices: [][]int{ 97 | {1, 4, 7}, 98 | {2, 5, 8}, 99 | {3, 6, 9}, 100 | {1, 4, 7}, 101 | {2, 5, 8}, 102 | {3, 6, 9}, 103 | }, 104 | expectedResults: map[config.PlacementStrategy]bool{ 105 | config.PlacementStrategyAny: true, 106 | config.PlacementStrategyStatic: false, 107 | config.PlacementStrategyStaticInRack: false, 108 | config.PlacementStrategyBalancedLeaders: true, 109 | config.PlacementStrategyInRack: true, 110 | }, 111 | }, 112 | { 113 | replicaSlices: [][]int{ 114 | {1, 4, 7}, 115 | {2, 5, 8}, 116 | }, 117 | expectedResults: map[config.PlacementStrategy]bool{ 118 | config.PlacementStrategyAny: true, 119 | config.PlacementStrategyStatic: false, 120 | config.PlacementStrategyStaticInRack: false, 121 | config.PlacementStrategyBalancedLeaders: false, 122 | config.PlacementStrategyInRack: true, 123 | }, 124 | }, 125 | { 126 | replicaSlices: [][]int{ 127 | {1, 1, 1}, 128 | }, 129 | expectedResults: map[config.PlacementStrategy]bool{ 130 | config.PlacementStrategyAny: false, 131 | }, 132 | expectedErr: map[config.PlacementStrategy]bool{ 133 | config.PlacementStrategyAny: true, 134 | }, 135 | }, 136 | } 137 | 138 | for _, testCase := range testCases { 139 | for strategy, expectedResult := range testCase.expectedResults { 140 | result, err := EvaluateAssignments( 141 | admin.ReplicasToAssignments(testCase.replicaSlices), 142 | brokers, 143 | config.TopicPlacementConfig{ 144 | Strategy: strategy, 145 | StaticAssignments: [][]int{ 146 | {1, 2, 3}, 147 | {4, 5, 6}, 148 | {6, 7, 1}, 149 | }, 150 | StaticRackAssignments: []string{ 151 | "zone1", 152 | "zone2", 153 | "zone3", 154 | "zone1", 155 | }, 156 | }, 157 | ) 158 | if 
testCase.expectedErr[strategy] { 159 | assert.Error(t, err) 160 | } else { 161 | assert.NoError(t, err) 162 | assert.Equal( 163 | t, 164 | expectedResult, 165 | result, 166 | "Input: %+v, strategy: %s", 167 | testCase.replicaSlices, 168 | strategy, 169 | ) 170 | } 171 | } 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /pkg/apply/assigners/single_rack.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/segmentio/topicctl/pkg/admin" 7 | "github.com/segmentio/topicctl/pkg/apply/pickers" 8 | ) 9 | 10 | // SingleRackAssigner is an assigner that ensures that the replicas of each 11 | // partition are in the same rack as the leader. The algorithm is: 12 | // 13 | // for each partition: 14 | // for each non-leader replica: 15 | // if replica not in same rack as leader: 16 | // change replica to a placeholder (-1) 17 | // 18 | // then: 19 | // 20 | // for each partition: 21 | // for each non-leader replica: 22 | // if replica is set to placeholder: 23 | // use picker to replace it with a broker in the target rack 24 | // 25 | // Note that this assigner doesn't make any leader changes. Thus, the assignments 26 | // need to already be leader balanced before we make the changes with this assigner. 27 | type SingleRackAssigner struct { 28 | brokers []admin.BrokerInfo 29 | brokerRacks map[int]string 30 | brokersPerRack map[string][]int 31 | picker pickers.Picker 32 | } 33 | 34 | var _ Assigner = (*SingleRackAssigner)(nil) 35 | 36 | // NewSingleRackAssigner creates and returns a SingleRackAssigner instance. 
37 | func NewSingleRackAssigner( 38 | brokers []admin.BrokerInfo, 39 | picker pickers.Picker, 40 | ) *SingleRackAssigner { 41 | return &SingleRackAssigner{ 42 | brokers: brokers, 43 | brokerRacks: admin.BrokerRacks(brokers), 44 | brokersPerRack: admin.BrokersPerRack(brokers), 45 | picker: picker, 46 | } 47 | } 48 | 49 | // Assign returns a new partition assignment according to the assigner-specific logic. 50 | func (s *SingleRackAssigner) Assign( 51 | topic string, 52 | curr []admin.PartitionAssignment, 53 | ) ([]admin.PartitionAssignment, error) { 54 | if err := admin.CheckAssignments(curr); err != nil { 55 | return nil, err 56 | } 57 | 58 | // Check to make sure that the number of brokers in each rack is >= number of 59 | // replicas. Otherwise, we won't be able to find a feasible assignment. 60 | for rack, brokers := range s.brokersPerRack { 61 | if len(brokers) < len(curr[0].Replicas) { 62 | return nil, fmt.Errorf( 63 | "Rack %s does not have enough brokers for in-rack placement", 64 | rack, 65 | ) 66 | } 67 | } 68 | 69 | desired := admin.CopyAssignments(curr) 70 | 71 | // First, null-out any replicas that are in the wrong rack 72 | for index, assignment := range desired { 73 | leader := assignment.Replicas[0] 74 | leaderRack := s.brokerRacks[leader] 75 | 76 | for r, replica := range assignment.Replicas { 77 | replicaRack := s.brokerRacks[replica] 78 | 79 | if replicaRack != leaderRack { 80 | desired[index].Replicas[r] = -1 81 | } 82 | } 83 | } 84 | 85 | // Then, go back and replace all of the marked replicas 86 | for index, assignment := range desired { 87 | leader := assignment.Replicas[0] 88 | leaderRack := s.brokerRacks[leader] 89 | 90 | for r, replica := range assignment.Replicas { 91 | if replica == -1 { 92 | err := s.picker.PickNew( 93 | topic, 94 | s.brokersPerRack[leaderRack], 95 | desired, 96 | index, 97 | r, 98 | ) 99 | if err != nil { 100 | return nil, err 101 | } 102 | } 103 | } 104 | } 105 | 106 | return desired, nil 107 | } 108 | 
-------------------------------------------------------------------------------- /pkg/apply/assigners/single_rack_test.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/topicctl/pkg/admin" 7 | "github.com/segmentio/topicctl/pkg/apply/pickers" 8 | "github.com/segmentio/topicctl/pkg/config" 9 | ) 10 | 11 | func TestSingleRackAssignerThreeReplicas(t *testing.T) { 12 | brokers := testBrokers(12, 3) 13 | assigner := NewSingleRackAssigner(brokers, pickers.NewLowestIndexPicker()) 14 | checker := func(result []admin.PartitionAssignment) bool { 15 | ok, _ := EvaluateAssignments( 16 | result, 17 | brokers, 18 | config.TopicPlacementConfig{ 19 | Strategy: config.PlacementStrategyInRack, 20 | }, 21 | ) 22 | return ok 23 | } 24 | 25 | testCases := []assignerTestCase{ 26 | { 27 | description: "Already single rack per partition", 28 | curr: [][]int{ 29 | {1, 4, 7}, 30 | {2, 5, 8}, 31 | {3, 6, 9}, 32 | }, 33 | expected: [][]int{ 34 | {1, 4, 7}, 35 | {2, 5, 8}, 36 | {3, 6, 9}, 37 | }, 38 | checker: checker, 39 | }, 40 | { 41 | description: "Single change", 42 | curr: [][]int{ 43 | {1, 4, 7}, 44 | {2, 5, 8}, 45 | {3, 5, 9}, 46 | }, 47 | expected: [][]int{ 48 | {1, 4, 7}, 49 | {2, 5, 8}, 50 | {3, 6, 9}, 51 | }, 52 | checker: checker, 53 | }, 54 | { 55 | description: "Multiple changes", 56 | curr: [][]int{ 57 | {1, 4, 7}, 58 | {2, 5, 8}, 59 | // Multi-rack 60 | {3, 5, 9}, 61 | // Multi-rack 62 | {1, 3, 8}, 63 | {2, 5, 8}, 64 | // Multi-rack 65 | {3, 5, 9}, 66 | {1, 4, 7}, 67 | {2, 5, 8}, 68 | // Multi-rack 69 | {3, 5, 9}, 70 | }, 71 | expected: [][]int{ 72 | {1, 4, 7}, 73 | {2, 5, 8}, 74 | {3, 6, 9}, 75 | // Skip over broker 4 for second position since it's already used 76 | {1, 7, 4}, 77 | {2, 5, 8}, 78 | // Skip over broker 6 since it's already been used in this position 79 | {3, 12, 9}, 80 | {1, 4, 7}, 81 | {2, 5, 8}, 82 | // Then, come back to broker 6 83 | {3, 6, 
9}, 84 | }, 85 | checker: checker, 86 | }, 87 | } 88 | 89 | for _, testCase := range testCases { 90 | testCase.evaluate(t, assigner) 91 | } 92 | } 93 | 94 | func TestSingleRackAssignerThreeReplicasRandomized(t *testing.T) { 95 | brokers := testBrokers(12, 3) 96 | assigner := NewSingleRackAssigner(brokers, pickers.NewRandomizedPicker()) 97 | checker := func(result []admin.PartitionAssignment) bool { 98 | ok, _ := EvaluateAssignments( 99 | result, 100 | brokers, 101 | config.TopicPlacementConfig{ 102 | Strategy: config.PlacementStrategyInRack, 103 | }, 104 | ) 105 | return ok 106 | } 107 | 108 | testCases := []assignerTestCase{ 109 | { 110 | description: "Single change", 111 | topic: "test-topic1", 112 | curr: [][]int{ 113 | {1, 4, 7}, 114 | {2, 5, 8}, 115 | {3, 5, 9}, 116 | }, 117 | expected: [][]int{ 118 | {1, 4, 7}, 119 | {2, 5, 8}, 120 | {3, 12, 9}, 121 | }, 122 | checker: checker, 123 | }, 124 | { 125 | description: "Multiple changes", 126 | topic: "test-topic3", 127 | curr: [][]int{ 128 | {1, 4, 7}, 129 | {2, 5, 8}, 130 | // Multi-rack 131 | {3, 5, 9}, 132 | // Multi-rack 133 | {1, 3, 8}, 134 | {2, 5, 8}, 135 | // Multi-rack 136 | {3, 5, 9}, 137 | {1, 4, 7}, 138 | {2, 5, 8}, 139 | // Multi-rack 140 | {3, 5, 9}, 141 | }, 142 | expected: [][]int{ 143 | {1, 4, 7}, 144 | {2, 5, 8}, 145 | {3, 12, 9}, 146 | // Skip over broker 4 for second position since it's already used 147 | {1, 10, 4}, 148 | {2, 5, 8}, 149 | // Skip over broker 6 since it's already been used in this position 150 | {3, 6, 9}, 151 | {1, 4, 7}, 152 | {2, 5, 8}, 153 | // Then, come back to either broker 6 or 12 154 | {3, 6, 9}, 155 | }, 156 | checker: checker, 157 | }, 158 | } 159 | 160 | for _, testCase := range testCases { 161 | testCase.evaluate(t, assigner) 162 | } 163 | } 164 | 165 | func TestSingleRackAssignerTwoReplicas(t *testing.T) { 166 | brokers := testBrokers(6, 3) 167 | assigner := NewSingleRackAssigner(brokers, pickers.NewLowestIndexPicker()) 168 | checker := func(result 
[]admin.PartitionAssignment) bool { 169 | ok, _ := EvaluateAssignments( 170 | result, 171 | brokers, 172 | config.TopicPlacementConfig{ 173 | Strategy: config.PlacementStrategyInRack, 174 | }, 175 | ) 176 | return ok 177 | } 178 | 179 | testCases := []assignerTestCase{ 180 | { 181 | description: "Already single rack per partition", 182 | curr: [][]int{ 183 | {1, 4}, 184 | {2, 5}, 185 | {3, 6}, 186 | }, 187 | expected: [][]int{ 188 | {1, 4}, 189 | {2, 5}, 190 | {3, 6}, 191 | }, 192 | checker: checker, 193 | }, 194 | { 195 | description: "Error", 196 | curr: [][]int{ 197 | {1, 2}, 198 | {3, 2}, 199 | {5, 3}, 200 | {6, 5}, 201 | {1, 6}, 202 | {2, 6}, 203 | {1, 3}, 204 | {5, 3}, 205 | {6, 5}, 206 | }, 207 | expected: [][]int{ 208 | {1, 4}, 209 | {3, 6}, 210 | {5, 2}, 211 | {6, 3}, 212 | {1, 4}, 213 | {2, 5}, 214 | {1, 4}, 215 | {5, 2}, 216 | {6, 3}, 217 | }, 218 | checker: checker, 219 | }, 220 | } 221 | 222 | for _, testCase := range testCases { 223 | testCase.evaluate(t, assigner) 224 | } 225 | } 226 | -------------------------------------------------------------------------------- /pkg/apply/assigners/static.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import "github.com/segmentio/topicctl/pkg/admin" 4 | 5 | // StaticAssigner is an Assigner that ignores the current state and assigns 6 | // based on the value of the Assignments field. Generally intended for 7 | // testing purposes. 8 | type StaticAssigner struct { 9 | Assignments []admin.PartitionAssignment 10 | } 11 | 12 | var _ Assigner = (*StaticAssigner)(nil) 13 | 14 | // Assign returns a new partition assignment according to the assigner-specific logic. 
15 | func (s *StaticAssigner) Assign( 16 | topic string, 17 | curr []admin.PartitionAssignment, 18 | ) ([]admin.PartitionAssignment, error) { 19 | if err := admin.CheckAssignments(curr); err != nil { 20 | return nil, err 21 | } 22 | return s.Assignments, nil 23 | } 24 | -------------------------------------------------------------------------------- /pkg/apply/assigners/static_single_rack.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/segmentio/topicctl/pkg/admin" 7 | "github.com/segmentio/topicctl/pkg/apply/pickers" 8 | ) 9 | 10 | // StaticSingleRackAssigner is an Assigner that assigns replicas within a static rack per 11 | // partition. This might be useful for cases where we need particular partitions in particular 12 | // racks, but don't care about the per-replica placement (assuming that the rack is ok). 13 | // 14 | // The following algorithm is used: 15 | // 16 | // for each partition: 17 | // for each replica: 18 | // if replica not in the desired (static) rack: 19 | // change the replica to a placeholder (-1) 20 | // 21 | // then: 22 | // 23 | // for each partition: 24 | // for each replica: 25 | // if replica set to the placeholder: 26 | // use picker to pick a broker from the set of all brokers in the target rack 27 | // 28 | // In the case of ties, the lowest indexed broker is picked (if randomize is false) or 29 | // a repeatably random choice (if randomize is true). 30 | type StaticSingleRackAssigner struct { 31 | rackAssignments []string 32 | brokers []admin.BrokerInfo 33 | brokerRacks map[int]string 34 | brokersPerRack map[string][]int 35 | picker pickers.Picker 36 | } 37 | 38 | var _ Assigner = (*StaticSingleRackAssigner)(nil) 39 | 40 | // NewStaticSingleRackAssigner returns a new StaticSingleRackAssigner instance. 
41 | func NewStaticSingleRackAssigner( 42 | brokers []admin.BrokerInfo, 43 | rackAssignments []string, 44 | picker pickers.Picker, 45 | ) *StaticSingleRackAssigner { 46 | return &StaticSingleRackAssigner{ 47 | rackAssignments: rackAssignments, 48 | brokers: brokers, 49 | brokerRacks: admin.BrokerRacks(brokers), 50 | brokersPerRack: admin.BrokersPerRack(brokers), 51 | picker: picker, 52 | } 53 | } 54 | 55 | // Assign returns a new partition assignment according to the assigner-specific logic. 56 | func (s *StaticSingleRackAssigner) Assign( 57 | topic string, 58 | curr []admin.PartitionAssignment, 59 | ) ([]admin.PartitionAssignment, error) { 60 | if err := admin.CheckAssignments(curr); err != nil { 61 | return nil, err 62 | } 63 | 64 | // Check to make sure that the static assignments are valid racks and that there are enough 65 | // brokers per rack. 66 | for _, rack := range s.rackAssignments { 67 | rackCount := len(s.brokersPerRack[rack]) 68 | 69 | if rackCount == 0 { 70 | return nil, fmt.Errorf("Could not find any brokers for rack %s", rack) 71 | } else if rackCount < len(curr[0].Replicas) { 72 | return nil, fmt.Errorf( 73 | "Rack %s does not have enough brokers for in-rack placement", 74 | rack, 75 | ) 76 | } 77 | } 78 | 79 | desired := admin.CopyAssignments(curr) 80 | 81 | // First, null-out any replicas that are in the wrong rack 82 | for i := 0; i < len(desired); i++ { 83 | targetRack := s.rackAssignments[i] 84 | 85 | for j := 0; j < len(desired[i].Replicas); j++ { 86 | replica := desired[i].Replicas[j] 87 | 88 | if s.brokerRacks[replica] != targetRack { 89 | desired[i].Replicas[j] = -1 90 | } 91 | } 92 | } 93 | 94 | // Then, go back and replace all of the marked replicas 95 | for i := 0; i < len(desired); i++ { 96 | targetRack := s.rackAssignments[i] 97 | 98 | for j := 0; j < len(desired[i].Replicas); j++ { 99 | if desired[i].Replicas[j] == -1 { 100 | err := s.picker.PickNew( 101 | topic, 102 | s.brokersPerRack[targetRack], 103 | desired, 104 | i, 105 | j, 
106 | ) 107 | if err != nil { 108 | return nil, err 109 | } 110 | } 111 | } 112 | } 113 | 114 | return desired, nil 115 | } 116 | -------------------------------------------------------------------------------- /pkg/apply/assigners/static_single_rack_test.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/topicctl/pkg/admin" 7 | "github.com/segmentio/topicctl/pkg/apply/pickers" 8 | "github.com/segmentio/topicctl/pkg/config" 9 | ) 10 | 11 | func TestStaticSingleRackAssignerThreePartitions(t *testing.T) { 12 | brokers := testBrokers(12, 3) 13 | 14 | staticAssignments := []string{ 15 | "zone1", 16 | "zone2", 17 | "zone3", 18 | } 19 | 20 | assigner := NewStaticSingleRackAssigner( 21 | brokers, 22 | staticAssignments, 23 | pickers.NewLowestIndexPicker(), 24 | ) 25 | checker := func(result []admin.PartitionAssignment) bool { 26 | ok, _ := EvaluateAssignments( 27 | result, 28 | brokers, 29 | config.TopicPlacementConfig{ 30 | Strategy: config.PlacementStrategyStaticInRack, 31 | StaticRackAssignments: staticAssignments, 32 | }, 33 | ) 34 | return ok 35 | } 36 | 37 | testCases := []assignerTestCase{ 38 | { 39 | description: "Already correct racks", 40 | curr: [][]int{ 41 | {1, 4, 7}, 42 | {2, 5, 8}, 43 | {3, 6, 9}, 44 | }, 45 | expected: [][]int{ 46 | {1, 4, 7}, 47 | {2, 5, 8}, 48 | {3, 6, 9}, 49 | }, 50 | checker: checker, 51 | }, 52 | { 53 | description: "Single partition change", 54 | curr: [][]int{ 55 | {1, 4, 7}, 56 | {2, 5, 8}, 57 | {4, 5, 9}, 58 | }, 59 | expected: [][]int{ 60 | {1, 4, 7}, 61 | {2, 5, 8}, 62 | {3, 6, 9}, 63 | }, 64 | checker: checker, 65 | }, 66 | } 67 | 68 | for _, testCase := range testCases { 69 | testCase.evaluate(t, assigner) 70 | } 71 | } 72 | 73 | func TestStaticSingleRackAssignerNinePartitions(t *testing.T) { 74 | brokers := testBrokers(12, 3) 75 | 76 | staticAssignments := []string{ 77 | "zone1", 78 | "zone2", 79 | "zone3", 80 
| "zone1", 81 | "zone2", 82 | "zone3", 83 | "zone1", 84 | "zone2", 85 | "zone3", 86 | } 87 | 88 | assigner := NewStaticSingleRackAssigner( 89 | brokers, 90 | staticAssignments, 91 | pickers.NewLowestIndexPicker(), 92 | ) 93 | checker := func(result []admin.PartitionAssignment) bool { 94 | ok, _ := EvaluateAssignments( 95 | result, 96 | brokers, 97 | config.TopicPlacementConfig{ 98 | Strategy: config.PlacementStrategyStaticInRack, 99 | StaticRackAssignments: staticAssignments, 100 | }, 101 | ) 102 | return ok 103 | } 104 | 105 | testCases := []assignerTestCase{ 106 | { 107 | description: "Multiple changes", 108 | curr: [][]int{ 109 | {1, 4, 7}, 110 | // Multi-rack 111 | {3, 5, 8}, 112 | // Multi-rack 113 | {3, 5, 9}, 114 | // Multi-rack 115 | {1, 3, 8}, 116 | {2, 5, 8}, 117 | // Multi-rack 118 | {3, 5, 9}, 119 | {1, 4, 7}, 120 | {2, 5, 8}, 121 | // Multi-rack 122 | {3, 5, 9}, 123 | }, 124 | expected: [][]int{ 125 | {1, 4, 7}, 126 | // Skip over broker 2 for this position because it's already a leader for 127 | // other partitions, and broker 5 and 8 because they're already in this partition 128 | {11, 5, 8}, 129 | {3, 6, 9}, 130 | // Skip over broker 4 for second position since it's already used 131 | {1, 7, 4}, 132 | {2, 5, 8}, 133 | // Skip over broker 6 since it's already been used in this position 134 | {3, 12, 9}, 135 | {1, 4, 7}, 136 | {2, 5, 8}, 137 | // Then, come back to broker 6 138 | {3, 6, 9}, 139 | }, 140 | checker: checker, 141 | }, 142 | } 143 | 144 | for _, testCase := range testCases { 145 | testCase.evaluate(t, assigner) 146 | } 147 | } 148 | 149 | func TestStaticSingleRackAssignerNinePartitionsRandomized(t *testing.T) { 150 | brokers := testBrokers(12, 3) 151 | 152 | staticAssignments := []string{ 153 | "zone1", 154 | "zone2", 155 | "zone3", 156 | "zone1", 157 | "zone2", 158 | "zone3", 159 | "zone1", 160 | "zone2", 161 | "zone3", 162 | } 163 | 164 | assigner := NewStaticSingleRackAssigner( 165 | brokers, 166 | staticAssignments, 167 | 
pickers.NewRandomizedPicker(), 168 | ) 169 | checker := func(result []admin.PartitionAssignment) bool { 170 | ok, _ := EvaluateAssignments( 171 | result, 172 | brokers, 173 | config.TopicPlacementConfig{ 174 | Strategy: config.PlacementStrategyStaticInRack, 175 | StaticRackAssignments: staticAssignments, 176 | }, 177 | ) 178 | return ok 179 | } 180 | 181 | testCases := []assignerTestCase{ 182 | { 183 | description: "Multiple changes", 184 | curr: [][]int{ 185 | {1, 4, 7}, 186 | // Multi-rack 187 | {3, 5, 8}, 188 | // Multi-rack 189 | {3, 5, 9}, 190 | // Multi-rack 191 | {1, 3, 8}, 192 | {2, 5, 8}, 193 | // Multi-rack 194 | {3, 5, 9}, 195 | {1, 4, 7}, 196 | {2, 5, 8}, 197 | // Multi-rack 198 | {3, 5, 9}, 199 | }, 200 | expected: [][]int{ 201 | {1, 4, 7}, 202 | // Skip over broker 2 for this position because it's already a leader for 203 | // other partitions, and broker 5 and 8 because they're already in this partition 204 | {11, 5, 8}, 205 | {3, 6, 9}, 206 | // Skip over broker 4 for second position since it's already used 207 | {1, 10, 4}, 208 | {2, 5, 8}, 209 | // Skip over broker 6 since it's already been used in this position 210 | {3, 12, 9}, 211 | {1, 4, 7}, 212 | {2, 5, 8}, 213 | {3, 12, 9}, 214 | }, 215 | checker: checker, 216 | }, 217 | } 218 | 219 | for _, testCase := range testCases { 220 | testCase.evaluate(t, assigner) 221 | } 222 | } 223 | -------------------------------------------------------------------------------- /pkg/apply/assigners/static_test.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/topicctl/pkg/admin" 7 | ) 8 | 9 | func TestStaticAssigner(t *testing.T) { 10 | assigner := &StaticAssigner{ 11 | Assignments: admin.ReplicasToAssignments( 12 | [][]int{ 13 | {1, 2, 3}, 14 | {3, 4, 5}, 15 | {5, 6, 7}, 16 | }, 17 | ), 18 | } 19 | 20 | testCases := []assignerTestCase{ 21 | { 22 | curr: [][]int{ 23 | {1, 2, 3}, 24 | {2, 4, 5}, 
25 | }, 26 | expected: [][]int{ 27 | {1, 2, 3}, 28 | {3, 4, 5}, 29 | {5, 6, 7}, 30 | }, 31 | }, 32 | } 33 | 34 | for _, testCase := range testCases { 35 | testCase.evaluate(t, assigner) 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /pkg/apply/assigners/testing.go: -------------------------------------------------------------------------------- 1 | package assigners 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/segmentio/topicctl/pkg/admin" 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | type assignerTestCase struct { 13 | description string 14 | topic string 15 | curr [][]int 16 | expected [][]int 17 | checker func([]admin.PartitionAssignment) bool 18 | err error 19 | } 20 | 21 | func (a assignerTestCase) evaluate(t *testing.T, assigner Assigner) { 22 | desired, err := assigner.Assign( 23 | a.topic, 24 | admin.ReplicasToAssignments(a.curr), 25 | ) 26 | if a.err != nil { 27 | require.NotNil(t, err, a.description) 28 | } else { 29 | require.Nil(t, err, a.description) 30 | 31 | replicas, err := admin.AssignmentsToReplicas(desired) 32 | require.NoError(t, err) 33 | 34 | assert.NoError(t, admin.CheckAssignments(desired), a.description) 35 | assert.Equal( 36 | t, 37 | a.expected, 38 | replicas, 39 | a.description, 40 | ) 41 | if a.checker != nil { 42 | assert.True(t, a.checker(desired), a.description) 43 | } 44 | } 45 | } 46 | 47 | func testBrokers(numBrokers int, numRacks int) []admin.BrokerInfo { 48 | brokers := []admin.BrokerInfo{} 49 | 50 | for b := 0; b < numBrokers; b++ { 51 | brokers = append( 52 | brokers, 53 | admin.BrokerInfo{ 54 | ID: b + 1, 55 | Rack: fmt.Sprintf("zone%d", (b%numRacks)+1), 56 | }, 57 | ) 58 | } 59 | 60 | return brokers 61 | } 62 | -------------------------------------------------------------------------------- /pkg/apply/extenders/balanced.go: -------------------------------------------------------------------------------- 
1 | package extenders 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/segmentio/topicctl/pkg/admin" 7 | "github.com/segmentio/topicctl/pkg/apply/pickers" 8 | log "github.com/sirupsen/logrus" 9 | ) 10 | 11 | // BalancedExtender adds extra partition assignments in a "balanced" way. The current 12 | // algorithm is: 13 | // 14 | // for each new partition: 15 | // set the leader rack to the next rack in the cycle 16 | // choose the leader using the picker 17 | // for each follower: 18 | // set the rack to either the same one as the leader (if inRack true) or the next one in the 19 | // cycle (if inRack false) 20 | // pick the follower using the picker 21 | type BalancedExtender struct { 22 | brokers []admin.BrokerInfo 23 | inRack bool 24 | picker pickers.Picker 25 | racks []string 26 | brokerRacks map[int]string 27 | brokersPerRack map[string][]int 28 | } 29 | 30 | var _ Extender = (*BalancedExtender)(nil) 31 | 32 | // NewBalancedExtender returns a new BalancedExtender instance. 33 | func NewBalancedExtender( 34 | brokers []admin.BrokerInfo, 35 | inRack bool, 36 | picker pickers.Picker, 37 | ) *BalancedExtender { 38 | return &BalancedExtender{ 39 | brokers: brokers, 40 | inRack: inRack, 41 | picker: picker, 42 | racks: admin.DistinctRacks(brokers), 43 | brokerRacks: admin.BrokerRacks(brokers), 44 | brokersPerRack: admin.BrokersPerRack(brokers), 45 | } 46 | } 47 | 48 | // Extend returns partition assignments for the extension of the argument topic. 49 | func (b *BalancedExtender) Extend( 50 | topic string, 51 | curr []admin.PartitionAssignment, 52 | extraPartitions int, 53 | ) ([]admin.PartitionAssignment, error) { 54 | if extraPartitions%len(b.racks) != 0 { 55 | log.Warnf("Extra partitions are not a multiple of the number of racks, balancing will not be ideal") 56 | } 57 | 58 | if b.inRack { 59 | // Check to make sure that the number of brokers in each rack is >= number of 60 | // replicas. Otherwise, we won't be able to find a feasible assignment. 
61 | for rack, brokers := range b.brokersPerRack { 62 | if len(brokers) < len(curr[0].Replicas) { 63 | return nil, fmt.Errorf( 64 | "Rack %s does not have enough brokers for in-rack placement", 65 | rack, 66 | ) 67 | } 68 | } 69 | } 70 | 71 | desired := admin.CopyAssignments(curr) 72 | 73 | for i := 0; i < extraPartitions; i++ { 74 | partitionID := i + len(curr) 75 | nextAssignment := admin.PartitionAssignment{ 76 | ID: partitionID, 77 | Replicas: []int{}, 78 | } 79 | 80 | // Put in placeholders for replicas 81 | for j := 0; j < len(curr[0].Replicas); j++ { 82 | nextAssignment.Replicas = append( 83 | nextAssignment.Replicas, 84 | -1, 85 | ) 86 | } 87 | 88 | desired = append(desired, nextAssignment) 89 | 90 | // Iterate over the positions of each of the replicas 91 | for j := 0; j < len(curr[0].Replicas); j++ { 92 | var nextRack string 93 | 94 | if b.inRack { 95 | nextRack = b.racks[i%len(b.racks)] 96 | } else { 97 | nextRack = b.racks[(i+j)%len(b.racks)] 98 | } 99 | 100 | err := b.picker.PickNew( 101 | topic, 102 | b.brokersPerRack[nextRack], 103 | desired, 104 | partitionID, 105 | j, 106 | ) 107 | if err != nil { 108 | return nil, err 109 | } 110 | } 111 | 112 | } 113 | 114 | return desired, nil 115 | } 116 | -------------------------------------------------------------------------------- /pkg/apply/extenders/balanced_test.go: -------------------------------------------------------------------------------- 1 | package extenders 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/topicctl/pkg/admin" 7 | "github.com/segmentio/topicctl/pkg/apply/assigners" 8 | "github.com/segmentio/topicctl/pkg/apply/pickers" 9 | "github.com/segmentio/topicctl/pkg/config" 10 | ) 11 | 12 | func TestBalancedExtenderCrossRack(t *testing.T) { 13 | brokers := testBrokers(12, 3) 14 | extender := NewBalancedExtender(brokers, false, pickers.NewRandomizedPicker()) 15 | checker := func(result []admin.PartitionAssignment) bool { 16 | ok, _ := assigners.EvaluateAssignments( 17 | result, 
18 | brokers, 19 | config.TopicPlacementConfig{ 20 | Strategy: config.PlacementStrategyBalancedLeaders, 21 | }, 22 | ) 23 | return ok 24 | } 25 | 26 | testCases := []extenderTestCase{ 27 | { 28 | description: "Add partitions", 29 | topic: "test-topic", 30 | curr: [][]int{ 31 | {1, 2, 3}, 32 | {2, 3, 4}, 33 | {3, 4, 5}, 34 | }, 35 | extraPartitions: 6, 36 | expected: [][]int{ 37 | {1, 2, 3}, 38 | {2, 3, 4}, 39 | {3, 4, 5}, 40 | {7, 11, 9}, 41 | {8, 12, 1}, 42 | {6, 1, 8}, 43 | {10, 5, 6}, 44 | {5, 9, 10}, 45 | {12, 10, 11}, 46 | }, 47 | checker: checker, 48 | }, 49 | } 50 | 51 | for _, testCase := range testCases { 52 | testCase.evaluate(t, extender) 53 | } 54 | } 55 | 56 | func TestBalancedExtenderInRack(t *testing.T) { 57 | brokers := testBrokers(12, 3) 58 | extender := NewBalancedExtender(brokers, true, pickers.NewRandomizedPicker()) 59 | checker := func(result []admin.PartitionAssignment) bool { 60 | ok, _ := assigners.EvaluateAssignments( 61 | result, 62 | brokers, 63 | config.TopicPlacementConfig{ 64 | Strategy: config.PlacementStrategyInRack, 65 | }, 66 | ) 67 | return ok 68 | } 69 | 70 | testCases := []extenderTestCase{ 71 | { 72 | description: "Add partitions", 73 | topic: "test-topic", 74 | curr: [][]int{ 75 | {1, 4, 7}, 76 | {2, 5, 8}, 77 | {3, 6, 9}, 78 | }, 79 | extraPartitions: 6, 80 | expected: [][]int{ 81 | {1, 4, 7}, 82 | {2, 5, 8}, 83 | {3, 6, 9}, 84 | {7, 10, 4}, 85 | {8, 11, 2}, 86 | {6, 3, 12}, 87 | {10, 7, 1}, 88 | {5, 8, 11}, 89 | {12, 9, 6}, 90 | }, 91 | checker: checker, 92 | }, 93 | } 94 | 95 | for _, testCase := range testCases { 96 | testCase.evaluate(t, extender) 97 | } 98 | } 99 | 100 | func TestBalancedExtenderInRackPartitionCountNotMultipleOfRacks(t *testing.T) { 101 | brokers := testBrokers(12, 3) 102 | extender := NewBalancedExtender(brokers, true, pickers.NewRandomizedPicker()) 103 | checker := func(result []admin.PartitionAssignment) bool { 104 | ok, _ := assigners.EvaluateAssignments( 105 | result, 106 | brokers, 107 | 
config.TopicPlacementConfig{ 108 | Strategy: config.PlacementStrategyInRack, 109 | }, 110 | ) 111 | return ok 112 | } 113 | 114 | testCases := []extenderTestCase{ 115 | { 116 | description: "Add partitions", 117 | topic: "test-topic", 118 | curr: [][]int{ 119 | {1, 4, 7}, 120 | {2, 5, 8}, 121 | {3, 6, 9}, 122 | {7, 10, 4}, 123 | }, 124 | extraPartitions: 5, 125 | expected: [][]int{ 126 | {1, 4, 7}, 127 | {2, 5, 8}, 128 | {3, 6, 9}, 129 | {7, 10, 4}, 130 | {4, 1, 10}, 131 | {5, 2, 11}, 132 | {12, 9, 6}, 133 | {10, 7, 1}, 134 | {11, 8, 5}, 135 | }, 136 | checker: checker, 137 | }, 138 | } 139 | 140 | for _, testCase := range testCases { 141 | testCase.evaluate(t, extender) 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /pkg/apply/extenders/extender.go: -------------------------------------------------------------------------------- 1 | package extenders 2 | 3 | import "github.com/segmentio/topicctl/pkg/admin" 4 | 5 | // Extender is an interface for structs that determine how 6 | // to add new partitions to an existing topic. 7 | type Extender interface { 8 | Extend( 9 | topic string, 10 | currAssignments []admin.PartitionAssignment, 11 | newPartitions int, 12 | ) ([]admin.PartitionAssignment, error) 13 | } 14 | -------------------------------------------------------------------------------- /pkg/apply/extenders/static.go: -------------------------------------------------------------------------------- 1 | package extenders 2 | 3 | import "github.com/segmentio/topicctl/pkg/admin" 4 | 5 | // StaticExtender is an Extender that ignores the current state and assigns 6 | // based on the value of the Assignments field. Generally intended for testing 7 | // purposes. 8 | type StaticExtender struct { 9 | Assignments []admin.PartitionAssignment 10 | } 11 | 12 | var _ Extender = (*StaticExtender)(nil) 13 | 14 | // Extend returns partition assignments for the extension of the argument topic. 
15 | func (s *StaticExtender) Extend( 16 | topic string, 17 | curr []admin.PartitionAssignment, 18 | newPartitions int, 19 | ) ([]admin.PartitionAssignment, error) { 20 | if err := admin.CheckAssignments(curr); err != nil { 21 | return nil, err 22 | } 23 | return s.Assignments, nil 24 | } 25 | -------------------------------------------------------------------------------- /pkg/apply/extenders/static_test.go: -------------------------------------------------------------------------------- 1 | package extenders 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/segmentio/topicctl/pkg/admin" 7 | ) 8 | 9 | func TestStaticExtender(t *testing.T) { 10 | extender := &StaticExtender{ 11 | Assignments: admin.ReplicasToAssignments( 12 | [][]int{ 13 | {1, 2, 3}, 14 | {3, 4, 5}, 15 | {5, 6, 7}, 16 | {8, 9, 10}, 17 | }, 18 | ), 19 | } 20 | 21 | testCases := []extenderTestCase{ 22 | { 23 | curr: [][]int{ 24 | {1, 2, 3}, 25 | {2, 4, 5}, 26 | }, 27 | expected: [][]int{ 28 | {1, 2, 3}, 29 | {3, 4, 5}, 30 | {5, 6, 7}, 31 | {8, 9, 10}, 32 | }, 33 | extraPartitions: 2, 34 | }, 35 | } 36 | 37 | for _, testCase := range testCases { 38 | testCase.evaluate(t, extender) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /pkg/apply/extenders/testing.go: -------------------------------------------------------------------------------- 1 | package extenders 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/segmentio/topicctl/pkg/admin" 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | type extenderTestCase struct { 13 | description string 14 | topic string 15 | curr [][]int 16 | extraPartitions int 17 | expected [][]int 18 | checker func([]admin.PartitionAssignment) bool 19 | err error 20 | } 21 | 22 | func (e extenderTestCase) evaluate(t *testing.T, extender Extender) { 23 | desired, err := extender.Extend( 24 | e.topic, 25 | admin.ReplicasToAssignments(e.curr), 26 | 
e.extraPartitions, 27 | ) 28 | if e.err != nil { 29 | assert.Error(t, err, e.description) 30 | } else { 31 | replicas, err := admin.AssignmentsToReplicas(desired) 32 | require.Nil(t, err, e.description) 33 | 34 | assert.NoError(t, err, e.description) 35 | assert.NoError(t, admin.CheckAssignments(desired), e.description) 36 | assert.Equal( 37 | t, 38 | e.expected, 39 | replicas, 40 | e.description, 41 | ) 42 | if e.checker != nil { 43 | assert.True(t, e.checker(desired), e.description) 44 | } 45 | } 46 | } 47 | 48 | func testBrokers(numBrokers int, numRacks int) []admin.BrokerInfo { 49 | brokers := []admin.BrokerInfo{} 50 | 51 | for b := 0; b < numBrokers; b++ { 52 | brokers = append( 53 | brokers, 54 | admin.BrokerInfo{ 55 | ID: b + 1, 56 | Rack: fmt.Sprintf("zone%d", (b%numRacks)+1), 57 | }, 58 | ) 59 | } 60 | 61 | return brokers 62 | } 63 | -------------------------------------------------------------------------------- /pkg/apply/format.go: -------------------------------------------------------------------------------- 1 | package apply 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "strconv" 8 | "strings" 9 | 10 | "github.com/olekukonko/tablewriter" 11 | "github.com/segmentio/kafka-go" 12 | "github.com/segmentio/topicctl/pkg/config" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | // FormatNewTopicConfig generates a pretty string representation of a kafka-go 17 | // topic config. 18 | func FormatNewTopicConfig(config kafka.TopicConfig) string { 19 | content, err := json.MarshalIndent(config, "", " ") 20 | if err != nil { 21 | log.Warnf("Error marshalling topic config: %+v", err) 22 | return "Error" 23 | } 24 | 25 | return string(content) 26 | } 27 | 28 | // FormatSettingsDiff generates a table that summarizes the differences between 29 | // the topic settings from a topic config and the settings from ZK. 
30 | func FormatSettingsDiff( 31 | topicSettings config.TopicSettings, 32 | configMap map[string]string, 33 | diffKeys []string, 34 | ) (string, error) { 35 | buf := &bytes.Buffer{} 36 | 37 | table := tablewriter.NewWriter(buf) 38 | 39 | headers := []string{ 40 | "Key", 41 | "Cluster Value (Curr)", 42 | "Config Value (New)", 43 | } 44 | 45 | table.SetHeader(headers) 46 | 47 | table.SetAutoWrapText(false) 48 | table.SetColumnAlignment( 49 | []int{ 50 | tablewriter.ALIGN_LEFT, 51 | tablewriter.ALIGN_LEFT, 52 | tablewriter.ALIGN_LEFT, 53 | }, 54 | ) 55 | table.SetBorders( 56 | tablewriter.Border{ 57 | Left: false, 58 | Top: true, 59 | Right: false, 60 | Bottom: true, 61 | }, 62 | ) 63 | 64 | for _, diffKey := range diffKeys { 65 | configValueStr := configMap[diffKey] 66 | 67 | var valueStr string 68 | var err error 69 | 70 | if topicSettings.HasKey(diffKey) { 71 | valueStr, err = topicSettings.GetValueStr(diffKey) 72 | if err != nil { 73 | return "", err 74 | } 75 | } 76 | 77 | // Add a human-formatted minutes suffix to time-related fields 78 | if strings.HasSuffix(diffKey, ".ms") { 79 | configValueStr = fmt.Sprintf("%s%s", configValueStr, timeSuffix(configValueStr)) 80 | valueStr = fmt.Sprintf("%s%s", valueStr, timeSuffix(valueStr)) 81 | } 82 | 83 | row := []string{ 84 | diffKey, 85 | configValueStr, 86 | valueStr, 87 | } 88 | 89 | table.Append(row) 90 | } 91 | 92 | table.Render() 93 | return string(bytes.TrimRight(buf.Bytes(), "\n")), nil 94 | } 95 | 96 | // FormatMissingKeys generates a table that summarizes the key/value pairs 97 | // that are set in the config in ZK but missing from the topic config. 
// timeSuffix returns a human-readable minutes annotation (e.g. " (5 min)",
// with a leading space) for a millisecond string that parses to a whole,
// positive number of minutes. For non-numeric input, values under one minute,
// or values that are not an exact multiple of a minute, it returns "".
func timeSuffix(msStr string) string {
	const msPerMin = 60000

	ms, err := strconv.ParseInt(msStr, 10, 64)
	switch {
	case err != nil, ms < msPerMin, ms%msPerMin != 0:
		return ""
	default:
		return fmt.Sprintf(" (%d min)", ms/msPerMin)
	}
}
11 | type ClusterUsePicker struct { 12 | brokerCountsByPosition []map[int]int 13 | } 14 | 15 | var _ Picker = (*ClusterUsePicker)(nil) 16 | 17 | // NewClusterUsePicker generates a new picker from the argument brokers and topics. 18 | func NewClusterUsePicker( 19 | brokers []admin.BrokerInfo, 20 | topics []admin.TopicInfo, 21 | ) *ClusterUsePicker { 22 | // Map from position -> broker -> count 23 | brokerCountsByPosition := []map[int]int{} 24 | maxReplicas := admin.MaxReplication(topics) 25 | 26 | for i := 0; i < maxReplicas; i++ { 27 | positionMap := map[int]int{} 28 | 29 | for _, broker := range brokers { 30 | positionMap[broker.ID] = 0 31 | } 32 | 33 | brokerCountsByPosition = append( 34 | brokerCountsByPosition, 35 | positionMap, 36 | ) 37 | } 38 | 39 | for _, topic := range topics { 40 | for _, partition := range topic.Partitions { 41 | for r, replica := range partition.Replicas { 42 | brokerCountsByPosition[r][replica]++ 43 | } 44 | } 45 | } 46 | 47 | return &ClusterUsePicker{ 48 | brokerCountsByPosition: brokerCountsByPosition, 49 | } 50 | } 51 | 52 | // PickNew updates the replica for the argument partition and index, using the choices in 53 | // brokerChoices. 54 | func (c *ClusterUsePicker) PickNew( 55 | topic string, 56 | brokerChoices []int, 57 | curr []admin.PartitionAssignment, 58 | partition int, 59 | index int, 60 | ) error { 61 | return pickNewByPositionFrequency( 62 | topic, 63 | brokerChoices, 64 | curr, 65 | partition, 66 | index, 67 | c.keySorter(index, true), 68 | ) 69 | } 70 | 71 | // SortRemovals sorts the argument partitions in order of priority for removing the broker 72 | // at the argument index. 
// SortRemovals sorts the argument partitions in order of priority for removing the broker
// at the argument index. It delegates to sortRemovalsByPositionFrequency with a
// descending key sorter, so partitions holding more heavily used brokers sort first.
func (c *ClusterUsePicker) SortRemovals(
	topic string,
	partitionChoices []int,
	curr []admin.PartitionAssignment,
	index int,
) error {
	return sortRemovalsByPositionFrequency(
		topic,
		partitionChoices,
		curr,
		index,
		c.keySorter(index, false),
	)
}

// ScoreBroker returns an integer score for the given broker at the provided partition and index.
// The score is the cluster-wide count (computed at construction time) of how often
// this broker appears in the given replica position; the topic and partition
// arguments are ignored.
func (c *ClusterUsePicker) ScoreBroker(
	topic string,
	brokerID int,
	partition int,
	index int,
) int {
	return c.brokerCountsByPosition[index][brokerID]
}

// keySorter returns a util.KeySorter that orders broker IDs by their cluster-wide
// usage count at the given replica position, ascending when asc is true and
// descending otherwise. Keys are first obtained in sorted order via
// util.SortedKeys; note that sort.Slice is documented as unstable, so brokers
// with equal counts are not guaranteed to keep that initial order.
func (c *ClusterUsePicker) keySorter(index int, asc bool) util.KeySorter {
	return func(input map[int]int) []int {
		keys := util.SortedKeys(input)

		sort.Slice(keys, func(a, b int) bool {
			if asc {
				return c.brokerCountsByPosition[index][keys[a]] <
					c.brokerCountsByPosition[index][keys[b]]
			}
			return c.brokerCountsByPosition[index][keys[a]] >
				c.brokerCountsByPosition[index][keys[b]]
		})

		return keys
	}
}
}, 39 | } 40 | 41 | picker := NewClusterUsePicker(brokers, topics) 42 | 43 | testCases := []pickNewTestCase{ 44 | { 45 | description: "Simple replacement, part 1", 46 | topic: "test-topic", 47 | brokerChoices: []int{1, 2, 3}, 48 | curr: [][]int{ 49 | {1, 5, 4}, 50 | {2, -1, 4}, 51 | {2, 1, 5}, 52 | }, 53 | partition: 1, 54 | index: 1, 55 | // Of the feasible choices, 3 is the least used in position 1 56 | expectedChoice: 3, 57 | }, 58 | { 59 | description: "Simple replacement, part 2", 60 | topic: "test-topic", 61 | brokerChoices: []int{1, 2, 3, 8}, 62 | curr: [][]int{ 63 | {1, 5, 4}, 64 | {2, -1, 4}, 65 | {2, 1, 5}, 66 | }, 67 | partition: 1, 68 | index: 1, 69 | // Of the feasible choices, 8 is the least used in position 1 70 | expectedChoice: 8, 71 | }, 72 | { 73 | description: "Simple replacement, part 3", 74 | topic: "test-topic", 75 | brokerChoices: []int{1, 2, 3, 4}, 76 | curr: [][]int{ 77 | {1, 5, 4}, 78 | {6, 7, -1}, 79 | {2, 1, 5}, 80 | }, 81 | partition: 1, 82 | index: 2, 83 | // Of the feasible choices, 2 is the least used in position 1 84 | expectedChoice: 2, 85 | }, 86 | { 87 | description: "Not feasible, part 1", 88 | topic: "test-topic", 89 | brokerChoices: []int{2, 7}, 90 | curr: [][]int{ 91 | {1, 5, 4}, 92 | {2, 7, 4}, 93 | {2, 1, 5}, 94 | }, 95 | partition: 1, 96 | index: 2, 97 | expectedErr: true, 98 | }, 99 | { 100 | description: "Not feasible, part 2", 101 | topic: "test-topic", 102 | brokerChoices: []int{}, 103 | curr: [][]int{ 104 | {1, 5, 4}, 105 | {2, 3, 4}, 106 | {2, 1, 5}, 107 | }, 108 | partition: 1, 109 | index: 2, 110 | expectedErr: true, 111 | }, 112 | } 113 | 114 | for _, testCase := range testCases { 115 | testCase.evaluate(t, picker) 116 | } 117 | } 118 | 119 | func TestClusterUsePickerSortRemovals(t *testing.T) { 120 | brokers := testBrokers(12, 3) 121 | topics := []admin.TopicInfo{ 122 | { 123 | Name: "test-topic1", 124 | Partitions: []admin.PartitionInfo{ 125 | { 126 | ID: 0, 127 | Replicas: []int{1, 2, 3}, 128 | }, 129 | { 130 
| ID: 1, 131 | Replicas: []int{2, 7, 3}, 132 | }, 133 | }, 134 | }, 135 | { 136 | Name: "test-topic2", 137 | Partitions: []admin.PartitionInfo{ 138 | { 139 | ID: 0, 140 | Replicas: []int{3, 8, 1}, 141 | }, 142 | { 143 | ID: 1, 144 | Replicas: []int{3, 5, 8}, 145 | }, 146 | }, 147 | }, 148 | } 149 | 150 | picker := NewClusterUsePicker(brokers, topics) 151 | 152 | testCases := []sortRemovalsTestCase{ 153 | { 154 | description: "No tie-breaking required", 155 | topic: "test-topic", 156 | partitionChoices: []int{0, 1, 2, 3, 4, 5}, 157 | curr: [][]int{ 158 | {1, 5, 4}, 159 | {3, 5, 4}, 160 | {3, 1, 5}, 161 | {2, 1, 4}, 162 | {2, 4, 5}, 163 | {3, 4, 7}, 164 | }, 165 | index: 0, 166 | expectedOrdering: []int{1, 2, 5, 3, 4, 0}, 167 | }, 168 | { 169 | description: "Simple sort", 170 | topic: "test-topic", 171 | partitionChoices: []int{0, 2, 3, 4, 5}, 172 | curr: [][]int{ 173 | {1, 5, 4}, 174 | {3, 5, 4}, 175 | {3, 1, 5}, 176 | {2, 1, 4}, 177 | {2, 4, 5}, 178 | {3, 4, 7}, 179 | }, 180 | index: 0, 181 | // Break tie in favor of 3 since it's more overrepresented in the cluster as a whole 182 | expectedOrdering: []int{2, 5, 3, 4, 0}, 183 | }, 184 | } 185 | 186 | for _, testCase := range testCases { 187 | testCase.evaluate(t, picker) 188 | } 189 | } 190 | 191 | func TestClusterUsePickerScoreBroker(t *testing.T) { 192 | brokers := testBrokers(12, 3) 193 | topics := []admin.TopicInfo{ 194 | { 195 | Name: "test-topic1", 196 | Partitions: []admin.PartitionInfo{ 197 | { 198 | ID: 0, 199 | Replicas: []int{1, 2, 3}, 200 | }, 201 | { 202 | ID: 1, 203 | Replicas: []int{2, 7, 3}, 204 | }, 205 | }, 206 | }, 207 | { 208 | Name: "test-topic2", 209 | Partitions: []admin.PartitionInfo{ 210 | { 211 | ID: 0, 212 | Replicas: []int{3, 8, 1}, 213 | }, 214 | { 215 | ID: 1, 216 | Replicas: []int{3, 5, 8}, 217 | }, 218 | }, 219 | }, 220 | } 221 | 222 | picker := NewClusterUsePicker(brokers, topics) 223 | score := picker.ScoreBroker("test-topic3", 3, 1, 0) 224 | assert.Equal(t, 2, score) 225 | } 226 | 
// LowestIndexPicker is a picker that uses broker index to break ties.
type LowestIndexPicker struct{}

var _ Picker = (*LowestIndexPicker)(nil)

// NewLowestIndexPicker returns a new LowestIndexPicker instance.
func NewLowestIndexPicker() *LowestIndexPicker {
	return &LowestIndexPicker{}
}

// PickNew updates the replica for the argument partition and index, using the choices in
// brokerChoices. Candidates are ranked by how often they already appear in this
// position, with ties broken in ascending broker ID order via util.SortedKeys.
func (l *LowestIndexPicker) PickNew(
	topic string,
	brokerChoices []int,
	curr []admin.PartitionAssignment,
	partition int,
	index int,
) error {
	return pickNewByPositionFrequency(
		topic,
		brokerChoices,
		curr,
		partition,
		index,
		util.SortedKeys,
	)
}

// SortRemovals sorts the argument partitions in order of priority for removing the broker
// at the argument index. Ties between equally frequent brokers are broken in
// ascending broker ID order via util.SortedKeys.
func (l *LowestIndexPicker) SortRemovals(
	topic string,
	partitionChoices []int,
	curr []admin.PartitionAssignment,
	index int,
) error {
	return sortRemovalsByPositionFrequency(
		topic,
		partitionChoices,
		curr,
		index,
		util.SortedKeys,
	)
}
// ScoreBroker returns an integer score for the given broker at the provided partition and index.
// For this picker the score is simply the broker ID itself; the topic,
// partition, and index arguments are ignored.
func (l *LowestIndexPicker) ScoreBroker(
	topic string,
	brokerID int,
	partition int,
	index int,
) int {
	return brokerID
}
[][]int{ 81 | {1, 5, 4}, 82 | {3, 5, 4}, 83 | {3, 1, 5}, 84 | {2, 1, 4}, 85 | {2, 4, 5}, 86 | {3, 4, 7}, 87 | }, 88 | index: 0, 89 | expectedOrdering: []int{3, 4, 1, 2, 0}, 90 | }, 91 | } 92 | 93 | for _, testCase := range testCases { 94 | testCase.evaluate(t, picker) 95 | } 96 | } 97 | 98 | func TestLowestIndexPickerScoreBroker(t *testing.T) { 99 | picker := NewLowestIndexPicker() 100 | score := picker.ScoreBroker("test-topic", 2, 3, 4) 101 | assert.Equal(t, score, 2) 102 | } 103 | -------------------------------------------------------------------------------- /pkg/apply/pickers/picker.go: -------------------------------------------------------------------------------- 1 | package pickers 2 | 3 | import ( 4 | "errors" 5 | "sort" 6 | 7 | "github.com/segmentio/topicctl/pkg/admin" 8 | "github.com/segmentio/topicctl/pkg/util" 9 | ) 10 | 11 | var ( 12 | // ErrNoFeasibleChoice is returned by a picker when there is no feasible choice among 13 | // the offered possibilities. 14 | ErrNoFeasibleChoice = errors.New("Picker could not find a feasible choice") 15 | ) 16 | 17 | // Picker is an interface that picks a replica assignment based on arbitrary criteria (e.g., 18 | // the current number of brokers in the given index). It's used by assigners and extenders to 19 | // make choices, subject to specific constraints (e.g., must be in certain rack). 20 | type Picker interface { 21 | // PickNew is the primary method used for assignment and extension. It chooses a new 22 | // replica for the given partition and index and directly modifies the argument assignments. 23 | PickNew( 24 | topic string, 25 | brokerChoices []int, 26 | curr []admin.PartitionAssignment, 27 | partition int, 28 | index int, 29 | ) error 30 | 31 | // SortRemovals is a helper for choosing which replica in a set of partitions to replace. 32 | // Because the actual replacement logic is somewhat complex at the moment, the interface 33 | // is a little different than the PickNew function above. 
// pickNewByPositionFrequency chooses a replacement replica for
// curr[partition].Replicas[index] from brokerChoices. Candidates are ranked by
// how many partitions already use them in the same replica position (ascending),
// with ties broken by the supplied keySorter; the first candidate not already
// present in the partition's replica set is written into the assignment.
// Returns ErrNoFeasibleChoice if there are no candidates or every candidate
// already appears in the partition.
func pickNewByPositionFrequency(
	topic string,
	brokerChoices []int,
	curr []admin.PartitionAssignment,
	partition int,
	index int,
	keySorter util.KeySorter,
) error {
	if len(brokerChoices) == 0 {
		return ErrNoFeasibleChoice
	}

	brokerChoicesMap := map[int]struct{}{}
	for _, choice := range brokerChoices {
		brokerChoicesMap[choice] = struct{}{}
	}

	brokerCounts := map[int]int{}

	// Seed with zeros so unused candidates still appear in the sort.
	for _, choice := range brokerChoices {
		brokerCounts[choice] = 0
	}

	// Get counts for each feasible broker
	for p := 0; p < len(curr); p++ {
		replica := curr[p].Replicas[index]
		if _, ok := brokerChoicesMap[replica]; ok {
			brokerCounts[replica]++
		}
	}

	// Sort by count ascending, using index to break ties
	sortedBrokers := util.SortedKeysByValue(brokerCounts, true, keySorter)

	// Replace with the first feasible broker
	for _, broker := range sortedBrokers {
		if curr[partition].Index(broker) == -1 {
			curr[partition].Replicas[index] = broker
			return nil
		}
	}

	return ErrNoFeasibleChoice
}

// sortRemovalsByPositionFrequency sorts partitionChoices in place so that
// partitions whose replica at the given index belongs to a more frequently used
// broker come first; keySorter breaks frequency ties. Negative (placeholder)
// replicas are excluded from the counts, so they fall back to the brokerRanks
// map zero value of 0 and tie with the highest-frequency broker.
func sortRemovalsByPositionFrequency(
	topic string,
	partitionChoices []int,
	curr []admin.PartitionAssignment,
	index int,
	keySorter util.KeySorter,
) error {
	if len(partitionChoices) == 0 {
		return ErrNoFeasibleChoice
	}

	brokerCounts := map[int]int{}

	for _, partition := range partitionChoices {
		replica := curr[partition].Replicas[index]
		if replica >= 0 {
			brokerCounts[replica]++
		}
	}

	// Sort by count descending, using index to break ties
	sortedBrokers := util.SortedKeysByValue(brokerCounts, false, keySorter)
	brokerRanks := map[int]int{}
	for s, broker := range sortedBrokers {
		brokerRanks[broker] = s
	}

	// Sort partition choices in-place
	sort.Slice(partitionChoices, func(a, b int) bool {
		aPartition := partitionChoices[a]
		bPartition := partitionChoices[b]

		aReplica := curr[aPartition].Replicas[index]
		bReplica := curr[bPartition].Replicas[index]

		return brokerRanks[aReplica] < brokerRanks[bReplica]
	})

	return nil
}
// RandomizedPicker is a picker that breaks ties pseudorandomly but
// deterministically, by shuffling the candidates with a seed derived from the
// inputs (see PickNew and SortRemovals). The previous comment, copied from
// LowestIndexPicker, incorrectly described it as using broker index.
type RandomizedPicker struct{}

var _ Picker = (*RandomizedPicker)(nil)

// NewRandomizedPicker returns a new RandomizedPicker instance.
17 | func NewRandomizedPicker() *RandomizedPicker { 18 | return &RandomizedPicker{} 19 | } 20 | 21 | // PickNew updates the replica for the argument partition and index, using the choices in 22 | // brokerChoices. 23 | func (r *RandomizedPicker) PickNew( 24 | topic string, 25 | brokerChoices []int, 26 | curr []admin.PartitionAssignment, 27 | partition int, 28 | index int, 29 | ) error { 30 | keySorter := func(input map[int]int) []int { 31 | seed := fmt.Sprintf("%s-%d-%d", topic, partition, index) 32 | return util.ShuffledKeys(input, seed) 33 | } 34 | 35 | return pickNewByPositionFrequency( 36 | topic, 37 | brokerChoices, 38 | curr, 39 | partition, 40 | index, 41 | keySorter, 42 | ) 43 | } 44 | 45 | // SortRemovals sorts the argument partitions in order of priority for removing the broker 46 | // at the argument index. 47 | func (r *RandomizedPicker) SortRemovals( 48 | topic string, 49 | partitionChoices []int, 50 | curr []admin.PartitionAssignment, 51 | index int, 52 | ) error { 53 | keySorter := func(input map[int]int) []int { 54 | seed := fmt.Sprintf("%s-%+v-%d", topic, partitionChoices, index) 55 | return util.ShuffledKeys(input, seed) 56 | } 57 | return sortRemovalsByPositionFrequency( 58 | topic, 59 | partitionChoices, 60 | curr, 61 | index, 62 | keySorter, 63 | ) 64 | } 65 | 66 | // ScoreBroker returns an integer score for the given broker at the provided partition and index. 
67 | func (r *RandomizedPicker) ScoreBroker( 68 | topic string, 69 | brokerID int, 70 | partition int, 71 | index int, 72 | ) int { 73 | // Hash the string of the inputs 74 | seed := fmt.Sprintf("%s-%d-%d-%d", topic, brokerID, partition, index) 75 | hash := fnv.New32() 76 | hash.Write([]byte(seed)) 77 | return int(hash.Sum32()) 78 | } 79 | -------------------------------------------------------------------------------- /pkg/apply/pickers/testing.go: -------------------------------------------------------------------------------- 1 | package pickers 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/segmentio/topicctl/pkg/admin" 8 | "github.com/segmentio/topicctl/pkg/util" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | type pickNewTestCase struct { 14 | // Inputs 15 | topic string 16 | brokerChoices []int 17 | curr [][]int 18 | partition int 19 | index int 20 | 21 | description string 22 | expectedChoice int 23 | expectedErr bool 24 | } 25 | 26 | func (p pickNewTestCase) evaluate(t *testing.T, picker Picker) { 27 | currAssignments := admin.ReplicasToAssignments(p.curr) 28 | 29 | expectedAssignments := admin.ReplicasToAssignments(p.curr) 30 | expectedAssignments[p.partition].Replicas[p.index] = p.expectedChoice 31 | 32 | err := picker.PickNew( 33 | p.topic, 34 | p.brokerChoices, 35 | currAssignments, 36 | p.partition, 37 | p.index, 38 | ) 39 | if p.expectedErr { 40 | assert.Error(t, err, p.description) 41 | } else { 42 | require.Nil(t, err, p.description) 43 | 44 | expectedReplicas, err := admin.AssignmentsToReplicas(expectedAssignments) 45 | require.Nil(t, err, p.description) 46 | 47 | updatedReplicas, err := admin.AssignmentsToReplicas(currAssignments) 48 | require.Nil(t, err, p.description) 49 | 50 | assert.Equal( 51 | t, 52 | expectedReplicas, 53 | updatedReplicas, 54 | p.description, 55 | ) 56 | } 57 | } 58 | 59 | type sortRemovalsTestCase struct { 60 | // Inputs 61 | topic string 62 | 
// evaluate runs the picker's SortRemovals on a copy of the test case's
// partition choices and verifies the resulting ordering. It also checks that
// sorting left the replica assignments themselves untouched.
func (s sortRemovalsTestCase) evaluate(t *testing.T, picker Picker) {
	currAssignments := admin.ReplicasToAssignments(s.curr)
	expectedAssignments := admin.ReplicasToAssignments(s.curr)

	// Copy so the test case's own slice is never reordered in place.
	updatedOrdering := util.CopyInts(s.partitionChoices)

	err := picker.SortRemovals(
		s.topic,
		updatedOrdering,
		currAssignments,
		s.index,
	)
	if s.expectedErr {
		assert.Error(t, err, s.description)
	} else {
		require.Nil(t, err, s.description)

		assert.Equal(
			t,
			s.expectedOrdering,
			updatedOrdering,
		)

		// Replicas should be unchanged
		expectedReplicas, err := admin.AssignmentsToReplicas(expectedAssignments)
		require.Nil(t, err, s.description)

		updatedReplicas, err := admin.AssignmentsToReplicas(currAssignments)
		require.Nil(t, err, s.description)

		assert.Equal(
			t,
			expectedReplicas,
			updatedReplicas,
			s.description,
		)
	}
}
It also supports removing brokers (e.g., if they are to be removed 10 | // from the cluster). 11 | type Rebalancer interface { 12 | Rebalance( 13 | topic string, 14 | currAssignments []admin.PartitionAssignment, 15 | brokersToRemove []int, 16 | ) ([]admin.PartitionAssignment, error) 17 | } 18 | -------------------------------------------------------------------------------- /pkg/apply/rebalancers/testing.go: -------------------------------------------------------------------------------- 1 | package rebalancers 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/segmentio/topicctl/pkg/admin" 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | type rebalancerTestCase struct { 13 | description string 14 | topic string 15 | curr [][]int 16 | toRemove []int 17 | expected [][]int 18 | err error 19 | } 20 | 21 | func (r rebalancerTestCase) evaluate(t *testing.T, rebalancer Rebalancer) { 22 | desired, err := rebalancer.Rebalance( 23 | r.topic, 24 | admin.ReplicasToAssignments(r.curr), 25 | r.toRemove, 26 | ) 27 | if r.err != nil { 28 | require.NotNil(t, err, r.description) 29 | } else { 30 | require.Nil(t, err, r.description) 31 | 32 | replicas, err := admin.AssignmentsToReplicas(desired) 33 | require.NoError(t, err) 34 | 35 | assert.NoError(t, admin.CheckAssignments(desired), r.description) 36 | assert.Equal( 37 | t, 38 | r.expected, 39 | replicas, 40 | r.description, 41 | ) 42 | } 43 | } 44 | 45 | func testBrokers(numBrokers int, numRacks int) []admin.BrokerInfo { 46 | brokers := []admin.BrokerInfo{} 47 | 48 | for b := 0; b < numBrokers; b++ { 49 | brokers = append( 50 | brokers, 51 | admin.BrokerInfo{ 52 | ID: b + 1, 53 | Rack: fmt.Sprintf("zone%d", (b%numRacks)+1), 54 | }, 55 | ) 56 | } 57 | 58 | return brokers 59 | } 60 | -------------------------------------------------------------------------------- /pkg/check/format.go: -------------------------------------------------------------------------------- 1 | 
package check 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | 7 | "github.com/fatih/color" 8 | "github.com/olekukonko/tablewriter" 9 | "github.com/segmentio/topicctl/pkg/util" 10 | ) 11 | 12 | // FormatResults generates a pretty table from topic check results. 13 | func FormatResults(results TopicCheckResults) string { 14 | buf := &bytes.Buffer{} 15 | 16 | table := tablewriter.NewWriter(buf) 17 | 18 | table.SetHeader([]string{ 19 | "Name", 20 | "OK", 21 | "Details", 22 | }) 23 | 24 | table.SetAutoWrapText(false) 25 | table.SetColumnAlignment( 26 | []int{ 27 | tablewriter.ALIGN_LEFT, 28 | tablewriter.ALIGN_CENTER, 29 | tablewriter.ALIGN_LEFT, 30 | }, 31 | ) 32 | table.SetBorders( 33 | tablewriter.Border{ 34 | Left: false, 35 | Top: true, 36 | Right: false, 37 | Bottom: true, 38 | }, 39 | ) 40 | 41 | for _, result := range results.Results { 42 | var checkPrinter func(f string, a ...interface{}) string 43 | if result.OK || !util.InTerminal() { 44 | checkPrinter = fmt.Sprintf 45 | } else { 46 | checkPrinter = color.New(color.FgRed).SprintfFunc() 47 | } 48 | 49 | var okStr string 50 | 51 | if result.OK { 52 | okStr = "✓" 53 | } else { 54 | okStr = "✗" 55 | } 56 | 57 | table.Append( 58 | []string{ 59 | checkPrinter("%s", string(result.Name)), 60 | checkPrinter("%s", okStr), 61 | checkPrinter("%s", result.Description), 62 | }, 63 | ) 64 | } 65 | 66 | table.Render() 67 | return string(bytes.TrimRight(buf.Bytes(), "\n")) 68 | } 69 | -------------------------------------------------------------------------------- /pkg/check/result.go: -------------------------------------------------------------------------------- 1 | package check 2 | 3 | // CheckName is a string name for a topic check. 4 | type CheckName string 5 | 6 | const ( 7 | // All possible CheckName values. 
8 | CheckNameConfigsConsistent CheckName = "configs consistent" 9 | CheckNameConfigCorrect CheckName = "config correct" 10 | CheckNameConfigSettingsCorrect CheckName = "config settings correct" 11 | CheckNameLeadersCorrect CheckName = "leaders correct" 12 | CheckNamePartitionCountCorrect CheckName = "partition count correct" 13 | CheckNameReplicasInSync CheckName = "replicas in-sync" 14 | CheckNameReplicationFactorCorrect CheckName = "replication factor correct" 15 | CheckNameThrottlesClear CheckName = "throttles clear" 16 | CheckNameTopicExists CheckName = "topic exists" 17 | ) 18 | 19 | // TopicCheckResults stores the result of checking a single topic. 20 | type TopicCheckResults struct { 21 | Results []TopicCheckResult 22 | } 23 | 24 | // TopicCheckResult contains the name and status of a single check. 25 | type TopicCheckResult struct { 26 | Name CheckName 27 | OK bool 28 | Description string 29 | } 30 | 31 | // AllOK returns true if all subresults are OK, otherwise it returns false. 32 | func (r *TopicCheckResults) AllOK() bool { 33 | for _, result := range r.Results { 34 | if !result.OK { 35 | return false 36 | } 37 | } 38 | 39 | return true 40 | } 41 | 42 | // AppendResult adds a new check result to the results. 43 | func (r *TopicCheckResults) AppendResult(result TopicCheckResult) { 44 | r.Results = append(r.Results, result) 45 | } 46 | 47 | // UpdateLastResult updates the details of the most recently added result. 
48 | func (r *TopicCheckResults) UpdateLastResult(ok bool, description string) { 49 | r.Results[len(r.Results)-1].OK = ok 50 | r.Results[len(r.Results)-1].Description = description 51 | } 52 | -------------------------------------------------------------------------------- /pkg/cli/command.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | type replCommand struct { 9 | args []string 10 | flags map[string]string 11 | } 12 | 13 | func (r replCommand) getBoolValue(key string) bool { 14 | value, ok := r.flags[key] 15 | 16 | if value == "true" { 17 | return true 18 | } else if value == "" && ok { 19 | // If key is set but value is not, treat this as "true" 20 | return true 21 | } else { 22 | return false 23 | } 24 | } 25 | 26 | func (r replCommand) checkArgs( 27 | minArgs int, 28 | maxArgs int, 29 | allowedFlags map[string]struct{}, 30 | ) error { 31 | if minArgs == maxArgs { 32 | if len(r.args) != minArgs { 33 | return fmt.Errorf("Expected %d args", minArgs) 34 | } 35 | } else { 36 | if len(r.args) < minArgs || len(r.args) > maxArgs { 37 | return fmt.Errorf("Expected between %d and %d args", minArgs, maxArgs) 38 | } 39 | } 40 | 41 | for key := range r.flags { 42 | if allowedFlags == nil { 43 | return fmt.Errorf("Flag %s not recognized", key) 44 | } 45 | if _, ok := allowedFlags[key]; !ok { 46 | return fmt.Errorf("Flag %s not recognized", key) 47 | } 48 | } 49 | 50 | return nil 51 | } 52 | 53 | func parseReplInputs(input string) replCommand { 54 | args := []string{} 55 | flags := map[string]string{} 56 | 57 | components := strings.Split(input, " ") 58 | 59 | for c, component := range components { 60 | if component == "" { 61 | continue 62 | } else if c > 0 && strings.HasPrefix(component, "--") { 63 | subcomponents := strings.SplitN(component, "=", 2) 64 | key := subcomponents[0][2:] 65 | var value string 66 | if len(subcomponents) > 1 { 67 | value = subcomponents[1] 
68 | } 69 | flags[key] = value 70 | } else { 71 | args = append(args, component) 72 | } 73 | } 74 | 75 | return replCommand{ 76 | args: args, 77 | flags: flags, 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /pkg/cli/command_test.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestParseReplInputs(t *testing.T) { 10 | assert.Equal( 11 | t, 12 | replCommand{ 13 | args: []string{"arg1", "arg2"}, 14 | flags: map[string]string{}, 15 | }, 16 | parseReplInputs("arg1 arg2"), 17 | ) 18 | assert.Equal( 19 | t, 20 | replCommand{ 21 | args: []string{"--flag1=value1", "arg1", "arg2"}, 22 | flags: map[string]string{}, 23 | }, 24 | parseReplInputs("--flag1=value1 arg1 arg2"), 25 | ) 26 | assert.Equal( 27 | t, 28 | replCommand{ 29 | args: []string{"arg1", "arg2", "arg3"}, 30 | flags: map[string]string{ 31 | "flag1": "value1", 32 | "flag2": "value2", 33 | }, 34 | }, 35 | parseReplInputs("arg1 arg2 --flag1=value1 arg3 --flag2=value2"), 36 | ) 37 | } 38 | 39 | func TestGetBoolValue(t *testing.T) { 40 | command := replCommand{ 41 | flags: map[string]string{ 42 | "key1": "", 43 | "key2": "true", 44 | "key3": "false", 45 | }, 46 | } 47 | assert.True(t, command.getBoolValue("key1")) 48 | assert.True(t, command.getBoolValue("key2")) 49 | assert.False(t, command.getBoolValue("key3")) 50 | assert.False(t, command.getBoolValue("non-existent-key")) 51 | } 52 | 53 | func TestCheckArgs(t *testing.T) { 54 | command := replCommand{ 55 | args: []string{ 56 | "arg1", 57 | "arg2", 58 | }, 59 | flags: map[string]string{ 60 | "key1": "value1", 61 | }, 62 | } 63 | assert.NoError(t, command.checkArgs(2, 2, map[string]struct{}{"key1": {}})) 64 | assert.NoError(t, command.checkArgs(2, 3, map[string]struct{}{"key1": {}})) 65 | assert.NoError(t, command.checkArgs(1, 2, map[string]struct{}{"key1": {}})) 66 | 
assert.NoError(t, command.checkArgs(1, 2, map[string]struct{}{"key1": {}, "key2": {}})) 67 | assert.Error(t, command.checkArgs(3, 3, map[string]struct{}{"key1": {}})) 68 | assert.Error(t, command.checkArgs(3, 5, map[string]struct{}{"key1": {}})) 69 | assert.Error(t, command.checkArgs(2, 2, map[string]struct{}{"key2": {}})) 70 | assert.Error(t, command.checkArgs(2, 2, nil)) 71 | } 72 | -------------------------------------------------------------------------------- /pkg/config/acl.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/hashicorp/go-multierror" 7 | "github.com/segmentio/kafka-go" 8 | ) 9 | 10 | type ACLConfig struct { 11 | Meta ResourceMeta `json:"meta"` 12 | Spec ACLSpec `json:"spec"` 13 | } 14 | 15 | type ACLSpec struct { 16 | ACLs []ACL `json:"acls"` 17 | } 18 | 19 | type ACL struct { 20 | Resource ACLResource `json:"resource"` 21 | Operations []kafka.ACLOperationType `json:"operations"` 22 | } 23 | 24 | type ACLResource struct { 25 | Type kafka.ResourceType `json:"type"` 26 | Name string `json:"name"` 27 | PatternType kafka.PatternType `json:"patternType"` 28 | Principal string `json:"principal"` 29 | Host string `json:"host"` 30 | Permission kafka.ACLPermissionType `json:"permission"` 31 | } 32 | 33 | func (a ACLConfig) ToNewACLEntries() []kafka.ACLEntry { 34 | acls := []kafka.ACLEntry{} 35 | 36 | for _, acl := range a.Spec.ACLs { 37 | for _, operation := range acl.Operations { 38 | acls = append(acls, kafka.ACLEntry{ 39 | ResourceType: acl.Resource.Type, 40 | ResourceName: acl.Resource.Name, 41 | ResourcePatternType: acl.Resource.PatternType, 42 | Principal: acl.Resource.Principal, 43 | Host: acl.Resource.Host, 44 | Operation: operation, 45 | PermissionType: acl.Resource.Permission, 46 | }) 47 | } 48 | } 49 | return acls 50 | } 51 | 52 | // SetDefaults sets the default host and permission for each ACL in an ACL config 53 | // if these aren't set 54 
| func (a *ACLConfig) SetDefaults() { 55 | for i, acl := range a.Spec.ACLs { 56 | if acl.Resource.Host == "" { 57 | a.Spec.ACLs[i].Resource.Host = "*" 58 | } 59 | if acl.Resource.Permission == kafka.ACLPermissionTypeUnknown { 60 | a.Spec.ACLs[i].Resource.Permission = kafka.ACLPermissionTypeAllow 61 | } 62 | } 63 | } 64 | 65 | // Validate evaluates whether the ACL config is valid. 66 | func (a *ACLConfig) Validate() error { 67 | var err error 68 | 69 | err = a.Meta.Validate() 70 | 71 | for _, acl := range a.Spec.ACLs { 72 | if acl.Resource.Type == kafka.ResourceTypeUnknown { 73 | err = multierror.Append(err, errors.New("ACL resource type cannot be unknown")) 74 | } 75 | if acl.Resource.Name == "" { 76 | err = multierror.Append(err, errors.New("ACL resource name cannot be empty")) 77 | } 78 | if acl.Resource.PatternType == kafka.PatternTypeUnknown { 79 | err = multierror.Append(err, errors.New("ACL resource pattern type cannot be unknown")) 80 | } 81 | if acl.Resource.Principal == "" { 82 | err = multierror.Append(err, errors.New("ACL resource principal cannot be empty")) 83 | } 84 | 85 | for _, operation := range acl.Operations { 86 | if operation == kafka.ACLOperationTypeUnknown { 87 | err = multierror.Append(err, errors.New("ACL operation cannot be unknown")) 88 | } 89 | } 90 | } 91 | 92 | return err 93 | } 94 | -------------------------------------------------------------------------------- /pkg/config/cluster_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestClusterValidate(t *testing.T) { 10 | type testCase struct { 11 | description string 12 | clusterConfig ClusterConfig 13 | expError bool 14 | } 15 | 16 | testCases := []testCase{ 17 | { 18 | description: "all good", 19 | clusterConfig: ClusterConfig{ 20 | Meta: ClusterMeta{ 21 | Name: "test-cluster", 22 | Region: "test-region", 23 | Environment: 
"test-environment", 24 | Description: "test-description", 25 | }, 26 | Spec: ClusterSpec{ 27 | BootstrapAddrs: []string{"broker-addr"}, 28 | ZKAddrs: []string{"zk-addr"}, 29 | DefaultRetentionDropStepDurationStr: "5m", 30 | }, 31 | }, 32 | expError: false, 33 | }, 34 | { 35 | description: "missing meta fields", 36 | clusterConfig: ClusterConfig{ 37 | Meta: ClusterMeta{ 38 | Environment: "test-environment", 39 | Description: "test-description", 40 | }, 41 | Spec: ClusterSpec{ 42 | BootstrapAddrs: []string{"broker-addr"}, 43 | ZKAddrs: []string{"zk-addr"}, 44 | }, 45 | }, 46 | expError: true, 47 | }, 48 | { 49 | description: "missing bootstrap addresses", 50 | clusterConfig: ClusterConfig{ 51 | Meta: ClusterMeta{ 52 | Name: "test-cluster", 53 | Region: "test-region", 54 | Environment: "test-environment", 55 | Description: "test-description", 56 | }, 57 | Spec: ClusterSpec{ 58 | ZKAddrs: []string{"zk-addr"}, 59 | }, 60 | }, 61 | expError: true, 62 | }, 63 | { 64 | description: "missing zk addresses", 65 | clusterConfig: ClusterConfig{ 66 | Meta: ClusterMeta{ 67 | Name: "test-cluster", 68 | Region: "test-region", 69 | Environment: "test-environment", 70 | Description: "test-description", 71 | }, 72 | Spec: ClusterSpec{ 73 | BootstrapAddrs: []string{"broker-addr"}, 74 | }, 75 | }, 76 | expError: false, 77 | }, 78 | { 79 | description: "bad retention drop format", 80 | clusterConfig: ClusterConfig{ 81 | Meta: ClusterMeta{ 82 | Name: "test-cluster", 83 | Region: "test-region", 84 | Environment: "test-environment", 85 | Description: "test-description", 86 | }, 87 | Spec: ClusterSpec{ 88 | BootstrapAddrs: []string{"broker-addr"}, 89 | ZKAddrs: []string{"zk-addr"}, 90 | DefaultRetentionDropStepDurationStr: "10xxx", 91 | }, 92 | }, 93 | expError: true, 94 | }, 95 | { 96 | description: "secrets manager set", 97 | clusterConfig: ClusterConfig{ 98 | Meta: ClusterMeta{ 99 | Name: "test-cluster", 100 | Region: "test-region", 101 | Environment: "test-environment", 102 | 
Description: "test-description", 103 | }, 104 | Spec: ClusterSpec{ 105 | BootstrapAddrs: []string{"broker-addr"}, 106 | ZKAddrs: []string{"zk-addr"}, 107 | SASL: SASLConfig{ 108 | Enabled: true, 109 | Mechanism: "plain", 110 | SecretsManagerArn: "arn:aws:secretsmanager:::secret:SecretName-xxxxxx", 111 | }, 112 | }, 113 | }, 114 | expError: true, 115 | }, 116 | { 117 | description: "secrets manager cannot be set with username and password", 118 | clusterConfig: ClusterConfig{ 119 | Meta: ClusterMeta{ 120 | Name: "test-cluster", 121 | Region: "test-region", 122 | Environment: "test-environment", 123 | Description: "test-description", 124 | }, 125 | Spec: ClusterSpec{ 126 | BootstrapAddrs: []string{"broker-addr"}, 127 | ZKAddrs: []string{"zk-addr"}, 128 | SASL: SASLConfig{ 129 | Enabled: true, 130 | Mechanism: "plain", 131 | Username: "user", 132 | Password: "password", 133 | SecretsManagerArn: "arn:aws:secretsmanager:::secret:SecretName-xxxxxx", 134 | }, 135 | }, 136 | }, 137 | expError: true, 138 | }, 139 | } 140 | 141 | for _, testCase := range testCases { 142 | err := testCase.clusterConfig.Validate() 143 | if testCase.expError { 144 | assert.Error(t, err, testCase.description) 145 | } else { 146 | assert.NoError(t, err, testCase.description) 147 | } 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /pkg/config/load.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "os" 9 | "path/filepath" 10 | "regexp" 11 | "strings" 12 | 13 | "github.com/ghodss/yaml" 14 | "github.com/hashicorp/go-multierror" 15 | ) 16 | 17 | var sep = regexp.MustCompile("(?:^|\\s*\n)---\\s*") 18 | 19 | // LoadClusterFile loads a ClusterConfig from a path to a YAML file. 
20 | func LoadClusterFile(path string, expandEnv bool) (ClusterConfig, error) { 21 | contents, err := os.ReadFile(path) 22 | if err != nil { 23 | return ClusterConfig{}, err 24 | } 25 | 26 | if expandEnv { 27 | contents = []byte(os.ExpandEnv(string(contents))) 28 | } 29 | 30 | absPath, err := filepath.Abs(path) 31 | if err != nil { 32 | return ClusterConfig{}, err 33 | } 34 | 35 | config, err := LoadClusterBytes(contents) 36 | if err != nil { 37 | return ClusterConfig{}, err 38 | } 39 | 40 | config.RootDir = filepath.Dir(absPath) 41 | return config, nil 42 | } 43 | 44 | // LoadClusterBytes loads a ClusterConfig from YAML bytes. 45 | func LoadClusterBytes(contents []byte) (ClusterConfig, error) { 46 | config := ClusterConfig{} 47 | err := unmarshalYAMLStrict(contents, &config) 48 | return config, err 49 | } 50 | 51 | // LoadTopicsFile loads one or more TopicConfigs from a path to a YAML file. 52 | func LoadTopicsFile(path string) ([]TopicConfig, error) { 53 | contents, err := os.ReadFile(path) 54 | if err != nil { 55 | return nil, err 56 | } 57 | 58 | contents = []byte(os.ExpandEnv(string(contents))) 59 | 60 | trimmedFile := strings.TrimSpace(string(contents)) 61 | topicStrs := sep.Split(trimmedFile, -1) 62 | 63 | topicConfigs := []TopicConfig{} 64 | 65 | for _, topicStr := range topicStrs { 66 | topicStr = strings.TrimSpace(topicStr) 67 | if isEmpty(topicStr) { 68 | continue 69 | } 70 | 71 | topicConfig, err := LoadTopicBytes([]byte(topicStr)) 72 | if err != nil { 73 | return nil, err 74 | } 75 | 76 | topicConfigs = append(topicConfigs, topicConfig) 77 | } 78 | 79 | return topicConfigs, nil 80 | } 81 | 82 | // LoadTopicBytes loads a TopicConfig from YAML bytes. 83 | func LoadTopicBytes(contents []byte) (TopicConfig, error) { 84 | config := TopicConfig{} 85 | err := unmarshalYAMLStrict(contents, &config) 86 | fmt.Println(config) 87 | return config, err 88 | } 89 | 90 | // LoadACLsFile loads one or more ACLConfigs from a path to a YAML file. 
91 | func LoadACLsFile(path string) ([]ACLConfig, error) { 92 | contents, err := os.ReadFile(path) 93 | if err != nil { 94 | return nil, err 95 | } 96 | 97 | contents = []byte(os.ExpandEnv(string(contents))) 98 | 99 | trimmedFile := strings.TrimSpace(string(contents)) 100 | aclStrs := sep.Split(trimmedFile, -1) 101 | 102 | aclConfigs := []ACLConfig{} 103 | 104 | for _, aclStr := range aclStrs { 105 | aclStr = strings.TrimSpace(aclStr) 106 | if isEmpty(aclStr) { 107 | continue 108 | } 109 | 110 | aclConfig, err := LoadACLBytes([]byte(aclStr)) 111 | if err != nil { 112 | return nil, err 113 | } 114 | 115 | aclConfigs = append(aclConfigs, aclConfig) 116 | } 117 | 118 | return aclConfigs, nil 119 | } 120 | 121 | // LoadACLBytes loads an ACLConfig from YAML bytes. 122 | func LoadACLBytes(contents []byte) (ACLConfig, error) { 123 | config := ACLConfig{} 124 | err := unmarshalYAMLStrict(contents, &config) 125 | return config, err 126 | } 127 | 128 | // CheckConsistency verifies that the argument topic config is consistent with the argument 129 | // cluster, e.g. has the same environment and region, etc. 
130 | func CheckConsistency(resourceMeta ResourceMeta, clusterConfig ClusterConfig) error { 131 | var err error 132 | 133 | if resourceMeta.Cluster != clusterConfig.Meta.Name { 134 | err = multierror.Append( 135 | err, 136 | errors.New("Topic cluster name does not match name in cluster config"), 137 | ) 138 | } 139 | if resourceMeta.Environment != clusterConfig.Meta.Environment { 140 | err = multierror.Append( 141 | err, 142 | errors.New("Topic environment does not match cluster environment"), 143 | ) 144 | } 145 | if resourceMeta.Region != clusterConfig.Meta.Region { 146 | err = multierror.Append( 147 | err, 148 | errors.New("Topic region does not match cluster region"), 149 | ) 150 | } 151 | 152 | return err 153 | } 154 | 155 | func isEmpty(contents string) bool { 156 | lines := strings.Split(contents, "\n") 157 | for _, line := range lines { 158 | trimmedLine := strings.TrimSpace(line) 159 | if len(trimmedLine) > 0 && !strings.HasPrefix(trimmedLine, "#") { 160 | return false 161 | } 162 | } 163 | 164 | return true 165 | } 166 | 167 | func unmarshalYAMLStrict(y []byte, o interface{}) error { 168 | jsonBytes, err := yaml.YAMLToJSON(y) 169 | if err != nil { 170 | return err 171 | } 172 | dec := json.NewDecoder(bytes.NewReader(jsonBytes)) 173 | dec.DisallowUnknownFields() 174 | return dec.Decode(o) 175 | } 176 | -------------------------------------------------------------------------------- /pkg/config/meta.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/hashicorp/go-multierror" 7 | ) 8 | 9 | // ResourceMeta stores the (mostly immutable) metadata associated with a resource. 10 | // Inspired by the meta structs in Kubernetes objects. 
11 | type ResourceMeta struct { 12 | Name string `json:"name"` 13 | Cluster string `json:"cluster"` 14 | Region string `json:"region"` 15 | Environment string `json:"environment"` 16 | Description string `json:"description"` 17 | Labels map[string]string `json:"labels"` 18 | 19 | // Consumers is a list of consumers who are expected to consume from this 20 | // topic. 21 | Consumers []string `json:"consumers,omitempty"` 22 | } 23 | 24 | // Validate evalutes whether the ResourceMeta is valid. 25 | func (rm *ResourceMeta) Validate() error { 26 | var err error 27 | if rm.Name == "" { 28 | err = multierror.Append(err, errors.New("Name must be set")) 29 | } 30 | if rm.Cluster == "" { 31 | err = multierror.Append(err, errors.New("Cluster must be set")) 32 | } 33 | if rm.Region == "" { 34 | err = multierror.Append(err, errors.New("Region must be set")) 35 | } 36 | if rm.Environment == "" { 37 | err = multierror.Append(err, errors.New("Environment must be set")) 38 | } 39 | return err 40 | } 41 | -------------------------------------------------------------------------------- /pkg/config/meta_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestMetaValidate(t *testing.T) { 10 | type testCase struct { 11 | description string 12 | meta ResourceMeta 13 | expError bool 14 | } 15 | 16 | testCases := []testCase{ 17 | { 18 | description: "valid meta", 19 | meta: ResourceMeta{ 20 | Name: "test-topic", 21 | Cluster: "test-cluster", 22 | Region: "test-region", 23 | Environment: "test-environment", 24 | Description: "test-description", 25 | }, 26 | expError: false, 27 | }, 28 | { 29 | description: "meta missing fields", 30 | meta: ResourceMeta{ 31 | Name: "test-topic", 32 | Environment: "test-environment", 33 | Description: "Bootstrapped via topicctl bootstrap", 34 | }, 35 | expError: true, 36 | }, 37 | } 38 | 39 | for _, testCase 
:= range testCases { 40 | err := testCase.meta.Validate() 41 | if testCase.expError { 42 | assert.Error(t, err, testCase.description) 43 | } else { 44 | assert.NoError(t, err, testCase.description) 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /pkg/config/testdata/test-cluster/acls/acl-test-invalid.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: acl-test 3 | cluster: test-cluster 4 | environment: test-env 5 | region: test-region 6 | description: | 7 | Test acl 8 | 9 | spec: 10 | acls: 11 | - resource: 12 | type: topic 13 | name: test-topic 14 | patternType: literal 15 | operations: 16 | - read 17 | - describe 18 | - resource: 19 | type: group 20 | name: test-group 21 | patternType: invalid 22 | operations: 23 | - read 24 | -------------------------------------------------------------------------------- /pkg/config/testdata/test-cluster/acls/acl-test-multi.yaml: -------------------------------------------------------------------------------- 1 | # This is an empty config. 
2 | --- 3 | meta: 4 | name: acl-test1 5 | cluster: test-cluster 6 | environment: test-env 7 | region: test-region 8 | description: | 9 | Test acl 10 | 11 | spec: 12 | acls: 13 | - resource: 14 | type: topic 15 | name: test-topic 16 | patternType: literal 17 | operations: 18 | - read 19 | - describe 20 | - resource: 21 | type: group 22 | name: test-group 23 | patternType: prefixed 24 | operations: 25 | - read 26 | --- 27 | meta: 28 | name: acl-test2 29 | cluster: test-cluster 30 | environment: test-env 31 | region: test-region 32 | description: | 33 | Test acl 34 | 35 | spec: 36 | acls: 37 | - resource: 38 | type: topic 39 | name: test-topic 40 | patternType: literal 41 | operations: 42 | - read 43 | - describe 44 | - resource: 45 | type: group 46 | name: test-group 47 | patternType: prefixed 48 | operations: 49 | - read 50 | --- 51 | # Another empty one 52 | 53 | -------------------------------------------------------------------------------- /pkg/config/testdata/test-cluster/acls/acl-test-no-match.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: acl-test-no-match 3 | cluster: test-cluster 4 | environment: bad-env 5 | region: test-region 6 | description: | 7 | Test acl 8 | 9 | spec: 10 | acls: 11 | - resource: 12 | type: topic 13 | name: test-topic 14 | patternType: literal 15 | principal: 'User:Alice' 16 | host: "*" 17 | permission: allow 18 | operations: 19 | - read 20 | - describe 21 | - resource: 22 | type: group 23 | name: test-group 24 | patternType: prefixed 25 | principal: 'User:Alice' 26 | host: "*" 27 | permission: allow 28 | operations: 29 | - read 30 | -------------------------------------------------------------------------------- /pkg/config/testdata/test-cluster/acls/acl-test.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: acl-test 3 | cluster: test-cluster 4 | environment: test-env 5 | region: test-region 6 | description: | 7 | Test 
acl 8 | 9 | spec: 10 | acls: 11 | - resource: 12 | type: topic 13 | name: test-topic 14 | patternType: literal 15 | principal: 'User:Alice' 16 | host: "*" 17 | permission: allow 18 | operations: 19 | - read 20 | - describe 21 | - resource: 22 | type: group 23 | name: test-group 24 | patternType: prefixed 25 | principal: 'User:Alice' 26 | host: "*" 27 | permission: allow 28 | operations: 29 | - read 30 | -------------------------------------------------------------------------------- /pkg/config/testdata/test-cluster/cluster-extra-fields.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: test-cluster 3 | environment: test-env 4 | region: $K2_TEST_ENV_VAR 5 | description: | 6 | Test cluster 7 | extraField: value1 8 | anotherExtraField: value2 9 | 10 | spec: 11 | bootstrapAddrs: 12 | - bootstrap-addr:9092 13 | zkAddrs: 14 | - zk-addr:2181 15 | zkPrefix: "/test-cluster-id" 16 | zkLockPath: /topicctl/locks 17 | -------------------------------------------------------------------------------- /pkg/config/testdata/test-cluster/cluster-invalid.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: test-cluster 3 | environment: test-env 4 | region: test-region 5 | description: | 6 | Test cluster 7 | 8 | spec: 9 | clusterID: test-cluster-id 10 | zkAddrs: 11 | - localhost:2181 12 | tls: 13 | enabled: true 14 | -------------------------------------------------------------------------------- /pkg/config/testdata/test-cluster/cluster.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: test-cluster 3 | environment: test-env 4 | region: $K2_TEST_ENV_VAR 5 | description: | 6 | Test cluster 7 | 8 | spec: 9 | bootstrapAddrs: 10 | - bootstrap-addr:9092 11 | zkAddrs: 12 | - zk-addr:2181 13 | zkPrefix: "/test-cluster-id" 14 | zkLockPath: /topicctl/locks 15 | 
-------------------------------------------------------------------------------- /pkg/config/testdata/test-cluster/topics/topic-test-invalid.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: topic-test-invalid 3 | cluster: test-cluster 4 | description: | 5 | Test topic 6 | 7 | spec: 8 | partitions: 9 9 | retentionMinutes: 100 10 | placement: 11 | strategy: non-matching 12 | -------------------------------------------------------------------------------- /pkg/config/testdata/test-cluster/topics/topic-test-multi.yaml: -------------------------------------------------------------------------------- 1 | # This is an empty config. 2 | --- 3 | meta: 4 | name: topic-test1 5 | cluster: test-cluster 6 | environment: test-env 7 | region: test-region 8 | description: | 9 | Test topic 10 | 11 | spec: 12 | partitions: 9 13 | replicationFactor: 2 14 | retentionMinutes: 100 15 | placement: 16 | strategy: in-rack 17 | settings: 18 | cleanup.policy: compact 19 | follower.replication.throttled.replicas: 20 | - "1:3" 21 | - "4:5" 22 | max.compaction.lag.ms: 12345 23 | --- 24 | meta: 25 | name: topic-test2 26 | cluster: test-cluster 27 | environment: test-env 28 | region: test-region 29 | description: | 30 | Test topic 31 | 32 | spec: 33 | partitions: 9 34 | replicationFactor: 2 35 | retentionMinutes: 100 36 | placement: 37 | strategy: in-rack 38 | settings: 39 | cleanup.policy: compact 40 | follower.replication.throttled.replicas: 41 | - "1:3" 42 | - "4:5" 43 | max.compaction.lag.ms: 12345 44 | --- 45 | # Another empty one 46 | -------------------------------------------------------------------------------- /pkg/config/testdata/test-cluster/topics/topic-test-no-match.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: topic-test-no-match 3 | cluster: local-cluster 4 | environment: bad-env 5 | region: local-region 6 | description: | 7 | Test topic 8 | 9 | spec: 10 | 
partitions: 9 11 | replicationFactor: 2 12 | retentionMinutes: 100 13 | placement: 14 | strategy: static 15 | staticAssignments: 16 | - [3, 4] 17 | - [5, 6] 18 | - [2, 1] 19 | - [2, 3] 20 | - [5, 1] 21 | - [1, 2] 22 | - [1, 3] 23 | - [5, 6] 24 | - [2, 1] 25 | -------------------------------------------------------------------------------- /pkg/config/testdata/test-cluster/topics/topic-test.yaml: -------------------------------------------------------------------------------- 1 | meta: 2 | name: topic-test 3 | cluster: test-cluster 4 | environment: test-env 5 | region: test-region 6 | description: | 7 | Test topic 8 | 9 | spec: 10 | partitions: 9 11 | replicationFactor: 2 12 | retentionMinutes: 100 13 | placement: 14 | strategy: in-rack 15 | settings: 16 | cleanup.policy: compact 17 | follower.replication.throttled.replicas: 18 | - "1:3" 19 | - "4:5" 20 | max.compaction.lag.ms: 12345 21 | -------------------------------------------------------------------------------- /pkg/create/acl.go: -------------------------------------------------------------------------------- 1 | package create 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | 9 | "github.com/segmentio/kafka-go" 10 | "github.com/segmentio/topicctl/pkg/admin" 11 | "github.com/segmentio/topicctl/pkg/config" 12 | "github.com/segmentio/topicctl/pkg/util" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | // ACLCreatorConfig contains the configuration for an ACL creator. 
17 | type ACLCreatorConfig struct { 18 | ClusterConfig config.ClusterConfig 19 | DryRun bool 20 | SkipConfirm bool 21 | ACLConfig config.ACLConfig 22 | } 23 | 24 | type ACLCreator struct { 25 | config ACLCreatorConfig 26 | adminClient admin.Client 27 | 28 | clusterConfig config.ClusterConfig 29 | aclConfig config.ACLConfig 30 | } 31 | 32 | func NewACLCreator( 33 | ctx context.Context, 34 | adminClient admin.Client, 35 | creatorConfig ACLCreatorConfig, 36 | ) (*ACLCreator, error) { 37 | if !adminClient.GetSupportedFeatures().ACLs { 38 | return nil, fmt.Errorf("ACLs are not supported by this cluster") 39 | } 40 | 41 | return &ACLCreator{ 42 | config: creatorConfig, 43 | adminClient: adminClient, 44 | clusterConfig: creatorConfig.ClusterConfig, 45 | aclConfig: creatorConfig.ACLConfig, 46 | }, nil 47 | } 48 | 49 | func (a *ACLCreator) Create(ctx context.Context) error { 50 | log.Info("Validating configs...") 51 | 52 | if err := a.clusterConfig.Validate(); err != nil { 53 | return err 54 | } 55 | 56 | if err := a.aclConfig.Validate(); err != nil { 57 | return err 58 | } 59 | 60 | if err := config.CheckConsistency(a.aclConfig.Meta, a.clusterConfig); err != nil { 61 | return err 62 | } 63 | 64 | log.Info("Checking if ACLs already exist...") 65 | 66 | acls := a.aclConfig.ToNewACLEntries() 67 | 68 | allExistingACLs := []kafka.ACLEntry{} 69 | newACLs := []kafka.ACLEntry{} 70 | 71 | for _, acl := range acls { 72 | existingACLs, err := a.adminClient.GetACLs(ctx, kafka.ACLFilter{ 73 | ResourceTypeFilter: acl.ResourceType, 74 | ResourceNameFilter: acl.ResourceName, 75 | ResourcePatternTypeFilter: acl.ResourcePatternType, 76 | PrincipalFilter: acl.Principal, 77 | HostFilter: acl.Host, 78 | Operation: acl.Operation, 79 | PermissionType: acl.PermissionType, 80 | }) 81 | if err != nil { 82 | return fmt.Errorf("error checking for existing ACL (%v): %v", acl, err) 83 | } 84 | if len(existingACLs) > 0 { 85 | allExistingACLs = append(allExistingACLs, acl) 86 | } else { 87 | newACLs = 
append(newACLs, acl) 88 | } 89 | } 90 | 91 | if len(allExistingACLs) > 0 { 92 | log.Infof( 93 | "Found %d existing ACLs:\n%s", 94 | len(allExistingACLs), 95 | formatNewACLsConfig(allExistingACLs), 96 | ) 97 | } 98 | 99 | if len(newACLs) == 0 { 100 | log.Infof("No ACLs to create") 101 | return nil 102 | } 103 | 104 | if a.config.DryRun { 105 | log.Infof( 106 | "Would create ACLs with config %+v", 107 | formatNewACLsConfig(newACLs), 108 | ) 109 | return nil 110 | } 111 | 112 | log.Infof( 113 | "It looks like these ACLs don't already exist. Will create them with this config:\n%s", 114 | formatNewACLsConfig(newACLs), 115 | ) 116 | 117 | ok, _ := util.Confirm("OK to continue?", a.config.SkipConfirm) 118 | if !ok { 119 | return errors.New("Stopping because of user response") 120 | } 121 | 122 | log.Infof("Creating new ACLs for user with config %+v", formatNewACLsConfig(newACLs)) 123 | 124 | if err := a.adminClient.CreateACLs(ctx, acls); err != nil { 125 | return fmt.Errorf("error creating new ACLs: %v", err) 126 | } 127 | 128 | return nil 129 | } 130 | 131 | // formatNewACLsConfig generates a pretty string representation of kafka-go 132 | // ACL configurations. 133 | func formatNewACLsConfig(config []kafka.ACLEntry) string { 134 | content, err := json.MarshalIndent(config, "", " ") 135 | if err != nil { 136 | log.Warnf("Error marshalling ACLs config: %+v", err) 137 | return "Error" 138 | } 139 | 140 | return string(content) 141 | } 142 | -------------------------------------------------------------------------------- /pkg/groups/types.go: -------------------------------------------------------------------------------- 1 | package groups 2 | 3 | import ( 4 | "sort" 5 | "time" 6 | 7 | log "github.com/sirupsen/logrus" 8 | ) 9 | 10 | // GroupCoordinator stores the coordinator broker for a single consumer group. 
11 | type GroupCoordinator struct { 12 | GroupID string 13 | Coordinator int 14 | Topics []string 15 | } 16 | 17 | // GroupDetails stores the state and members for a consumer group. 18 | type GroupDetails struct { 19 | GroupID string 20 | State string 21 | Members []MemberInfo 22 | } 23 | 24 | // TopicsMap returns a map of all the topics consumed by the current group. 25 | func (g GroupDetails) TopicsMap() map[string]struct{} { 26 | topicsMap := map[string]struct{}{} 27 | 28 | for _, member := range g.Members { 29 | for _, topic := range member.Topics() { 30 | topicsMap[topic] = struct{}{} 31 | } 32 | } 33 | 34 | return topicsMap 35 | } 36 | 37 | // PartitionMembers returns the members for each partition in the argument topic. 38 | func (g GroupDetails) PartitionMembers(topic string) map[int]MemberInfo { 39 | partitionsMap := map[int]MemberInfo{} 40 | 41 | for _, member := range g.Members { 42 | partitions := member.TopicPartitions[topic] 43 | if len(partitions) > 0 { 44 | for _, partition := range partitions { 45 | if _, ok := partitionsMap[partition]; ok { 46 | log.Warnf("Multiple members assigned to partition %d", partition) 47 | } 48 | 49 | partitionsMap[partition] = member 50 | } 51 | } 52 | } 53 | 54 | return partitionsMap 55 | } 56 | 57 | // MemberInfo stores information about a single consumer group member. 58 | type MemberInfo struct { 59 | MemberID string 60 | ClientID string 61 | ClientHost string 62 | TopicPartitions map[string][]int 63 | } 64 | 65 | // Topics returns a slice of all topics that the current MemberInfo is consuming from. 66 | func (m MemberInfo) Topics() []string { 67 | topics := []string{} 68 | 69 | for topic := range m.TopicPartitions { 70 | topics = append(topics, topic) 71 | } 72 | 73 | sort.Slice(topics, func(a, b int) bool { 74 | return topics[a] < topics[b] 75 | }) 76 | 77 | return topics 78 | } 79 | 80 | // MemberPartitionLag information about the lag for a single topic / partition / group member 81 | // combination. 
82 | type MemberPartitionLag struct { 83 | Topic string 84 | Partition int 85 | MemberID string 86 | NewestOffset int64 87 | NewestTime time.Time 88 | MemberOffset int64 89 | MemberTime time.Time 90 | } 91 | 92 | // OffsetLag returns the difference between the latest offset in the partition and the latest one 93 | // committed by the group member. 94 | func (m MemberPartitionLag) OffsetLag() int64 { 95 | return m.NewestOffset - m.MemberOffset 96 | } 97 | 98 | // TimeLag returns the time difference between the latest timestamp in the the partition and the 99 | // timestamp in the latest message committed by the group member. 100 | func (m MemberPartitionLag) TimeLag() time.Duration { 101 | return m.NewestTime.Sub(m.MemberTime) 102 | } 103 | 104 | // Consumer Group Offset reset strategies 105 | const ( 106 | LatestResetOffsetsStrategy string = "latest" 107 | EarliestResetOffsetsStrategy string = "earliest" 108 | ) 109 | -------------------------------------------------------------------------------- /pkg/messages/bounds_test.go: -------------------------------------------------------------------------------- 1 | package messages 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/segmentio/kafka-go" 10 | "github.com/segmentio/topicctl/pkg/admin" 11 | "github.com/segmentio/topicctl/pkg/util" 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | ) 15 | 16 | func TestGetAllPartitionBounds(t *testing.T) { 17 | ctx := context.Background() 18 | connector, err := admin.NewConnector(admin.ConnectorConfig{ 19 | BrokerAddr: util.TestKafkaAddr(), 20 | ConnTimeout: 10 * time.Second, 21 | }) 22 | require.NoError(t, err) 23 | 24 | topicName := util.RandomString("topic-bounds-", 6) 25 | _, err = connector.KafkaClient.CreateTopics( 26 | ctx, 27 | &kafka.CreateTopicsRequest{ 28 | Topics: []kafka.TopicConfig{ 29 | { 30 | Topic: topicName, 31 | NumPartitions: 4, 32 | ReplicationFactor: 1, 33 | }, 34 | }, 35 | }, 36 | 
) 37 | require.NoError(t, err) 38 | time.Sleep(200 * time.Millisecond) 39 | 40 | writer := kafka.NewWriter( 41 | kafka.WriterConfig{ 42 | Brokers: []string{connector.Config.BrokerAddr}, 43 | Dialer: connector.Dialer, 44 | Topic: topicName, 45 | Balancer: &kafka.RoundRobin{}, 46 | }, 47 | ) 48 | defer writer.Close() 49 | 50 | messages := []kafka.Message{} 51 | 52 | for i := 0; i < 10; i++ { 53 | messages = append( 54 | messages, 55 | kafka.Message{ 56 | Key: []byte(fmt.Sprintf("key%d", i)), 57 | Value: []byte(fmt.Sprintf("value%d", i)), 58 | }, 59 | ) 60 | } 61 | 62 | err = writer.WriteMessages(ctx, messages...) 63 | require.NoError(t, err) 64 | 65 | bounds, err := GetAllPartitionBounds(ctx, connector, topicName, nil) 66 | assert.NoError(t, err) 67 | 68 | // The first partition gets 3 messages. (i.e) earliest/first offset is 0 and latest/last is 3 69 | assert.Equal(t, 4, len(bounds)) 70 | assert.Equal(t, 0, bounds[0].Partition) 71 | assert.Equal(t, int64(0), bounds[0].FirstOffset) 72 | assert.Equal(t, int64(3), bounds[0].LastOffset) 73 | 74 | // The last partition gets only 2 messages. (i.e) earliest/first offset is 0 and latest/last is 2 75 | assert.Equal(t, 3, bounds[3].Partition) 76 | assert.Equal(t, int64(0), bounds[3].FirstOffset) 77 | assert.Equal(t, int64(2), bounds[3].LastOffset) 78 | 79 | boundsWithOffsets, err := GetAllPartitionBounds( 80 | ctx, 81 | connector, 82 | topicName, 83 | map[int]int64{ 84 | 0: 1, 85 | }, 86 | ) 87 | assert.NoError(t, err) 88 | 89 | assert.Equal(t, 4, len(boundsWithOffsets)) 90 | 91 | // Start of first partition is moved forward. First partition has earliest offset is 0 and latest is 3 92 | assert.Equal(t, 0, boundsWithOffsets[0].Partition) 93 | assert.Equal(t, int64(1), boundsWithOffsets[0].FirstOffset) 94 | assert.Equal(t, int64(3), boundsWithOffsets[0].LastOffset) 95 | 96 | // Other partition bounds are unchanged. 
Last partition has earliest offset is 0 and latest is 2 97 | assert.Equal(t, 3, boundsWithOffsets[3].Partition) 98 | assert.Equal(t, int64(0), boundsWithOffsets[3].FirstOffset) 99 | assert.Equal(t, int64(2), boundsWithOffsets[3].LastOffset) 100 | } 101 | -------------------------------------------------------------------------------- /pkg/messages/tail_test.go: -------------------------------------------------------------------------------- 1 | package messages 2 | 3 | import ( 4 | "context" 5 | "encoding/base64" 6 | "fmt" 7 | "testing" 8 | "time" 9 | 10 | "github.com/segmentio/kafka-go" 11 | "github.com/segmentio/topicctl/pkg/admin" 12 | "github.com/segmentio/topicctl/pkg/util" 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/require" 15 | ) 16 | 17 | func TestTailerGetMessages(t *testing.T) { 18 | ctx, cancel := context.WithCancel(context.Background()) 19 | defer cancel() 20 | 21 | connector, err := admin.NewConnector(admin.ConnectorConfig{ 22 | BrokerAddr: util.TestKafkaAddr(), 23 | ConnTimeout: 10 * time.Second, 24 | }) 25 | require.NoError(t, err) 26 | 27 | topicName := util.RandomString("topic-tail-", 6) 28 | _, err = connector.KafkaClient.CreateTopics( 29 | ctx, 30 | &kafka.CreateTopicsRequest{ 31 | Topics: []kafka.TopicConfig{ 32 | { 33 | Topic: topicName, 34 | NumPartitions: 4, 35 | ReplicationFactor: 1, 36 | }, 37 | }, 38 | }, 39 | ) 40 | require.NoError(t, err) 41 | time.Sleep(200 * time.Millisecond) 42 | 43 | writer := kafka.NewWriter( 44 | kafka.WriterConfig{ 45 | Brokers: []string{connector.Config.BrokerAddr}, 46 | Dialer: connector.Dialer, 47 | Topic: topicName, 48 | Balancer: &kafka.RoundRobin{}, 49 | }, 50 | ) 51 | defer writer.Close() 52 | 53 | messages := []kafka.Message{} 54 | 55 | for i := 0; i < 10; i++ { 56 | messages = append( 57 | messages, 58 | kafka.Message{ 59 | Key: []byte(fmt.Sprintf("key%d", i)), 60 | Value: []byte(fmt.Sprintf("value%d", i)), 61 | Headers: []kafka.Header{ 62 | { 63 | Key: "h1", 64 | Value: 
[]byte("1234"), 65 | }, 66 | { 67 | Key: "h2", 68 | Value: []byte("5678"), 69 | }, 70 | }, 71 | }, 72 | ) 73 | } 74 | 75 | err = writer.WriteMessages(ctx, messages...) 76 | require.NoError(t, err) 77 | 78 | tailer := NewTopicTailer( 79 | connector, 80 | topicName, 81 | []int{0, 1, 2, 3}, 82 | kafka.FirstOffset, 83 | 1, 84 | 1000, 85 | ) 86 | messagesChan := make(chan TailMessage) 87 | tailer.GetMessages(ctx, messagesChan) 88 | 89 | timer := time.NewTimer(5 * time.Second) 90 | 91 | messageCount := 0 92 | seenKeys := map[string]struct{}{} 93 | 94 | outerLoop: 95 | for { 96 | select { 97 | case message := <-messagesChan: 98 | assert.NoError(t, message.Err) 99 | seenKeys[string(message.Message.Key)] = struct{}{} 100 | messageCount++ 101 | 102 | if messageCount == 10 { 103 | break outerLoop 104 | } 105 | case <-timer.C: 106 | break outerLoop 107 | } 108 | } 109 | 110 | assert.Equal(t, 10, len(seenKeys)) 111 | } 112 | 113 | func TestFormatHeader(t *testing.T) { 114 | tests := []struct { 115 | headers []kafka.Header 116 | expected string 117 | }{ 118 | {}, 119 | { 120 | headers: []kafka.Header{{Key: "foo"}}, 121 | expected: "foo=", 122 | }, 123 | { 124 | headers: []kafka.Header{ 125 | {Key: "foo", Value: []byte("123")}, 126 | }, 127 | expected: "foo=" + base64.StdEncoding.EncodeToString([]byte("123")), 128 | }, 129 | { 130 | headers: []kafka.Header{ 131 | {Key: "foo", Value: []byte("123")}, 132 | {Key: "bar", Value: []byte("456")}, 133 | }, 134 | expected: "foo=" + base64.StdEncoding.EncodeToString([]byte("123")) + ", " + 135 | "bar=" + base64.StdEncoding.EncodeToString([]byte("456")), 136 | }, 137 | { 138 | headers: []kafka.Header{ 139 | {Key: "foo", Value: []byte("123")}, 140 | {Key: "bar", Value: []byte("456")}, 141 | {Key: "baz", Value: []byte("789")}, 142 | }, 143 | expected: "foo=" + base64.StdEncoding.EncodeToString([]byte("123")) + ", " + 144 | "bar=" + base64.StdEncoding.EncodeToString([]byte("456")) + ", " + 145 | "baz=" + 
base64.StdEncoding.EncodeToString([]byte("789")), 146 | }, 147 | } 148 | 149 | for _, tt := range tests { 150 | assert.Equal(t, tt.expected, formatHeaders(tt.headers)) 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /pkg/util/confirm.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | log "github.com/sirupsen/logrus" 8 | ) 9 | 10 | // Confirm shows the argument prompt to the user and returns a boolean based on whether or not 11 | // the user confirms that it's ok to continue. 12 | func Confirm(prompt string, skip bool) (bool, error) { 13 | fmt.Printf("%s (yes/no) ", prompt) 14 | 15 | if skip { 16 | log.Infof("Automatically answering yes because skip is set to true") 17 | return true, nil 18 | } 19 | 20 | var response string 21 | _, err := fmt.Scanln(&response) 22 | if err != nil { 23 | log.Warnf("Got error reading response, not continuing: %+v", err) 24 | return false, err 25 | } 26 | if strings.TrimSpace(strings.ToLower(response)) != "yes" { 27 | log.Infof("Not continuing") 28 | return false, nil 29 | } 30 | 31 | return true, nil 32 | } 33 | -------------------------------------------------------------------------------- /pkg/util/durations.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | // PrettyDuration returns a human-formatted duration string given an golang 9 | // duration value. 
10 | func PrettyDuration(duration time.Duration) string { 11 | seconds := duration.Seconds() 12 | 13 | if seconds < 1.0 { 14 | return fmt.Sprintf("%dms", duration.Milliseconds()) 15 | } else if seconds < 240.0 { 16 | return fmt.Sprintf("%ds", int(seconds)) 17 | } else if seconds < (2.0 * 60.0 * 60.0) { 18 | return fmt.Sprintf("%dm", int(duration.Minutes())) 19 | } else { 20 | return fmt.Sprintf("%dh", int(duration.Hours())) 21 | } 22 | } 23 | 24 | // PrettyRate returns a human-formatted rate from a count and a duration. 25 | func PrettyRate(count int64, duration time.Duration) string { 26 | if duration == 0 { 27 | return "" 28 | } else if count == 0 { 29 | return "0" 30 | } 31 | 32 | ratePerSec := float64(count) / duration.Seconds() 33 | ratePerMin := float64(count) / duration.Minutes() 34 | ratePerHour := float64(count) / duration.Hours() 35 | 36 | if ratePerSec >= 10.0 { 37 | return fmt.Sprintf("%d/sec", int(ratePerSec)) 38 | } else if ratePerSec >= 1.0 { 39 | return fmt.Sprintf("%0.1f/sec", ratePerSec) 40 | } else if ratePerMin >= 10.0 { 41 | return fmt.Sprintf("%d/min", int(ratePerMin)) 42 | } else if ratePerMin >= 1.0 { 43 | return fmt.Sprintf("%0.1f/min", ratePerMin) 44 | } else if ratePerHour >= 0.1 { 45 | return fmt.Sprintf("%0.1f/hour", ratePerHour) 46 | } else { 47 | return fmt.Sprintf("~0") 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /pkg/util/durations_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestPrettyDuration(t *testing.T) { 11 | type testCase struct { 12 | duration time.Duration 13 | expected string 14 | } 15 | 16 | testCases := []testCase{ 17 | { 18 | duration: 5 * time.Millisecond, 19 | expected: "5ms", 20 | }, 21 | { 22 | duration: 25*time.Second + 410*time.Millisecond, 23 | expected: "25s", 24 | }, 25 | { 26 | duration: 
30*time.Minute + 10*time.Second, 27 | expected: "30m", 28 | }, 29 | { 30 | duration: 60*6*time.Minute + 15*time.Minute, 31 | expected: "6h", 32 | }, 33 | } 34 | 35 | for _, testCaseObj := range testCases { 36 | assert.Equal( 37 | t, 38 | testCaseObj.expected, 39 | PrettyDuration(testCaseObj.duration), 40 | ) 41 | } 42 | } 43 | 44 | func TestPrettyRate(t *testing.T) { 45 | type testCase struct { 46 | count int64 47 | duration time.Duration 48 | expected string 49 | } 50 | testCases := []testCase{ 51 | { 52 | count: 300, 53 | duration: 0, 54 | expected: "", 55 | }, 56 | { 57 | count: 0, 58 | duration: time.Second, 59 | expected: "0", 60 | }, 61 | { 62 | count: 300, 63 | duration: time.Second, 64 | expected: "300/sec", 65 | }, 66 | { 67 | count: 3, 68 | duration: time.Second + 100*time.Millisecond, 69 | expected: "2.7/sec", 70 | }, 71 | { 72 | count: 35, 73 | duration: time.Minute, 74 | expected: "35/min", 75 | }, 76 | { 77 | count: 3, 78 | duration: time.Minute, 79 | expected: "3.0/min", 80 | }, 81 | { 82 | count: 3, 83 | duration: time.Hour, 84 | expected: "3.0/hour", 85 | }, 86 | { 87 | count: 1, 88 | duration: time.Hour * 1000, 89 | expected: "~0", 90 | }, 91 | } 92 | 93 | for _, testCaseObj := range testCases { 94 | assert.Equal( 95 | t, 96 | testCaseObj.expected, 97 | PrettyRate(testCaseObj.count, testCaseObj.duration), 98 | ) 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /pkg/util/error.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/segmentio/kafka-go" 7 | ) 8 | 9 | func KafkaErrorsToErr(errors map[string]error) error { 10 | var hasErrors bool 11 | for _, err := range errors { 12 | if err != nil { 13 | hasErrors = true 14 | break 15 | } 16 | } 17 | if hasErrors { 18 | return fmt.Errorf("%+v", errors) 19 | } 20 | return nil 21 | } 22 | 23 | func IncrementalAlterConfigsResponseResourcesError(resources 
[]kafka.IncrementalAlterConfigsResponseResource) error { 24 | errors := map[string]error{} 25 | var hasErrors bool 26 | for _, resource := range resources { 27 | if resource.Error != nil { 28 | hasErrors = true 29 | errors[resource.ResourceName] = resource.Error 30 | } 31 | } 32 | if hasErrors { 33 | return fmt.Errorf("%+v", errors) 34 | } 35 | return nil 36 | } 37 | 38 | func AlterPartitionReassignmentsRequestAssignmentError(results []kafka.AlterPartitionReassignmentsResponsePartitionResult) error { 39 | errors := map[int]error{} 40 | var hasErrors bool 41 | for _, result := range results { 42 | if result.Error != nil { 43 | hasErrors = true 44 | errors[result.PartitionID] = result.Error 45 | } 46 | } 47 | if hasErrors { 48 | return fmt.Errorf("%+v", errors) 49 | } 50 | return nil 51 | } 52 | -------------------------------------------------------------------------------- /pkg/util/maps.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "hash/fnv" 5 | "math/rand" 6 | "sort" 7 | ) 8 | 9 | // KeySorter is a type for a function that sorts integer keys based on their values in a map. 10 | type KeySorter func(map[int]int) []int 11 | 12 | // SortedKeys returns the keys of the argument, sorted by value. 13 | func SortedKeys(input map[int]int) []int { 14 | keys := []int{} 15 | 16 | for key := range input { 17 | keys = append(keys, key) 18 | } 19 | 20 | sort.Slice( 21 | keys, func(a, b int) bool { 22 | return keys[a] < keys[b] 23 | }, 24 | ) 25 | 26 | return keys 27 | } 28 | 29 | // ShuffledKeys returns a shuffled version of the keys in the 30 | // argument map. The provided seedStr is hashed and used to seed 31 | // the random number generator. 
32 | func ShuffledKeys(input map[int]int, seedStr string) []int { 33 | keys := SortedKeys(input) 34 | 35 | hash := fnv.New64() 36 | hash.Write([]byte(seedStr)) 37 | 38 | random := rand.New(rand.NewSource(int64(hash.Sum64()))) 39 | random.Shuffle(len(keys), func(i, j int) { 40 | keys[i], keys[j] = keys[j], keys[i] 41 | }) 42 | 43 | return keys 44 | } 45 | 46 | // SortedKeysByValue returns the keys in a map, sorted by the map values. 47 | func SortedKeysByValue(input map[int]int, asc bool, keySorter KeySorter) []int { 48 | // First, sort the keys 49 | keys := keySorter(input) 50 | 51 | // Then, sort by value 52 | if asc { 53 | sort.Slice( 54 | keys, func(a, b int) bool { 55 | return input[keys[a]] < input[keys[b]] 56 | }, 57 | ) 58 | } else { 59 | sort.Slice( 60 | keys, func(a, b int) bool { 61 | return input[keys[a]] > input[keys[b]] 62 | }, 63 | ) 64 | } 65 | 66 | return keys 67 | } 68 | -------------------------------------------------------------------------------- /pkg/util/progress.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "context" 5 | log "github.com/sirupsen/logrus" 6 | "time" 7 | ) 8 | 9 | // Rebalance topic progress Config 10 | type RebalanceTopicProgressConfig struct { 11 | TopicName string `json:"topic"` 12 | ClusterName string `json:"cluster"` 13 | ClusterEnvironment string `json:"environment"` 14 | ToRemove []int `json:"to_remove"` 15 | RebalanceError bool `json:"rebalance_error"` 16 | } 17 | 18 | // Rebalance overall progress Config 19 | type RebalanceProgressConfig struct { 20 | SuccessTopics int `json:"success_topics"` 21 | ErrorTopics int `json:"error_topics"` 22 | ClusterName string `json:"cluster"` 23 | ClusterEnvironment string `json:"environment"` 24 | ToRemove []int `json:"to_remove"` 25 | } 26 | 27 | // Rebalance Topic Round progress Config 28 | type RebalanceRoundProgressConfig struct { 29 | TopicName string `json:"topic"` 30 | ClusterName string 
`json:"cluster"` 31 | ClusterEnvironment string `json:"environment"` 32 | ToRemove []int `json:"to_remove"` 33 | CurrRound int `json:"round"` 34 | TotalRounds int `json:"total_rounds"` 35 | } 36 | 37 | // Rebalance context struct 38 | type RebalanceCtxStruct struct { 39 | Enabled bool `json:"enabled"` 40 | Interval time.Duration `json:"interval"` 41 | } 42 | 43 | // shows progress of a config repeatedly during an interval 44 | func ShowProgress( 45 | ctx context.Context, 46 | progressConfig interface{}, 47 | interval time.Duration, 48 | stopChan chan bool, 49 | ) { 50 | progressStr, err := StructToStr(progressConfig) 51 | if err != nil { 52 | log.Errorf("progress struct to string error: %+v", err) 53 | } else { 54 | // print first before ticker starts 55 | log.Infof("Rebalance Progress: %s", progressStr) 56 | } 57 | 58 | ticker := time.NewTicker(interval) 59 | defer ticker.Stop() 60 | 61 | for { 62 | select { 63 | case <-ticker.C: 64 | if err == nil { 65 | log.Infof("Rebalance Progress: %s", progressStr) 66 | } 67 | case <-stopChan: 68 | return 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /pkg/util/slices.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "reflect" 5 | ) 6 | 7 | // CopyInts copies a slice of ints. 8 | func CopyInts(input []int) []int { 9 | results := make([]int, len(input)) 10 | copy(results, input) 11 | return results 12 | } 13 | 14 | // SameElements determines whether two int slices have the 15 | // same elements (in any order). 
16 | func SameElements(slice1 []int, slice2 []int) bool { 17 | if len(slice1) != len(slice2) { 18 | return false 19 | } 20 | 21 | slice1Counts := map[int]int{} 22 | for _, s := range slice1 { 23 | slice1Counts[s]++ 24 | } 25 | 26 | slice2Counts := map[int]int{} 27 | for _, s := range slice2 { 28 | slice2Counts[s]++ 29 | } 30 | 31 | return reflect.DeepEqual(slice1Counts, slice2Counts) 32 | } 33 | -------------------------------------------------------------------------------- /pkg/util/strings.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | ) 7 | 8 | // TruncateStringSuffix truncates a string by replacing the trailing characters with 9 | // "..." if needed. 10 | func TruncateStringSuffix(input string, maxLen int) (string, int) { 11 | if len(input)-3 <= maxLen { 12 | return input, 0 13 | } 14 | 15 | numOmitted := len(input) - (maxLen - 3) 16 | return fmt.Sprintf("%s...", input[:maxLen-3]), numOmitted 17 | } 18 | 19 | // TruncateStringMiddle truncates a string by replacing characters in the middle with 20 | // "..." if needed. 
21 | func TruncateStringMiddle(input string, maxLen int, suffixLen int) (string, int) { 22 | if len(input)-3 <= maxLen { 23 | return input, 0 24 | } 25 | 26 | suffix := input[len(input)-suffixLen:] 27 | prefix := input[:maxLen-suffixLen-3] 28 | 29 | numOmitted := len(input) - len(prefix) - len(suffix) 30 | return fmt.Sprintf("%s...%s", prefix, suffix), numOmitted 31 | } 32 | 33 | // Convert any struct to json string 34 | func StructToStr(inputStruct interface{}) (string, error) { 35 | jsonBytes, err := json.Marshal(inputStruct) 36 | if err != nil { 37 | return "{}", err 38 | } 39 | 40 | return string(jsonBytes), nil 41 | } 42 | -------------------------------------------------------------------------------- /pkg/util/strings_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestTruncateStringSuffix(t *testing.T) { 10 | resultLong, omittedLong := TruncateStringSuffix("01234567890123456789", 10) 11 | assert.Equal(t, "0123456...", resultLong) 12 | assert.Equal(t, 13, omittedLong) 13 | 14 | resultShort, omittedShort := TruncateStringSuffix("012345", 10) 15 | assert.Equal(t, "012345", resultShort) 16 | assert.Equal(t, 0, omittedShort) 17 | } 18 | 19 | func TestTruncateStringMiddle(t *testing.T) { 20 | resultLong, omittedLong := TruncateStringMiddle("01234567890123456789", 10, 3) 21 | assert.Equal(t, "0123...789", resultLong) 22 | assert.Equal(t, 13, omittedLong) 23 | 24 | resultShort, omittedShort := TruncateStringMiddle("012345", 10, 3) 25 | assert.Equal(t, "012345", resultShort) 26 | assert.Equal(t, 0, omittedShort) 27 | } 28 | -------------------------------------------------------------------------------- /pkg/util/terminal.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "os" 5 | 6 | "golang.org/x/crypto/ssh/terminal" 7 | ) 8 | 9 | // 
InTerminal determines whether we're running in a terminal or not. 10 | // 11 | // Implementation from https://rosettacode.org/wiki/Check_output_device_is_a_terminal#Go. 12 | func InTerminal() bool { 13 | return terminal.IsTerminal(int(os.Stdout.Fd())) 14 | } 15 | -------------------------------------------------------------------------------- /pkg/util/testing.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "os" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func init() { 14 | rand.Seed(time.Now().UnixNano()) 15 | } 16 | 17 | // TestZKAddr returns a zookeeper address for unit testing purposes. 18 | func TestZKAddr() string { 19 | // Inside docker-compose (i.e., in CI), we need to use a different 20 | // address 21 | testZkAddr, ok := os.LookupEnv("KAFKA_TOPICS_TEST_ZK_ADDR") 22 | if !ok { 23 | return "localhost:2181" 24 | } 25 | 26 | return testZkAddr 27 | } 28 | 29 | // TestKafkaAddr returns a kafka bootstrap address for unit testing purposes. 30 | func TestKafkaAddr() string { 31 | // Inside docker-compose (i.e., in CI), we need to use a different 32 | // address 33 | testKafkaAddr, ok := os.LookupEnv("KAFKA_TOPICS_TEST_KAFKA_ADDR") 34 | if !ok { 35 | return "169.254.123.123:9092" 36 | } 37 | 38 | return testKafkaAddr 39 | } 40 | 41 | // CanTestBrokerAdmin returns whether we can test the broker-only admin client. 42 | func CanTestBrokerAdmin() bool { 43 | value, ok := os.LookupEnv("KAFKA_TOPICS_TEST_BROKER_ADMIN") 44 | if ok && value != "" { 45 | return true 46 | } 47 | 48 | return false 49 | } 50 | 51 | // CanTestBrokerAdminSecurity returns whether we can test the broker-only admin client security features. 
52 | func CanTestBrokerAdminSecurity() bool { 53 | value, ok := os.LookupEnv("KAFKA_TOPICS_TEST_BROKER_ADMIN_SECURITY") 54 | if ok && value != "" { 55 | return true 56 | } 57 | 58 | return false 59 | } 60 | 61 | var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") 62 | 63 | // RandomString returns a random string with the argument length. 64 | // 65 | // Adapted from the example in 66 | // https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-go. 67 | func RandomString(prefix string, length int) string { 68 | b := make([]rune, length) 69 | for i := range b { 70 | b[i] = letters[rand.Intn(len(letters))] 71 | } 72 | return fmt.Sprintf("%s-%s", prefix, string(b)) 73 | } 74 | 75 | // RetryUntil is a helper that will re-run the argument function multiple times (up to a 76 | // duration limit) until it no longer produces an error. 77 | func RetryUntil(t *testing.T, timeout time.Duration, f func() error) { 78 | sleepTime := 100 * time.Millisecond 79 | end := time.Now().Add(timeout) 80 | var err error 81 | 82 | for time.Now().Before(end) { 83 | time.Sleep(sleepTime) 84 | sleepTime = sleepTime * 2 85 | 86 | err = f() 87 | if err == nil { 88 | return 89 | } 90 | } 91 | 92 | require.NoError(t, err) 93 | } 94 | -------------------------------------------------------------------------------- /pkg/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | // Version is the current topicctl version. 4 | const Version = "1.20.0" 5 | -------------------------------------------------------------------------------- /pkg/zk/lock.go: -------------------------------------------------------------------------------- 1 | package zk 2 | 3 | import ( 4 | szk "github.com/samuel/go-zookeeper/zk" 5 | ) 6 | 7 | // Lock is a lock interface that's satified by the samuel zk Lock struct. 
8 | type Lock interface { 9 | Unlock() error 10 | } 11 | 12 | var _ Lock = (*szk.Lock)(nil) 13 | -------------------------------------------------------------------------------- /pkg/zk/logger.go: -------------------------------------------------------------------------------- 1 | package zk 2 | 3 | import ( 4 | szk "github.com/samuel/go-zookeeper/zk" 5 | log "github.com/sirupsen/logrus" 6 | ) 7 | 8 | // DebugLogger is a logger that satisfies the szk.Logger interface. 9 | type DebugLogger struct{} 10 | 11 | var _ szk.Logger = (*DebugLogger)(nil) 12 | 13 | // Printf sends samuel zk log messages to logrus at the debug level. 14 | func (l *DebugLogger) Printf(format string, args ...interface{}) { 15 | log.Debugf(format, args...) 16 | } 17 | -------------------------------------------------------------------------------- /pkg/zk/testing_util.go: -------------------------------------------------------------------------------- 1 | package zk 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | 7 | szk "github.com/samuel/go-zookeeper/zk" 8 | log "github.com/sirupsen/logrus" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | // PathTuple is a combination used for generating nodes in zk. For testing purposes 13 | // only. 14 | type PathTuple struct { 15 | Path string 16 | Obj interface{} 17 | } 18 | 19 | // CreateNode creates a single node at the argument path. For testing purposes only. 20 | func CreateNode(t *testing.T, zkConn *szk.Conn, path string, obj interface{}) { 21 | var data []byte 22 | var err error 23 | 24 | if obj != nil { 25 | data, err = json.Marshal(obj) 26 | require.NoError(t, err) 27 | } 28 | 29 | log.Infof("Creating path %+v", path) 30 | 31 | _, err = zkConn.Create(path, data, 0, szk.WorldACL(szk.PermAll)) 32 | require.NoError(t, err) 33 | } 34 | 35 | // CreateNodes creates nodes according to the argument PathTuples. For testing purposes only. 
36 | func CreateNodes(t *testing.T, zkConn *szk.Conn, pathTuples []PathTuple) { 37 | for _, tuple := range pathTuples { 38 | CreateNode(t, zkConn, tuple.Path, tuple.Obj) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /scripts/set_up_net_alias.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ADDR=169.254.123.123 4 | echo "Aliasing $ADDR to localhost..." 5 | 6 | UNAME=$(uname -a) 7 | case "$UNAME" in 8 | Linux*) sudo ifconfig lo:0 $ADDR netmask 255.255.255.0 up;; 9 | Darwin*) sudo ifconfig lo0 alias $ADDR;; 10 | *) exit 11 | esac 12 | 13 | if [[ $? != 0 ]] 14 | then 15 | >&2 echo "Unable to create alias" 16 | exit 1 17 | fi 18 | --------------------------------------------------------------------------------