├── internal
├── color_palette.go
├── output
│ ├── format
│ │ ├── list
│ │ │ ├── list.go
│ │ │ ├── tree.go
│ │ │ └── plain.go
│ │ ├── tabular
│ │ │ ├── table.go
│ │ │ └── column.go
│ │ └── formatters.go
│ └── output.go
├── verbosity_level.go
├── logger.go
├── verbosity_level_test.go
├── json_highlighter.go
├── counter.go
├── utils.go
├── json_message_processor.go
├── message_metadata.go
├── printer.go
└── plain_text_marshaller.go
├── _media
└── logo-small.jpg
├── .github
├── ISSUE_TEMPLATE
│ ├── question.md
│ ├── bug_report.md
│ └── feature_request.md
└── workflows
│ └── release.yml
├── kafka
├── callback.go
├── offset_store.go
├── cluster_metadata.go
├── topic_metadata.go
├── client.go
├── errors.go
├── event.go
├── shutdown_reason.go
├── config_entry.go
├── sasl_handshake_version.go
├── scram_client.go
├── partition_meta.go
├── printer_mock.go
├── topic_partition_offset.go
├── consumer_wrapper.go
├── producer.go
├── consumer_group.go
├── topic.go
├── offset.go
├── offset_store_mock.go
├── topic_partitions.go
├── bootstrap.go
├── partition_consumer_mock.go
├── broker.go
├── sasl_authentication.go
├── options.go
├── partition_offset.go
├── checkpoint.go
├── consumer_group_details.go
├── client_mock.go
├── partition_checkpoints.go
├── checkpoint_test.go
├── broker_meta.go
├── local_offset_manager.go
└── local_offset_store.go
├── release
├── windows
│ ├── build.ps1
│ ├── release.ps1
│ └── release.wxs
├── build.bash
├── release_darwin.bash
└── release_linux.bash
├── .gitignore
├── commands
├── global_params.go
├── create
│ ├── create.go
│ ├── partitions.go
│ └── topic.go
├── deletion
│ ├── delete.go
│ ├── topic.go
│ ├── group.go
│ └── local_offset.go
├── describe
│ ├── describe.go
│ ├── cluster.go
│ ├── group.go
│ └── topic.go
├── list
│ ├── list.go
│ ├── local_topics.go
│ ├── topics.go
│ ├── local_offsets.go
│ ├── groups.go
│ └── group_offsets.go
├── kafka_params.go
├── version.go
├── produce
│ ├── plain.go
│ ├── proto.go
│ └── produce.go
└── common.go
├── main.go
├── docker-compose.yml
├── .golangci.yml
├── README.md
├── Makefile
├── protobuf
├── file_finder.go
├── marshaller.go
└── loader.go
├── go.mod
├── release_notes.md
└── application.go
/internal/color_palette.go:
--------------------------------------------------------------------------------
1 | package internal
2 |
--------------------------------------------------------------------------------
/_media/logo-small.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xitonix/trubka/HEAD/_media/logo-small.jpg
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Question
3 | about: Ask a question about any feature or command
4 | title: "[Question]: "
5 | labels: question
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
--------------------------------------------------------------------------------
/kafka/callback.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import "time"
4 |
5 | // Callback the function which will get called upon receiving a message from Kafka.
6 | type Callback func(topic string, partition int32, offset int64, time time.Time, key, value []byte) error
7 |
--------------------------------------------------------------------------------
/kafka/offset_store.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
// offsetStore abstracts the storage used to track consumption progress
// per topic/partition.
type offsetStore interface {
	// start begins the store's background processing, seeded with the
	// previously persisted offsets.
	start(loaded TopicPartitionOffset)
	// commit records the given offset for a topic/partition pair.
	commit(topic string, partition int32, offset int64) error
	// read returns the stored partition offsets of the given topic.
	read(topic string) (PartitionOffset, error)
	// errors exposes asynchronous failures reported by the store.
	errors() <-chan error
	// close releases any resources held by the store.
	close()
}
10 |
--------------------------------------------------------------------------------
/release/windows/build.ps1:
--------------------------------------------------------------------------------
1 | param($binary,$version)
2 |
3 | $buildTime = (Get-Date).ToUniversalTime() | Get-Date -UFormat '%Y-%m-%dT%TZ'
4 | $env:GOOS = "windows"
5 | $env:GOARCH = "amd64"
6 | go build -o "$binary.exe" -ldflags="-s -w -X main.version=v$version -X main.commit=$env:GITHUB_SHA -X 'main.built=$buildTime'" ..\..\.
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 |
8 | # Test binary, build with `go test -c`
9 | *.test
10 |
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 | trubka
14 | .idea/
15 | .DS_Store
16 | cmd/producer/producer
17 | bin
18 | *.pprof
--------------------------------------------------------------------------------
/kafka/cluster_metadata.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
// ClusterMetadata Kafka cluster metadata.
type ClusterMetadata struct {
	// Brokers the list of the brokers within the cluster. Omitted from the
	// JSON output when empty.
	Brokers []*Broker `json:"brokers,omitempty"`
	// ConfigEntries cluster configuration entries. Omitted from the JSON
	// output when empty.
	ConfigEntries []*ConfigEntry `json:"configurations,omitempty"`
}
10 |
--------------------------------------------------------------------------------
/kafka/topic_metadata.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
// TopicMetadata holds metadata for a topic.
type TopicMetadata struct {
	// Partitions a list of all the partitions. Omitted from the JSON output
	// when empty.
	Partitions []*PartitionMeta `json:"partitions,omitempty"`
	// ConfigEntries a list of topic configurations stored on the server.
	// Omitted from the JSON output when empty.
	ConfigEntries []*ConfigEntry `json:"configurations,omitempty"`
}
10 |
--------------------------------------------------------------------------------
/internal/output/format/list/list.go:
--------------------------------------------------------------------------------
1 | package list
2 |
// List defines a list interface.
type List interface {
	// Render prints the list to stdout.
	Render()
	// AddItem adds a new item at the current indentation level.
	AddItem(item interface{})
	// AddItemF adds a new item, formatted fmt.Sprintf-style.
	AddItemF(format string, a ...interface{})
	// Indent adds one level of indentation.
	Indent()
	// UnIndent removes one level of indentation.
	UnIndent()
}
11 |
12 | // New creates a new list.
13 | func New(plain bool) List {
14 | if plain {
15 | return newPlain()
16 | }
17 | return newTree()
18 | }
19 |
--------------------------------------------------------------------------------
/commands/global_params.go:
--------------------------------------------------------------------------------
1 | package commands
2 |
3 | import "github.com/xitonix/trubka/internal"
4 |
// GlobalParameters holds the app's global parameters available to all the sub-commands.
type GlobalParameters struct {
	// Verbosity logging verbosity level (set by repeating the -v flag).
	Verbosity internal.VerbosityLevel
	// EnableColor enables coloured output across all the sub-commands.
	EnableColor bool
}
12 |
--------------------------------------------------------------------------------
/kafka/client.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "io"
5 |
6 | "github.com/Shopify/sarama"
7 | )
8 |
// client abstracts the subset of Sarama client/consumer behaviour used by
// this package (and allows it to be mocked in tests).
type client interface {
	// Partitions returns the partition IDs of the given topic.
	Partitions(topic string) ([]int32, error)
	// ConsumePartition starts consuming the given topic/partition from the specified offset.
	ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error)
	// Topics returns the topics available on the cluster.
	Topics() ([]string, error)
	// GetOffset returns the most recent available offset at the given time (in milliseconds).
	GetOffset(topic string, partitionID int32, time int64) (int64, error)
	io.Closer
}
16 |
--------------------------------------------------------------------------------
/kafka/errors.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import "errors"
4 |
var (
	// ErrEmptyEnvironment occurs when the provided environment is empty.
	ErrEmptyEnvironment = errors.New("the environment cannot be empty")

	// ErrEmptyTopic occurs when the provided topic is empty.
	ErrEmptyTopic = errors.New("the topic cannot be empty")

	// errOutOfRangeOffset an internal sentinel indicating that a requested
	// offset is out of range.
	errOutOfRangeOffset = errors.New("out of range offset")
)
14 |
--------------------------------------------------------------------------------
/kafka/event.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import "time"
4 |
// Event Kafka event.
type Event struct {
	// Topic the topic from which the message was consumed.
	Topic string
	// Key partition key.
	Key []byte
	// Value message content.
	Value []byte
	// Timestamp message timestamp.
	Timestamp time.Time
	// Partition the Kafka partition to which the message belongs.
	Partition int32
	// Offset the message offset.
	Offset int64
}
20 |
--------------------------------------------------------------------------------
/release/build.bash:
--------------------------------------------------------------------------------
set -euxo pipefail

# Expect one or two arguments: the output binary path and an optional version.
if [[ $# -ne 1 && $# -ne 2 ]]; then
  echo "usage: release/build.bash OUT [VERSION]" 1>&2
  exit 64
fi

BUILD_TIME=$(date -u '+%a %d %b %Y %H:%M:%S GMT')
# Third field of `go version` output, e.g. "go1.21.0".
RUNTIME=$(go version | cut -d' ' -f 3)

# Change to the repository root (this script lives in <root>/release).
cd "$(dirname "$(dirname "${BASH_SOURCE[0]}")")"
VERSION="${2:-}"
# Inject version/commit/build metadata via linker flags; -s -w strips debug info.
go build -o "$1" -ldflags="-s -w -X main.version=v${VERSION} -X main.runtimeVer=${RUNTIME} -X main.commit=${GITHUB_SHA} -X 'main.built=${BUILD_TIME}'" *.go
--------------------------------------------------------------------------------
/commands/create/create.go:
--------------------------------------------------------------------------------
1 | package create
2 |
3 | import (
4 | "gopkg.in/alecthomas/kingpin.v2"
5 |
6 | "github.com/xitonix/trubka/commands"
7 | )
8 |
// AddCommands adds the create command to the app.
//
// It registers the "create" parent command together with its "topic" and
// "partitions" sub-commands.
func AddCommands(app *kingpin.Application, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
	parent := app.Command("create", "A command to create Kafka entities.")
	addCreateTopicSubCommand(parent, global, kafkaParams)
	addCreatePartitionsSubCommand(parent, global, kafkaParams)
}
15 |
--------------------------------------------------------------------------------
/kafka/shutdown_reason.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
// shutdownReason enumerates why the consumer stopped.
type shutdownReason int8

const (
	cancelledByUser shutdownReason = iota
	noMoreMessage
	reachedStopCheckpoint
)

// String returns the human readable description of the shutdown reason.
// Unknown values yield an empty string.
func (s shutdownReason) String() string {
	switch s {
	case cancelledByUser:
		return "Cancelled by user"
	case noMoreMessage:
		return "No more messages received"
	case reachedStopCheckpoint:
		return "Reached stop checkpoint"
	default:
		return ""
	}
}
22 |
--------------------------------------------------------------------------------
/kafka/config_entry.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
// ConfigEntry represents a single Kafka configuration key/value pair.
type ConfigEntry struct {
	Name  string `json:"name"`
	Value string `json:"value"`
}

// ConfigEntriesByName implements sort.Interface to order configuration
// entries alphabetically by name.
type ConfigEntriesByName []*ConfigEntry

// Len returns the number of configuration entries.
func (e ConfigEntriesByName) Len() int { return len(e) }

// Swap exchanges the entries at positions i and j.
func (e ConfigEntriesByName) Swap(i, j int) { e[i], e[j] = e[j], e[i] }

// Less reports whether the entry at i sorts before the entry at j.
func (e ConfigEntriesByName) Less(i, j int) bool { return e[i].Name < e[j].Name }
23 |
--------------------------------------------------------------------------------
/kafka/sasl_handshake_version.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import "github.com/Shopify/sarama"
4 |
5 | // SASLHandshakeVersion SASL handshake version.
6 | type SASLHandshakeVersion string
7 |
8 | const (
9 | // SASLHandshakeV0 version 0
10 | SASLHandshakeV0 SASLHandshakeVersion = "v0"
11 | // SASLHandshakeV1 version 1
12 | SASLHandshakeV1 SASLHandshakeVersion = "v1"
13 | )
14 |
15 | func (s SASLHandshakeVersion) toSaramaVersion() int16 {
16 | switch s {
17 | case SASLHandshakeV0:
18 | return sarama.SASLHandshakeV0
19 | default:
20 | return sarama.SASLHandshakeV1
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/commands/deletion/delete.go:
--------------------------------------------------------------------------------
1 | package deletion
2 |
3 | import (
4 | "gopkg.in/alecthomas/kingpin.v2"
5 |
6 | "github.com/xitonix/trubka/commands"
7 | )
8 |
// AddCommands adds the delete command to the app.
//
// It registers the "delete" parent command together with its "topic",
// "group" and local-offsets sub-commands. The local-offsets sub-command
// does not need Kafka connection parameters.
func AddCommands(app *kingpin.Application, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
	parent := app.Command("delete", "A command to delete Kafka entities.")
	addDeleteTopicSubCommand(parent, global, kafkaParams)
	addDeleteGroupSubCommand(parent, global, kafkaParams)
	addDeleteLocalOffsetsSubCommand(parent, global)
}
16 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: "[BUG]: "
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 |
16 | **Expected behavior**
17 | A clear and concise description of what you expected to happen.
18 |
19 | **Screenshots**
20 | If applicable, add screenshots to help explain your problem.
21 |
22 | **Desktop (please complete the following information):**
23 | - OS:
24 | - Version:
25 |
--------------------------------------------------------------------------------
/commands/describe/describe.go:
--------------------------------------------------------------------------------
1 | package describe
2 |
3 | import (
4 | "gopkg.in/alecthomas/kingpin.v2"
5 |
6 | "github.com/xitonix/trubka/commands"
7 | )
8 |
// AddCommands adds the describe command to the app.
//
// It registers the "describe" parent command together with its group,
// broker, topic and cluster sub-commands.
func AddCommands(app *kingpin.Application, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
	parent := app.Command("describe", "A command to describe a Kafka entity.")
	addGroupSubCommand(parent, global, kafkaParams)
	addBrokerSubCommand(parent, global, kafkaParams)
	addTopicSubCommand(parent, global, kafkaParams)
	addClusterSubCommand(parent, global, kafkaParams)
}
17 |
--------------------------------------------------------------------------------
/commands/list/list.go:
--------------------------------------------------------------------------------
1 | package list
2 |
3 | import (
4 | "gopkg.in/alecthomas/kingpin.v2"
5 |
6 | "github.com/xitonix/trubka/commands"
7 | )
8 |
// AddCommands adds the list command to the app.
//
// It registers the "list" parent command together with its topic, group and
// offset listing sub-commands. The local-topics sub-command does not need
// Kafka connection parameters.
func AddCommands(app *kingpin.Application, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
	parent := app.Command("list", "A command to list Kafka entities.")
	addTopicsSubCommand(parent, global, kafkaParams)
	addGroupsSubCommand(parent, global, kafkaParams)
	addGroupOffsetsSubCommand(parent, global, kafkaParams)
	addLocalOffsetsSubCommand(parent, global, kafkaParams)
	addLocalTopicsSubCommand(parent, global)
}
18 |
--------------------------------------------------------------------------------
/internal/output/output.go:
--------------------------------------------------------------------------------
1 | package output
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 |
7 | "github.com/xitonix/trubka/internal"
8 | )
9 |
// NewLines prints `count` number of new lines to stdout. Zero or negative
// counts print nothing.
func NewLines(count int) {
	for remaining := count; remaining > 0; remaining-- {
		fmt.Println()
	}
}
16 |
// PrintAsJSON prints the input data into stdout as Json.
//
// The output is marshalled with indentation and, when enableColor is true,
// syntax-highlighted using the provided style. Returns the marshalling
// error, if any.
func PrintAsJSON(data interface{}, style string, enableColor bool) error {
	result, err := json.MarshalIndent(data, "", " ")
	if err != nil {
		return err
	}
	h := internal.NewJSONHighlighter(style, enableColor)
	fmt.Println(string(h.Highlight(result)))
	return nil
}
27 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "os"
8 |
9 | "github.com/xitonix/trubka/internal"
10 | "github.com/xitonix/trubka/internal/output/format"
11 | )
12 |
13 | // Version build flags
14 | var (
15 | version string
16 | commit string
17 | runtimeVer string
18 | built string
19 | )
20 |
21 | var enabledColor bool
22 |
// main runs the application and exits with a non-zero status code on any
// error other than user cancellation (context.Canceled).
func main() {
	err := newApplication()
	if err != nil && !errors.Is(err, context.Canceled) {
		exit(err)
	}
}
29 |
// exit prints the error to stderr (in red when colour output is enabled)
// and terminates the process with exit code 1.
func exit(err error) {
	msg := fmt.Sprintf("ERROR: %s", internal.Title(err))
	fmt.Fprintln(os.Stderr, format.Red(msg, enabledColor))
	os.Exit(1)
}
35 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: "[Feature]: "
5 | labels: feature request
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **If applicable, describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/kafka/scram_client.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "github.com/xdg/scram"
5 | )
6 |
7 | type xdgSCRAMClient struct {
8 | *scram.Client
9 | *scram.ClientConversation
10 | scram.HashGeneratorFcn
11 | }
12 |
13 | func (x *xdgSCRAMClient) Begin(userName, password, authzID string) (err error) {
14 | x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)
15 | if err != nil {
16 | return err
17 | }
18 | x.ClientConversation = x.Client.NewConversation()
19 | return nil
20 | }
21 |
22 | func (x *xdgSCRAMClient) Step(challenge string) (response string, err error) {
23 | response, err = x.ClientConversation.Step(challenge)
24 | return
25 | }
26 |
27 | func (x *xdgSCRAMClient) Done() bool {
28 | return x.ClientConversation.Done()
29 | }
30 |
--------------------------------------------------------------------------------
/release/release_darwin.bash:
--------------------------------------------------------------------------------
set -euxo pipefail

# Resolve the repository root (this script lives in <root>/release).
SRC="$(dirname "$(dirname "${BASH_SOURCE[0]}")")"
# Strip the leading "v" from the tag ref (e.g. refs/tags/v1.2.3 -> 1.2.3).
RELEASE_VERSION=$(echo "${GITHUB_REF}" | cut -d'v' -f2)
RELEASE_OS="$(go env GOOS)"
RELEASE_ARCH="$(go env GOARCH)"
RELEASE_NAME="${BINARY}_${RELEASE_VERSION}_${RELEASE_OS}_${RELEASE_ARCH}"
BIN_DIR="$(pwd)/output/usr/bin"
RPM_ITERATION=1
# Quote all path expansions so the script survives paths containing spaces.
mkdir -p "${BIN_DIR}"
echo "Creating ${RELEASE_NAME}.tar.gz..." 1>&2
"$SRC/release/build.bash" "$BIN_DIR/$BINARY" "$RELEASE_VERSION"
tar -C "${BIN_DIR}" -cvzf "${RELEASE_NAME}.tar.gz" "${BINARY}"

# Publish the artefact details as step outputs for the release workflow.
echo "file=${RELEASE_NAME}.tar.gz" >> "${GITHUB_OUTPUT}"
echo "sha=$(shasum -a 256 "${RELEASE_NAME}.tar.gz" | awk '{printf $1}')" >> "${GITHUB_OUTPUT}"
echo "version=v${RELEASE_VERSION}" >> "${GITHUB_OUTPUT}"
--------------------------------------------------------------------------------
/internal/verbosity_level.go:
--------------------------------------------------------------------------------
1 | package internal
2 |
// VerbosityLevel logging verbosity level.
type VerbosityLevel int8

const (
	// Forced the lowest logging level. Everything will be printed under this level.
	Forced VerbosityLevel = iota
	// Verbose verbose mode (-v)
	Verbose
	// VeryVerbose very verbose mode (-vv)
	VeryVerbose
	// SuperVerbose super verbose mode (-vvv)
	SuperVerbose
	// Chatty extremely verbose mode (-vvvv)
	Chatty
)

// ToVerbosityLevel converts the number of -v occurrences into a logging
// verbosity level. Zero and negative counters map to Forced; counters above
// four are capped at Chatty.
func ToVerbosityLevel(counter int) VerbosityLevel {
	if counter <= 0 {
		return Forced
	}
	if counter >= 4 {
		return Chatty
	}
	levels := [...]VerbosityLevel{Verbose, VeryVerbose, SuperVerbose}
	return levels[counter-1]
}
34 |
--------------------------------------------------------------------------------
/internal/logger.go:
--------------------------------------------------------------------------------
1 | package internal
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "time"
7 | )
8 |
// Logger represents a levelled logger. Messages are written to standard
// error, prefixed with a timestamp.
type Logger struct {
	// currentLevel the maximum verbosity level this logger will print.
	currentLevel VerbosityLevel
}

// NewLogger creates a new instance of a logger.
func NewLogger(level VerbosityLevel) *Logger {
	return &Logger{currentLevel: level}
}

// Log writes the provided message to stderr if the message's level does not
// exceed the logger's current verbosity level (Forced messages are always
// printed).
func (l *Logger) Log(level VerbosityLevel, message string) {
	if l.currentLevel < level {
		return
	}
	fmt.Fprintf(os.Stderr, "%s%s\n", time.Now().Format(loggingTimestampLayout), message)
}

// Logf formats (fmt.Sprintf-style) and logs the provided message to stderr
// if the message's level does not exceed the logger's current verbosity level.
func (l *Logger) Logf(level VerbosityLevel, format string, a ...interface{}) {
	l.Log(level, fmt.Sprintf(format, a...))
}
31 |
--------------------------------------------------------------------------------
/kafka/partition_meta.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
// PartitionMeta represents partition metadata.
type PartitionMeta struct {
	// ID partition id.
	ID int32 `json:"id"`
	// Offset partition offset.
	Offset int64 `json:"offset"`
	// Leader leader node.
	Leader *Broker `json:"leader"`
	// Replicas replication nodes.
	Replicas []*Broker `json:"replicas"`
	// ISRs in-sync replicas.
	ISRs []*Broker `json:"in_sync_replicas"`
	// OfflineReplicas offline replicas.
	OfflineReplicas []*Broker `json:"offline_replicas"`
}

// PartitionMetaByID sorts partition metadata by partition ID.
type PartitionMetaByID []*PartitionMeta

// Len returns the number of partition metadata entries.
func (b PartitionMetaByID) Len() int {
	return len(b)
}

// Swap exchanges the entries at positions i and j.
func (b PartitionMetaByID) Swap(i, j int) {
	b[i], b[j] = b[j], b[i]
}

// Less reports whether the partition at i has a smaller ID than the one at j.
func (b PartitionMetaByID) Less(i, j int) bool {
	return b[i].ID < b[j].ID
}
33 |
--------------------------------------------------------------------------------
/kafka/printer_mock.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "github.com/xitonix/trubka/internal"
5 | )
6 |
// printerMock is a no-op implementation of the printer contract, used in
// tests to silence all output.
type printerMock struct {
}

func (p *printerMock) Errorf(level internal.VerbosityLevel, format string, args ...interface{}) {
}

func (p *printerMock) Error(level internal.VerbosityLevel, msg string) {
}

func (p *printerMock) Infof(level internal.VerbosityLevel, format string, args ...interface{}) {
}

func (p *printerMock) Info(level internal.VerbosityLevel, msg string) {
}

func (p *printerMock) Warningf(level internal.VerbosityLevel, format string, args ...interface{}) {
}

func (p *printerMock) Warning(level internal.VerbosityLevel, msg string) {
}

func (p *printerMock) WriteEvent(topic string, bytes []byte) {
}

func (p *printerMock) Close() {
}

// Level always reports Verbose for the mock.
func (p *printerMock) Level() internal.VerbosityLevel {
	return internal.Verbose
}
37 |
--------------------------------------------------------------------------------
/kafka/topic_partition_offset.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import "sort"
4 |
5 | // TopicPartitionOffset represents a map of topic offset pairs for all the partitions.
6 | type TopicPartitionOffset map[string]PartitionOffset
7 |
8 | // ToJSON returns an object ready to be serialised into json string.
9 | func (t TopicPartitionOffset) ToJSON() interface{} {
10 | if t == nil {
11 | return nil
12 | }
13 |
14 | type tpo struct {
15 | Topic string `json:"topic"`
16 | Partitions interface{} `json:"partitions"`
17 | }
18 | output := make([]tpo, len(t))
19 | i := 0
20 | for topic, po := range t {
21 | output[i] = tpo{
22 | Topic: topic,
23 | Partitions: po.ToJSON(),
24 | }
25 | i++
26 | }
27 | return output
28 | }
29 |
30 | // SortedTopics returns a list of sorted topics.
31 | func (t TopicPartitionOffset) SortedTopics() []string {
32 | sorted := make([]string, 0)
33 | if len(t) == 0 {
34 | return sorted
35 | }
36 | for topic := range t {
37 | sorted = append(sorted, topic)
38 | }
39 | sort.Strings(sorted)
40 | return sorted
41 | }
42 |
--------------------------------------------------------------------------------
/internal/output/format/list/tree.go:
--------------------------------------------------------------------------------
1 | package list
2 |
3 | import (
4 | "fmt"
5 | "os"
6 |
7 | "github.com/jedib0t/go-pretty/list"
8 | )
9 |
// Tree represents a tree. It renders items with connected, rounded guide
// lines and writes to stdout.
type Tree struct {
	writer list.Writer
}

// newTree creates a tree-style list writer mirrored to stdout.
func newTree() *Tree {
	w := list.NewWriter()
	w.SetOutputMirror(os.Stdout)
	w.SetStyle(list.StyleConnectedRounded)
	w.Style().LinePrefix = " "
	return &Tree{
		writer: w,
	}
}

// Render prints out the list into stdout.
func (t *Tree) Render() {
	t.writer.Render()
}

// AddItem adds a new item to the list.
func (t *Tree) AddItem(item interface{}) {
	t.writer.AppendItem(item)
}

// AddItemF adds a new formatted (fmt.Sprintf-style) item to the list.
func (t *Tree) AddItemF(format string, a ...interface{}) {
	t.writer.AppendItem(fmt.Sprintf(format, a...))
}

// Indent adds one level of indentation to the list.
func (t *Tree) Indent() {
	t.writer.Indent()
}

// UnIndent removes one level of indentation from the list.
func (t *Tree) UnIndent() {
	t.writer.UnIndent()
}
49 |
--------------------------------------------------------------------------------
/kafka/consumer_wrapper.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/Shopify/sarama"
7 | )
8 |
9 | // ConsumerWrapper wraps Sarama consumer and its underlying client.
10 | type ConsumerWrapper struct {
11 | sarama.Consumer
12 | client sarama.Client
13 | }
14 |
15 | // NewConsumerWrapper creates a new instance of consumer wrapper.
16 | func NewConsumerWrapper(brokers []string, options ...Option) (*ConsumerWrapper, error) {
17 | client, err := initClient(brokers, options...)
18 | if err != nil {
19 | return nil, err
20 | }
21 | consumer, err := sarama.NewConsumerFromClient(client)
22 | if err != nil {
23 | return nil, fmt.Errorf("failed to initialise the Kafka consumer: %w", err)
24 | }
25 |
26 | return &ConsumerWrapper{
27 | Consumer: consumer,
28 | client: client,
29 | }, nil
30 | }
31 |
32 | // GetOffset queries the cluster to get the most recent available offset at the
33 | // given time (in milliseconds) on the topic/partition combination.
34 | func (c *ConsumerWrapper) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
35 | return c.client.GetOffset(topic, partitionID, time)
36 | }
37 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | # This will run a single node Kafka cluster locally for testing purposes.
2 | services:
3 | broker:
4 | image: apache/kafka:latest
5 | hostname: broker
6 | container_name: broker
7 | ports:
8 | - "9092:9092"
9 | environment:
10 | KAFKA_BROKER_ID: 1
11 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,CONTROLLER:PLAINTEXT
12 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
13 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
14 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
15 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
16 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
17 | KAFKA_PROCESS_ROLES: broker,controller
18 | KAFKA_NODE_ID: 1
19 | KAFKA_CONTROLLER_QUORUM_VOTERS: 1@broker:29093
20 | KAFKA_LISTENERS: PLAINTEXT://broker:29092,CONTROLLER://broker:29093,PLAINTEXT_HOST://0.0.0.0:9092
21 | KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
22 | KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
23 | KAFKA_LOG_DIRS: /tmp/kraft-combined-logs
24 | CLUSTER_ID: MkU3OEVBNTcwNTJENDM2Qk
25 |
--------------------------------------------------------------------------------
/kafka/producer.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "github.com/Shopify/sarama"
5 | )
6 |
7 | // Producer represents a wrapper around Sarama producer.
8 | type Producer struct {
9 | producer sarama.SyncProducer
10 | }
11 |
12 | // NewProducer creates a new instance of Kafka producer.
13 | func NewProducer(brokers []string, options ...Option) (*Producer, error) {
14 | client, err := initClient(brokers, options...)
15 | if err != nil {
16 | return nil, err
17 | }
18 |
19 | producer, err := sarama.NewSyncProducerFromClient(client)
20 | if err != nil {
21 | return nil, err
22 | }
23 |
24 | return &Producer{
25 | producer: producer,
26 | }, nil
27 | }
28 |
29 | // Produce publishes a new message to the specified Kafka topic.
30 | func (p *Producer) Produce(topic string, key, value []byte) (int32, int64, error) {
31 | message := &sarama.ProducerMessage{
32 | Topic: topic,
33 | Key: sarama.ByteEncoder(key),
34 | Value: sarama.ByteEncoder(value)}
35 | return p.producer.SendMessage(message)
36 | }
37 |
38 | // Close closes the producer.
39 | func (p *Producer) Close() error {
40 | if p.producer != nil {
41 | return p.producer.Close()
42 | }
43 | return nil
44 | }
45 |
--------------------------------------------------------------------------------
/commands/kafka_params.go:
--------------------------------------------------------------------------------
1 | package commands
2 |
3 | import (
4 | "crypto/tls"
5 | )
6 |
// KafkaParameters holds CLI parameters to connect to Kafka.
type KafkaParameters struct {
	// Brokers a comma separated list of host:port strings.
	Brokers string
	// Version the cluster version.
	Version string
	// TLS TLS settings.
	TLS *tls.Config
	// SASLMechanism SASL authentication mechanism.
	SASLMechanism string
	// SASLUsername SASL username.
	SASLUsername string
	// SASLPassword SASL password.
	SASLPassword string
	// SASLHandshakeVersion SASL handshake version.
	SASLHandshakeVersion string
}

// TLSParameters holds TLS connection parameters.
type TLSParameters struct {
	// Enabled true if TLS is requested by the user.
	Enabled bool
	// CACert the path to CA Cert file.
	CACert string
	// ClientCert path to client certification file to enable mutual TLS authentication.
	ClientCert string
	// ClientKey path to the client private key file to enable mutual TLS authentication.
	//
	// If set, the client certificate file must also be provided.
	ClientKey string
}
38 |
--------------------------------------------------------------------------------
/kafka/consumer_group.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "fmt"
5 | )
6 |
// GroupMember represents a consumer group member.
type GroupMember struct {
	// ID the member identifier.
	ID string
	// ClientID client ID.
	ClientID string
	// Host the host name/IP of the client machine.
	Host string
}

// String returns the member ID, followed by the host in square brackets.
func (g GroupMember) String() string {
	return fmt.Sprintf("%v [%v]", g.ID, g.Host)
}
20 |
// ConsumerGroup represents a consumer group.
type ConsumerGroup struct {
	// Members the clients attached to the consumer group.
	Members []GroupMember
	// TopicOffsets the offsets of each topic belonging to the group.
	TopicOffsets TopicPartitionOffset
	// Coordinator the coordinator of the consumer group.
	Coordinator Broker
}
30 |
31 | // ConsumerGroups the map of consumer groups keyed by consumer group ID.
32 | type ConsumerGroups map[string]*ConsumerGroup
33 |
34 | // Names returns the names of the consumer groups
35 | func (c ConsumerGroups) Names() []string {
36 | names := make([]string, len(c))
37 | i := 0
38 | for name := range c {
39 | names[i] = name
40 | i++
41 | }
42 | return names
43 | }
44 |
--------------------------------------------------------------------------------
/commands/version.go:
--------------------------------------------------------------------------------
1 | package commands
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 |
7 | "gopkg.in/alecthomas/kingpin.v2"
8 | )
9 |
// version holds the build-time details injected by the release pipeline.
type version struct {
	// version the semantic version of the binary.
	version string
	// commit the git commit hash the binary was built from.
	commit string
	// built the build timestamp.
	built string
	// runtimeVer the Go runtime version used to build the binary.
	runtimeVer string
	app        *kingpin.Application
}
17 |
18 | // AddVersionCommand adds the version command to the app.
19 | func AddVersionCommand(app *kingpin.Application, appVersion, commit, built, runtimeVer string) {
20 | cmd := &version{
21 | version: appVersion,
22 | commit: commit,
23 | built: built,
24 | runtimeVer: runtimeVer,
25 | app: app,
26 | }
27 | app.Command("version", "Prints the current version of Trubka.").Action(cmd.run)
28 | }
29 |
// run prints the version details of the running binary to stdout.
//
// It is wired up as the action of the `version` command and never fails.
func (c *version) run(*kingpin.ParseContext) error {
	// An empty version indicates a binary which was not produced by the
	// release pipeline (no version was injected at build time).
	if c.version == "" {
		c.version = "[built from source]"
	}
	b := strings.Builder{}
	b.WriteString("Trubka - A CLI Tool for Kafka\n")
	b.WriteString(fmt.Sprintf(" Version: %s\n", c.version))
	b.WriteString(fmt.Sprintf(" Runtime: %s\n", c.runtimeVer))
	b.WriteString(fmt.Sprintf(" Built: %s\n", c.built))
	b.WriteString(fmt.Sprintf(" Commit: %s", c.commit))
	fmt.Println(b.String())
	return nil
}
43 |
--------------------------------------------------------------------------------
/kafka/topic.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import "fmt"
4 |
// Topic represents a Kafka topic with metadata.
type Topic struct {
	// Name topic name.
	Name string `json:"name"`
	// NumberOfPartitions number of partitions within the topic.
	NumberOfPartitions int32 `json:"number_of_partitions"`
	// ReplicationFactor replication factor.
	ReplicationFactor int16 `json:"replication_factor"`
}

// TopicsByName sorts the topic list by name.
type TopicsByName []Topic

// Len returns the number of topics in the list.
func (t TopicsByName) Len() int { return len(t) }

// Swap exchanges the topics at positions i and j.
func (t TopicsByName) Swap(i, j int) { t[i], t[j] = t[j], t[i] }

// Less reports whether the topic at position i sorts before the one at position j.
func (t TopicsByName) Less(i, j int) bool { return t[i].Name < t[j].Name }

// GetNames returns a list of all the topic names.
func (t TopicsByName) GetNames() []string {
	names := make([]string, 0, len(t))
	for _, topic := range t {
		names = append(names, topic.Name)
	}
	return names
}

// String returns the string representation of the topic metadata.
func (t Topic) String() string {
	return fmt.Sprintf("%s (Partitions: %d, Replication Factor: %d)", t.Name, t.NumberOfPartitions, t.ReplicationFactor)
}
43 |
--------------------------------------------------------------------------------
/kafka/offset.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "strconv"
5 | )
6 |
7 | const unknownOffset int64 = -3
8 | const offsetNotFound int64 = -4
9 |
10 | // Offset represents an offset pair for a given partition.
11 | //
12 | // A pair contains the latest offset of the partition reported by the server and the local or consumer group offset.
13 | type Offset struct {
14 | // Latest the latest available offset of the partition reported by the server.
15 | Latest int64
16 | // Current the current value of the local or consumer group offset. This is where the consumer up to.
17 | Current int64
18 | stopAt *checkpoint
19 | }
20 |
21 | // Lag calculates the lag between the latest and the current offset values.
22 | func (o Offset) Lag() int64 {
23 | if o.Latest > o.Current {
24 | return o.Latest - o.Current
25 | }
26 | return 0
27 | }
28 |
29 | // String returns the string representation of the given offset.
30 | func (o Offset) String(latest bool) string {
31 | if latest {
32 | return getOffsetText(o.Latest)
33 | }
34 | return getOffsetText(o.Current)
35 | }
36 |
37 | func getOffsetText(offset int64) string {
38 | switch offset {
39 | case unknownOffset, offsetNotFound:
40 | return "-"
41 | default:
42 | return strconv.FormatInt(offset, 10)
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/kafka/offset_store_mock.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | type offsetStoreMock struct {
4 | err chan error
5 | tpo TopicPartitionOffset
6 | forceReadFailure bool
7 | }
8 |
9 | func newOffsetStoreMock(forceReadFailure bool) *offsetStoreMock {
10 | return &offsetStoreMock{
11 | err: make(chan error),
12 | tpo: make(TopicPartitionOffset),
13 | forceReadFailure: forceReadFailure,
14 | }
15 | }
16 |
17 | func (o *offsetStoreMock) start(loaded TopicPartitionOffset) {
18 | o.tpo = loaded
19 | }
20 |
21 | func (o *offsetStoreMock) commit(topic string, partition int32, offset int64) error {
22 | o.tpo[topic][partition] = Offset{
23 | Current: offset,
24 | }
25 | return nil
26 | }
27 |
28 | func (o *offsetStoreMock) read(topic string) (PartitionOffset, error) {
29 | if o.forceReadFailure {
30 | return nil, errDeliberate
31 | }
32 | return o.tpo[topic], nil
33 | }
34 |
35 | func (o *offsetStoreMock) errors() <-chan error {
36 | return o.err
37 | }
38 |
39 | func (o *offsetStoreMock) close() {
40 | }
41 |
42 | func (o *offsetStoreMock) set(topic string, po map[int32]int64) {
43 | o.tpo[topic] = make(PartitionOffset)
44 | for partition, offset := range po {
45 | o.tpo[topic][partition] = Offset{
46 | Current: offset,
47 | }
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/internal/verbosity_level_test.go:
--------------------------------------------------------------------------------
1 | package internal
2 |
3 | import (
4 | "math"
5 | "testing"
6 | )
7 |
8 | func TestToVerbosityLevel(t *testing.T) {
9 | testCases := []struct {
10 | title string
11 | counter int
12 | expected VerbosityLevel
13 | }{
14 | {
15 | title: "negative value",
16 | counter: -1,
17 | expected: Forced,
18 | },
19 | {
20 | title: "zero value",
21 | counter: 0,
22 | expected: Forced,
23 | },
24 | {
25 | title: "one",
26 | counter: 1,
27 | expected: Verbose,
28 | },
29 | {
30 | title: "two",
31 | counter: 2,
32 | expected: VeryVerbose,
33 | },
34 | {
35 | title: "three",
36 | counter: 3,
37 | expected: SuperVerbose,
38 | },
39 | {
40 | title: "four",
41 | counter: 4,
42 | expected: Chatty,
43 | },
44 | {
45 | title: "greater than four",
46 | counter: 5,
47 | expected: Chatty,
48 | },
49 | {
50 | title: "max int",
51 | counter: math.MaxInt64,
52 | expected: Chatty,
53 | },
54 | }
55 |
56 | for _, tc := range testCases {
57 | t.Run(tc.title, func(t *testing.T) {
58 | actual := ToVerbosityLevel(tc.counter)
59 | if actual != tc.expected {
60 | t.Errorf("Expected verbosity level: %d, Actual: %d", tc.expected, actual)
61 | }
62 | })
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/release/windows/release.ps1:
--------------------------------------------------------------------------------
# Builds the Windows binary, archives it as tar.gz and packages it as an MSI
# installer using the WiX toolset. The resulting file names are appended to
# GITHUB_OUTPUT so later workflow steps can pick them up.
param($version,$binary)

# The `wix` environment variable must point to the WiX toolset installation folder.
If ( ! ( Test-Path Env:wix ) ) {
Write-Error 'Could not find WiX binaries %wix%'
exit 1
}

$filename="${binary}_${version}_amd64.msi"
$tarfilename="${binary}_${version}_windows_amd64.tar.gz"
# WiX product versions must be plain three-part numbers; extract that prefix
# from the (possibly pre-release) version string.
$wixVersion="0.0.0"
$wixVersionMatch=[regex]::Match($version, '^([0-9]+\.[0-9]+\.[0-9]+)')
If ( $wixVersionMatch.success ) {
$wixVersion=$wixVersionMatch.captures.groups[1].value
} Else {
Write-Error "Invalid version $version"
exit 1
}

# Compile the Go binary (produces ${binary}.exe in the current folder).
.\build.ps1 `
-version $version `
-binary $binary

tar -cvzf "${tarfilename}" "${binary}.exe"

# Title-case the binary name for use as the MSI product name.
$appname=(Get-Culture).TextInfo.ToTitleCase($binary)

# WiX compile step: release.wxs -> release.wixobj
& "${env:wix}bin\candle.exe" `
-nologo `
-arch x64 `
"-dAppVersion=$version" `
"-dWixVersion=$wixVersion" `
"-dAppName=$appname" `
release.wxs
If ( $LastExitCode -ne 0 ) {
exit $LastExitCode
}
# WiX link step: release.wixobj -> $filename (the MSI)
& "${env:wix}bin\light.exe" `
-nologo `
-dcl:high `
-ext WixUIExtension `
-ext WixUtilExtension `
release.wixobj `
-o $filename
If ( $LastExitCode -ne 0 ) {
exit $LastExitCode
}

# Publish the artefact names as GitHub Actions step outputs.
echo file=$filename >> $env:GITHUB_OUTPUT
echo archive=$tarfilename >> $env:GITHUB_OUTPUT
--------------------------------------------------------------------------------
/internal/output/format/list/plain.go:
--------------------------------------------------------------------------------
1 | package list
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 | )
7 |
const (
	// indentation the string prepended to an item once per indentation level.
	indentation = " "
)

// Plain represents a plain text list.
type Plain struct {
	items  []string
	indent int
}

// newPlain creates a new plain text list with zero indentation.
func newPlain() *Plain {
	return &Plain{
		items:  make([]string, 0),
		indent: 0,
	}
}

// Render prints out the list into stdout.
//
// Each item is printed on its own line. The last item is printed without a
// trailing newline.
func (p *Plain) Render() {
	last := len(p.items) - 1
	for i, item := range p.items {
		// The original condition (i < len(p.items)) was always true inside
		// the loop, which made the fmt.Print branch below unreachable and
		// always emitted a trailing newline after the last item.
		if i < last {
			fmt.Println(item)
			continue
		}
		fmt.Print(item)
	}
}

// AddItem adds a new item to the list at the current indentation level.
func (p *Plain) AddItem(item interface{}) {
	p.indentF("%v", item)
}

// AddItemF adds a new formatted item to the list at the current indentation level.
func (p *Plain) AddItemF(format string, a ...interface{}) {
	p.indentF(format, a...)
}

// Indent adds one level of indentation to the list.
func (p *Plain) Indent() {
	p.indent++
}

// UnIndent removes one level of indentation from the list.
//
// Calling it on a non-indented list is a no-op.
func (p *Plain) UnIndent() {
	if p.indent > 0 {
		p.indent--
	}
}

// indentF formats the item and appends it to the list, prefixed with the
// current level of indentation.
func (p *Plain) indentF(format string, a ...interface{}) {
	p.items = append(p.items, strings.Repeat(indentation, p.indent)+fmt.Sprintf(format, a...))
}
61 |
--------------------------------------------------------------------------------
/kafka/topic_partitions.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "sort"
5 | "strconv"
6 | "strings"
7 | )
8 |
// TopicPartitions a map of topic and partition list.
type TopicPartitions map[string][]int32

// SortedPartitions returns a list of all the partitions sorted in ascending order.
//
// An empty list is returned when the topic is not in the map.
func (t TopicPartitions) SortedPartitions(topic string) []int {
	partitions, ok := t[topic]
	if !ok {
		return []int{}
	}

	sorted := make([]int, 0, len(partitions))
	for _, partition := range partitions {
		sorted = append(sorted, int(partition))
	}
	sort.Ints(sorted)
	return sorted
}

// SortedPartitionsString returns a comma separated string of the sorted partitions.
//
// An empty string is returned when the topic is not in the map.
func (t TopicPartitions) SortedPartitionsString(topic string) string {
	sorted := t.SortedPartitions(topic)
	if len(sorted) == 0 {
		return ""
	}
	rendered := make([]string, 0, len(sorted))
	for _, partition := range sorted {
		rendered = append(rendered, strconv.Itoa(partition))
	}
	return strings.Join(rendered, ",")
}

// SortedTopics returns a list of all the topics in the map sorted alphabetically.
func (t TopicPartitions) SortedTopics() []string {
	if len(t) == 0 {
		return []string{}
	}
	topics := make([]string, 0, len(t))
	for topic := range t {
		topics = append(topics, topic)
	}
	sort.Strings(topics)
	return topics
}
54 |
--------------------------------------------------------------------------------
/internal/json_highlighter.go:
--------------------------------------------------------------------------------
1 | package internal
2 |
3 | import (
4 | "bytes"
5 | "strings"
6 |
7 | "github.com/alecthomas/chroma"
8 | "github.com/alecthomas/chroma/formatters"
9 | "github.com/alecthomas/chroma/lexers"
10 | "github.com/alecthomas/chroma/styles"
11 | )
12 |
// JSONHighlighter represents a Json highlighter.
//
// A zero-value JSONHighlighter (nil lexer) is a no-op which passes the input
// through untouched.
type JSONHighlighter struct {
	// lex the lexer used for tokenising the Json input.
	lex chroma.Lexer
	// fm the terminal output formatter.
	fm chroma.Formatter
	// st the colour style applied to the output.
	st *chroma.Style
}
19 |
20 | // NewJSONHighlighter creates a new instance of a Json highlighter.
21 | func NewJSONHighlighter(style string, enabled bool) *JSONHighlighter {
22 | if !enabled || strings.EqualFold(style, "none") {
23 | return &JSONHighlighter{}
24 | }
25 | lex := lexers.Get("json")
26 | fm := formatters.Get("terminal")
27 | if fm == nil {
28 | fm = formatters.Fallback
29 | }
30 |
31 | st := styles.Get(style)
32 | if st == nil {
33 | st = styles.Fallback
34 | }
35 |
36 | return &JSONHighlighter{
37 | lex: lex,
38 | fm: fm,
39 | st: st,
40 | }
41 | }
42 |
43 | // Highlight returns the highlighted Json string based on the requested style.
44 | //
45 | // This method does not alter the input if the Highlighter is disabled.
46 | func (j *JSONHighlighter) Highlight(in []byte) []byte {
47 | if j.lex == nil {
48 | return in
49 | }
50 | tokens, err := j.lex.Tokenise(nil, string(in))
51 | if err != nil {
52 | return in
53 | }
54 | var buf bytes.Buffer
55 | err = j.fm.Format(&buf, j.st, tokens)
56 | if err != nil {
57 | return in
58 | }
59 | return buf.Bytes()
60 | }
61 |
--------------------------------------------------------------------------------
/commands/deletion/topic.go:
--------------------------------------------------------------------------------
1 | package deletion
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 |
7 | "gopkg.in/alecthomas/kingpin.v2"
8 |
9 | "github.com/xitonix/trubka/commands"
10 | "github.com/xitonix/trubka/internal"
11 | )
12 |
// topic holds the parameters of the `delete topic` command.
type topic struct {
	globalParams *commands.GlobalParameters
	kafkaParams  *commands.KafkaParameters
	// topic the name of the topic to delete.
	topic string
	// silent when true, the topic is deleted without user confirmation.
	silent bool
}
19 |
// addDeleteTopicSubCommand registers the `topic` sub-command under the parent
// delete command and binds its argument and flags.
func addDeleteTopicSubCommand(parent *kingpin.CmdClause, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
	cmd := &topic{
		globalParams: global,
		kafkaParams:  kafkaParams,
	}
	c := parent.Command("topic", "Deletes a topic.").Action(cmd.run)
	c.Arg("topic", "The topic to delete.").
		Required().
		StringVar(&cmd.topic)

	c.Flag("silent", "Deletes the topic without user confirmation.").
		Short('s').
		NoEnvar().
		BoolVar(&cmd.silent)
}
35 |
36 | func (c *topic) run(_ *kingpin.ParseContext) error {
37 | manager, _, cancel, err := commands.InitKafkaManager(c.globalParams, c.kafkaParams)
38 |
39 | if err != nil {
40 | return err
41 | }
42 |
43 | defer func() {
44 | manager.Close()
45 | cancel()
46 | }()
47 |
48 | if internal.IsEmpty(c.topic) {
49 | return errors.New("topic cannot be empty")
50 | }
51 | if c.silent || commands.AskForConfirmation(fmt.Sprintf("Are you sure you want to delete %s topic", c.topic)) {
52 | err := manager.DeleteTopic(c.topic)
53 | if err != nil {
54 | return err
55 | }
56 | fmt.Printf("%s topic has been deleted successfully.\n", c.topic)
57 | }
58 |
59 | return nil
60 | }
61 |
--------------------------------------------------------------------------------
/release/release_linux.bash:
--------------------------------------------------------------------------------
# Packages the Linux release: builds the binary, archives it as tar.gz and
# produces RPM and DEB packages via fpm running inside docker. Artefact names
# are appended to GITHUB_OUTPUT for later workflow steps.
set -euxo pipefail

# The repository root (this script lives in <root>/release).
SRC="$(dirname "$(dirname "${BASH_SOURCE[0]}")")"
# GITHUB_REF looks like refs/tags/vX.Y.Z; keep everything after the 'v'.
RELEASE_VERSION=$(echo ${GITHUB_REF} | cut -d'v' -f2)
RELEASE_OS="$(go env GOOS)"
RELEASE_ARCH="$(go env GOARCH)"
RELEASE_NAME="${BINARY}_${RELEASE_VERSION}_${RELEASE_OS}_${RELEASE_ARCH}"
# The binary is staged under output/usr/bin so the packages install it there.
BIN_DIR="$(pwd)/output/usr/bin"
RPM_ITERATION=1
mkdir -p $BIN_DIR
echo "Creating ${RELEASE_NAME}.tar.gz..." 1>&2
"$SRC/release/build.bash" "$BIN_DIR/$BINARY" "$RELEASE_VERSION"
tar -C "${BIN_DIR}" -cvzf "${RELEASE_NAME}.tar.gz" "${BINARY}"

# Build the RPM package
docker run --rm -v $(pwd):/root xitonix/fpm-rpm -C /root/output \
  --description "${DESCRIPTION}" \
  --maintainer "${MAINTAINER}" \
  --vendor "${VENDOR}" \
  --license "${LICENSE}" \
  --url "${URL}" \
  -s dir -t rpm \
  -n ${BINARY} \
  -p /root \
  -v ${RELEASE_VERSION} \
  --iteration ${RPM_ITERATION}

# Build the DEB package
docker run --rm -v $(pwd):/root xitonix/fpm-debian -C /root/output \
  --description "${DESCRIPTION}" \
  --maintainer "${MAINTAINER}" \
  --vendor "${VENDOR}" \
  --license "${LICENSE}" \
  --url "${URL}" \
  -s dir -t deb \
  -n ${BINARY} \
  -p /root \
  -v ${RELEASE_VERSION} \
  --deb-use-file-permissions

# Publish the artefact names as GitHub Actions step outputs.
echo "file=${RELEASE_NAME}.tar.gz" >> $GITHUB_OUTPUT
echo "rpm=${BINARY}-${RELEASE_VERSION}-${RPM_ITERATION}.x86_64.rpm" >> $GITHUB_OUTPUT
echo "deb=${BINARY}_${RELEASE_VERSION}_${RELEASE_ARCH}.deb" >> $GITHUB_OUTPUT
--------------------------------------------------------------------------------
/commands/deletion/group.go:
--------------------------------------------------------------------------------
1 | package deletion
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 |
7 | "gopkg.in/alecthomas/kingpin.v2"
8 |
9 | "github.com/xitonix/trubka/commands"
10 | "github.com/xitonix/trubka/internal"
11 | )
12 |
// group holds the parameters of the `delete group` command.
type group struct {
	globalParams *commands.GlobalParameters
	kafkaParams  *commands.KafkaParameters
	// group the name of the consumer group to delete.
	group string
	// silent when true, the group is deleted without user confirmation.
	silent bool
}
19 |
// addDeleteGroupSubCommand registers the `group` sub-command under the parent
// delete command and binds its argument and flags.
func addDeleteGroupSubCommand(parent *kingpin.CmdClause, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
	cmd := &group{
		globalParams: global,
		kafkaParams:  kafkaParams,
	}
	c := parent.Command("group", "Deletes a consumer group.").Action(cmd.run)
	c.Arg("group", "The consumer group to delete.").
		Required().
		StringVar(&cmd.group)

	c.Flag("silent", "Deletes the consumer group without user confirmation.").
		Short('s').
		NoEnvar().
		BoolVar(&cmd.silent)
}
35 |
36 | func (c *group) run(_ *kingpin.ParseContext) error {
37 | manager, _, cancel, err := commands.InitKafkaManager(c.globalParams, c.kafkaParams)
38 |
39 | if err != nil {
40 | return err
41 | }
42 |
43 | defer func() {
44 | manager.Close()
45 | cancel()
46 | }()
47 |
48 | if internal.IsEmpty(c.group) {
49 | return errors.New("the consumer group name cannot be empty")
50 | }
51 | if c.silent || commands.AskForConfirmation(fmt.Sprintf("Are you sure you want to delete %s group", c.group)) {
52 | err := manager.DeleteConsumerGroup(c.group)
53 | if err != nil {
54 | return err
55 | }
56 | fmt.Printf("%s group has been deleted successfully.\n", c.group)
57 | }
58 |
59 | return nil
60 | }
61 |
--------------------------------------------------------------------------------
/commands/create/partitions.go:
--------------------------------------------------------------------------------
1 | package create
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 |
7 | "gopkg.in/alecthomas/kingpin.v2"
8 |
9 | "github.com/xitonix/trubka/commands"
10 | "github.com/xitonix/trubka/internal"
11 | )
12 |
// partitions holds the parameters of the `create partitions` command.
type partitions struct {
	globalParams *commands.GlobalParameters
	kafkaParams  *commands.KafkaParameters
	// topic the name of the topic to repartition.
	topic string
	// numberOfPartitions the requested number of partitions.
	numberOfPartitions int32
}
19 |
// addCreatePartitionsSubCommand registers the `partitions` sub-command under
// the parent create command and binds its argument and flags.
func addCreatePartitionsSubCommand(parent *kingpin.CmdClause, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
	cmd := &partitions{
		globalParams: global,
		kafkaParams:  kafkaParams,
	}
	c := parent.Command("partitions", "Increases the number of partitions of the given topic. If the topic has a key, the partition logic or ordering of the messages will be affected.").Action(cmd.run)
	c.Arg("topic", "The topic name.").
		Required().
		StringVar(&cmd.topic)

	c.Flag("number-of-partitions", "Number of partitions.").
		Short('p').
		Required().
		NoEnvar().
		Int32Var(&cmd.numberOfPartitions)
}
36 |
37 | func (c *partitions) run(_ *kingpin.ParseContext) error {
38 | manager, _, cancel, err := commands.InitKafkaManager(c.globalParams, c.kafkaParams)
39 |
40 | if err != nil {
41 | return err
42 | }
43 |
44 | defer func() {
45 | manager.Close()
46 | cancel()
47 | }()
48 |
49 | if internal.IsEmpty(c.topic) {
50 | return errors.New("topic cannot be empty")
51 | }
52 |
53 | err = manager.CreatePartitions(c.topic, c.numberOfPartitions)
54 | if err == nil {
55 | fmt.Printf("The partitions of %s have been readjusted successfully.", c.topic)
56 | }
57 |
58 | return err
59 | }
60 |
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 |
3 | run:
4 | allow-parallel-runners: true
5 |
6 | linters:
7 | enable:
8 | - gosec
9 | - importas
10 | - govet
11 | settings:
12 | gosec:
13 | excludes:
14 | - G101
15 | - G115
16 | - G201
17 | - G402
18 | govet:
19 | enable:
20 | - nilness
21 | disable:
22 | - composites
23 | staticcheck:
24 | checks:
25 | - all
26 | - -S1001 # should use copy(to, from) instead of a loop, loop can be easier to read
27 | - -ST1003 # initialisms
28 | - -ST1005 # error strings should not be capitalized is fine
29 | - -QF1003 # could use tagged switch on if is fine
30 | - -QF1007 # could merge conditional assignment into variable declaration (if is easier to read)
        - -QF1008 # could remove embedded field "Address" from selector (it's easier to read)
        - -ST1016 # methods on the same type should have the same receiver name (it's nice, but not important to change)
33 | - -S1002 # should omit comparison to bool constant, can be simplified (== false is easier to read)
34 | - -S1008 # should use 'return len(s) > 4' instead of 'if len(s) > 4 { return true }; return false' (if can be easier to read)
35 | - -SA5011 # disable related information rule
36 |
37 | exclusions:
38 | presets:
39 | - comments
40 | - common-false-positives
41 | - legacy
42 | - std-error-handling
43 | rules:
44 | - path: (.+)\.go$
45 | text: 'nilness: range of nil slice'
46 | paths:
47 | - .github
48 | - bin
49 |
50 | formatters:
51 | enable:
52 | - gofmt
53 | exclusions:
54 | paths:
55 | - .github
56 | - bin
--------------------------------------------------------------------------------
/kafka/bootstrap.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "log"
7 | "time"
8 |
9 | "github.com/Shopify/sarama"
10 | "github.com/rcrowley/go-metrics"
11 | )
12 |
13 | func initClient(brokers []string, options ...Option) (sarama.Client, error) {
14 | if len(brokers) == 0 {
15 | return nil, errors.New("the brokers list cannot be empty")
16 | }
17 | ops := NewOptions()
18 | for _, option := range options {
19 | option(ops)
20 | }
21 |
22 | sarama.Logger = log.New(ops.logWriter, "KAFKA Client: ", log.LstdFlags)
23 | version, err := sarama.ParseKafkaVersion(ops.ClusterVersion)
24 | if err != nil {
25 | return nil, err
26 | }
27 |
28 | config := sarama.NewConfig()
29 | config.Version = version
30 | config.Consumer.Return.Errors = true
31 | config.ClientID = "Trubka"
32 |
33 | config.Producer.RequiredAcks = sarama.WaitForAll
34 | config.Producer.Return.Successes = true
35 | config.Producer.Partitioner = sarama.NewHashPartitioner
36 | config.Consumer.MaxWaitTime = 500 * time.Millisecond
37 |
38 | metrics.UseNilMetrics = true
39 | if ops.sasl != nil {
40 | config.Net.SASL.Enable = true
41 | config.Net.SASL.Enable = true
42 | config.Net.SASL.Version = ops.sasl.version
43 | config.Net.SASL.Mechanism = ops.sasl.mechanism
44 | config.Net.SASL.User = ops.sasl.username
45 | config.Net.SASL.Password = ops.sasl.password
46 | config.Net.SASL.SCRAMClientGeneratorFunc = ops.sasl.client
47 | }
48 |
49 | if ops.TLS != nil {
50 | config.Net.TLS.Enable = true
51 | config.Net.TLS.Config = ops.TLS
52 | }
53 |
54 | client, err := sarama.NewClient(brokers, config)
55 | if err != nil {
56 | return nil, fmt.Errorf("failed to initialise the Kafka client: %w", err)
57 | }
58 |
59 | return client, nil
60 | }
61 |
--------------------------------------------------------------------------------
/kafka/partition_consumer_mock.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "sync"
5 | "time"
6 |
7 | "github.com/Shopify/sarama"
8 | )
9 |
// partitionConsumerMock is an in-memory implementation of sarama's partition
// consumer, used by the consumer tests.
type partitionConsumerMock struct {
	// mux guards closed and the channels against concurrent Close/receive calls.
	mux      sync.Mutex
	closed   bool
	messages chan *sarama.ConsumerMessage
	errors   chan *sarama.ConsumerError
	// offset the offset assigned to the next synthetic message.
	offset    int64
	partition int32
	topic     string
}

// newPartitionConsumerMock creates a new mock for the given topic/partition,
// starting at the provided offset. Both channels are buffered (100 entries).
func newPartitionConsumerMock(topic string, partition int32, offset int64) *partitionConsumerMock {
	return &partitionConsumerMock{
		messages: make(chan *sarama.ConsumerMessage, 100),
		errors:   make(chan *sarama.ConsumerError, 100),
		topic:    topic,
		partition: partition,
		offset:   offset,
	}
}

// AsyncClose closes the mock. Unlike the real implementation, it is synchronous.
func (p *partitionConsumerMock) AsyncClose() {
	_ = p.Close()
}

// Close marks the consumer as closed and closes both channels.
//
// It is safe to call multiple times; subsequent calls are no-ops.
func (p *partitionConsumerMock) Close() error {
	p.mux.Lock()
	defer p.mux.Unlock()
	if p.closed {
		return nil
	}
	p.closed = true
	close(p.messages)
	close(p.errors)
	return nil
}

// receive pushes a synthetic message with the given timestamp onto the
// messages channel and advances the offset. It is a no-op once the consumer
// has been closed.
//
// NOTE(review): the send happens while the mutex is held; if the buffered
// channel ever fills up, this blocks and a concurrent Close would deadlock —
// acceptable for a test mock with a 100-entry buffer, but worth confirming.
func (p *partitionConsumerMock) receive(at time.Time) {
	p.mux.Lock()
	defer p.mux.Unlock()
	if p.closed {
		return
	}

	p.messages <- &sarama.ConsumerMessage{
		Timestamp: at,
		Key:       []byte("key"),
		Value:     []byte("value"),
		Topic:     p.topic,
		Partition: p.partition,
		Offset:    p.offset,
	}
	p.offset++
}

// Messages returns the channel of synthetic consumer messages.
func (p *partitionConsumerMock) Messages() <-chan *sarama.ConsumerMessage {
	return p.messages
}

// Errors returns the channel of synthetic consumer errors.
func (p *partitionConsumerMock) Errors() <-chan *sarama.ConsumerError {
	return p.errors
}

// HighWaterMarkOffset returns the mock's high water mark.
//
// NOTE(review): p.offset is already the offset of the next message to be
// produced, so returning offset+1 looks off by one — confirm against how the
// tests use it. The read is also not guarded by the mutex.
func (p *partitionConsumerMock) HighWaterMarkOffset() int64 {
	return p.offset + 1
}
--------------------------------------------------------------------------------
/internal/counter.go:
--------------------------------------------------------------------------------
1 | package internal
2 |
3 | import (
4 | "github.com/dustin/go-humanize"
5 |
6 | "github.com/xitonix/trubka/internal/output/format"
7 | "github.com/xitonix/trubka/internal/output/format/tabular"
8 | )
9 |
10 | type stats struct {
11 | success int64
12 | failure int64
13 | }
14 |
15 | // Counter represents a in-memory counter for consumed Kafka events.
16 | type Counter struct {
17 | topicStats map[string]*stats
18 | }
19 |
20 | // NewCounter creates a new instance of a counter.
21 | func NewCounter() *Counter {
22 | return &Counter{
23 | topicStats: make(map[string]*stats),
24 | }
25 | }
26 |
27 | // PrintAsTable prints the counter to stdout in tabular format.
28 | func (c *Counter) PrintAsTable(highlight bool) {
29 | if c == nil || len(c.topicStats) == 0 {
30 | return
31 | }
32 | table := tabular.NewTable(highlight,
33 | tabular.C("Topic").Align(tabular.AlignLeft),
34 | tabular.C("Succeeded"),
35 | tabular.C("Failed"))
36 |
37 | for topic, s := range c.topicStats {
38 | failed := format.RedIfTrue(humanize.Comma(s.failure), func() bool {
39 | return s.failure > 0
40 | }, highlight)
41 |
42 | succeeded := format.GreenIfTrue(humanize.Comma(s.success), func() bool {
43 | return s.success > 0
44 | }, highlight)
45 | table.AddRow(topic, succeeded, failed)
46 | }
47 | table.SetTitle("SUMMARY")
48 | table.TitleAlignment(tabular.AlignCenter)
49 | table.Render()
50 | }
51 |
52 | // IncrSuccess increases the success counter.
53 | func (c *Counter) IncrSuccess(topic string) {
54 | if _, ok := c.topicStats[topic]; !ok {
55 | c.topicStats[topic] = &stats{}
56 | }
57 | c.topicStats[topic].success++
58 | }
59 |
60 | // IncrFailure increases the failure counter.
61 | func (c *Counter) IncrFailure(topic string) {
62 | if _, ok := c.topicStats[topic]; !ok {
63 | c.topicStats[topic] = &stats{}
64 | }
65 | c.topicStats[topic].failure++
66 | }
67 |
--------------------------------------------------------------------------------
/kafka/broker.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/Shopify/sarama"
7 |
8 | "github.com/xitonix/trubka/internal"
9 | )
10 |
11 | // ControllerBrokerLabel controller node marker.
12 | const ControllerBrokerLabel = "*"
13 |
14 | // Broker represents a Kafka broker node.
15 | type Broker struct {
16 | // Address the raw address of the broker.
17 | Address string `json:"-"`
18 | // ID the broker id returned from the server.
19 | ID int32 `json:"id"`
20 | // Host the printable broker address.
21 | Host string `json:"host"`
22 | // IsController is true if the broker is a controller node.
23 | IsController bool `json:"controller"`
24 | *sarama.Broker `json:"-"`
25 | }
26 |
27 | // NewBroker creates a new instance of Kafka broker.
28 | func NewBroker(broker *sarama.Broker, controllerID int32) *Broker {
29 | address := broker.Addr()
30 | id := broker.ID()
31 | return &Broker{
32 | Address: address,
33 | Host: internal.RemovePort(address),
34 | ID: id,
35 | IsController: controllerID == id,
36 | Broker: broker,
37 | }
38 | }
39 |
40 | // MarkedHostName returns the marked Host name if the broker is a controller, otherwise the original Host value will be returned.
41 | func (b *Broker) MarkedHostName() string {
42 | if b.IsController {
43 | return b.Host + ControllerBrokerLabel
44 | }
45 | return b.Host
46 | }
47 |
48 | // String returns the string representation of the broker.
49 | func (b *Broker) String() string {
50 | if b == nil {
51 | return ""
52 | }
53 | return fmt.Sprintf("%d/%s", b.ID, b.Host)
54 | }
55 |
56 | // BrokersByID sorts the brokers by ID.
57 | type BrokersByID []*Broker
58 |
59 | func (b BrokersByID) Len() int {
60 | return len(b)
61 | }
62 |
63 | func (b BrokersByID) Swap(i, j int) {
64 | b[i], b[j] = b[j], b[i]
65 | }
66 |
67 | func (b BrokersByID) Less(i, j int) bool {
68 | return b[i].ID < b[j].ID
69 | }
70 |
--------------------------------------------------------------------------------
/kafka/sasl_authentication.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "crypto/sha256"
5 | "crypto/sha512"
6 | "hash"
7 | "strings"
8 |
9 | "github.com/Shopify/sarama"
10 | )
11 |
const (
	// SASLMechanismNone SASL authentication is not enabled.
	SASLMechanismNone = "none"
	// SASLMechanismPlain plain text authentication mode.
	SASLMechanismPlain = "plain"
	// SASLMechanismSCRAM256 sha-256 authentication mode.
	SASLMechanismSCRAM256 = "scram-sha-256"
	// SASLMechanismSCRAM512 sha-512 authentication mode.
	SASLMechanismSCRAM512 = "scram-sha-512"
)

// sasl carries the SASL authentication settings handed to the sarama configuration.
type sasl struct {
	// mechanism is the sarama SASL mechanism identifier.
	mechanism sarama.SASLMechanism
	username string
	password string
	// client builds a fresh SCRAM client; nil for the plain mechanism.
	client func() sarama.SCRAMClient
	// version is the sarama SASL handshake version.
	version int16
}
30 |
31 | // This will return nil if the mechanism is not valid.
32 | func newSASL(mechanism, username, password string, version SASLHandshakeVersion) *sasl {
33 | switch strings.ToLower(mechanism) {
34 | case SASLMechanismPlain:
35 | return &sasl{
36 | mechanism: sarama.SASLTypePlaintext,
37 | username: username,
38 | password: password,
39 | version: version.toSaramaVersion(),
40 | }
41 | case SASLMechanismSCRAM256:
42 | hashed := func() hash.Hash { return sha256.New() }
43 | return &sasl{
44 | client: func() sarama.SCRAMClient { return &xdgSCRAMClient{HashGeneratorFcn: hashed} },
45 | mechanism: sarama.SASLTypeSCRAMSHA256,
46 | username: username,
47 | password: password,
48 | version: version.toSaramaVersion(),
49 | }
50 | case SASLMechanismSCRAM512:
51 | hashed := func() hash.Hash { return sha512.New() }
52 | return &sasl{
53 | client: func() sarama.SCRAMClient { return &xdgSCRAMClient{HashGeneratorFcn: hashed} },
54 | mechanism: sarama.SASLTypeSCRAMSHA512,
55 | username: username,
56 | password: password,
57 | version: version.toSaramaVersion(),
58 | }
59 | default:
60 | return nil
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/kafka/options.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "crypto/tls"
5 | "io"
6 |
7 | "github.com/Shopify/sarama"
8 |
9 | "github.com/xitonix/trubka/internal"
10 | )
11 |
var (
	// DefaultClusterVersion default cluster version.
	DefaultClusterVersion = sarama.MaxVersion.String()
)

// Options holds the configuration settings for kafka consumer.
type Options struct {
	// DisableErrorReporting disables sending consumer errors to the Errors() channel.
	DisableErrorReporting bool
	// ClusterVersion kafka cluster version.
	ClusterVersion string
	// TLS configuration to connect to Kafka cluster.
	TLS *tls.Config
	// sasl holds the SASL settings; nil when SASL authentication is disabled.
	sasl *sasl
	// logWriter is the destination of the internal Sarama logs.
	logWriter io.Writer
}
28 |
29 | // NewOptions creates a new Options object with default values.
30 | func NewOptions() *Options {
31 | return &Options{
32 | DisableErrorReporting: false,
33 | ClusterVersion: DefaultClusterVersion,
34 | logWriter: io.Discard,
35 | }
36 | }
37 |
// Option represents a configuration function.
type Option func(options *Options)

// WithClusterVersion kafka cluster version.
// Empty/whitespace-only input falls back to DefaultClusterVersion.
func WithClusterVersion(version string) Option {
	return func(options *Options) {
		if internal.IsEmpty(version) {
			version = DefaultClusterVersion
		}
		options.ClusterVersion = version
	}
}
50 |
// WithSASL enables SASL authentication.
// If the mechanism is not one of the supported values, the sasl field
// stays nil and authentication remains disabled (see newSASL).
func WithSASL(mechanism, username, password, handshakeVersion string) Option {
	return func(options *Options) {
		options.sasl = newSASL(mechanism, username, password, SASLHandshakeVersion(handshakeVersion))
	}
}
57 |
// WithLogWriter sets the writer to write the internal Sarama logs to.
// The default (io.Discard) suppresses them entirely.
func WithLogWriter(writer io.Writer) Option {
	return func(options *Options) {
		options.logWriter = writer
	}
}
64 |
65 | // WithTLS enables TLS.
66 | func WithTLS(tls *tls.Config) Option {
67 | return func(options *Options) {
68 | options.TLS = tls
69 | }
70 | }
71 |
--------------------------------------------------------------------------------
/internal/utils.go:
--------------------------------------------------------------------------------
1 | package internal
2 |
import (
	"errors"
	"fmt"
	"net"
	"os"
	"os/signal"
	"regexp"
	"strings"
	"syscall"
	"time"
	"unicode"
	"unicode/utf8"
)
15 |
// IsEmpty returns true if the trimmed input is empty.
func IsEmpty(val string) bool {
	return strings.TrimSpace(val) == ""
}
20 |
21 | // FormatTime formats the time using time.RFC3339Nano layout.
22 | func FormatTime(t time.Time) string {
23 | return t.Format(time.RFC3339Nano)
24 | }
25 |
26 | // NotFoundError constructs a new NotFound error for the specified entity.
27 | func NotFoundError(entity, filterName string, ex *regexp.Regexp) error {
28 | msg := fmt.Sprintf("No %s has been found.", entity)
29 | if ex != nil {
30 | msg += fmt.Sprintf(" You might need to tweak the %s filter (%s).", filterName, ex.String())
31 | }
32 | return errors.New(msg)
33 | }
34 |
// WaitForCancellationSignal waits for the user to press Ctrl+C.
// It blocks the calling goroutine until SIGINT (os.Interrupt) or SIGTERM is received.
func WaitForCancellationSignal() {
	// Buffer of one so the notifier never blocks delivering the signal.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt, syscall.SIGTERM)
	<-signals
}
41 |
// RemovePort removes :Port from an address if exists.
//
// net.SplitHostPort is used so that bracketed IPv6 addresses such as
// "[::1]:9092" resolve to "::1" (the previous first-colon truncation
// returned "[" for them). Addresses without a port, with an empty host
// (":9092"), or that are otherwise unsplittable are returned unchanged.
func RemovePort(address string) string {
	host, _, err := net.SplitHostPort(address)
	if err == nil && host != "" {
		return host
	}
	return address
}
49 |
50 | // IgnoreRegexCase returns a case-insensitive regular expression.
51 | func IgnoreRegexCase(r *regexp.Regexp) (*regexp.Regexp, error) {
52 | if r == nil {
53 | return r, nil
54 | }
55 | ex, err := regexp.Compile("(?i)" + r.String())
56 | if err != nil {
57 | return nil, err
58 | }
59 | return ex, nil
60 | }
61 |
62 | // Title capitalises the first letter of the error message if the error is not nil, otherwise returns "".
63 | func Title(err error) string {
64 | if err == nil {
65 | return ""
66 | }
67 | input := err.Error()
68 | if input == "" {
69 | return ""
70 | }
71 | r, n := utf8.DecodeRuneInString(input)
72 | return string(unicode.ToUpper(r)) + input[n:]
73 | }
74 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 | [](https://goreportcard.com/report/github.com/xitonix/trubka)
3 | [](https://github.com/avelino/awesome-go#devops-tools)
4 |
5 |
6 |
**Trubka** is a Kafka CLI tool built in [Go](https://go.dev/) that gives you everything you need to
8 |
9 | * Manage, query and troubleshoot your Kafka clusters.
10 | * Consume [protocol buffer](https://developers.google.com/protocol-buffers/) and plain text messages from Kafka.
11 | * Publish protocol buffer and plain text messages to Kafka.
12 |
13 | ## Documentation
14 |
15 | - [Installation](https://github.com/xitonix/trubka/wiki)
16 | - [Cluster Administration](https://github.com/xitonix/trubka/wiki/Cluster-Administration)
17 | - [Consuming from Kafka](https://github.com/xitonix/trubka/wiki/Consume-from-Kafka)
18 | - [Publishing to Kafka](https://github.com/xitonix/trubka/wiki/Publish-to-Kafka)
19 |
20 | ## Acknowledgments
21 |
22 | Special thanks to **Joshua Humphries** for building the
23 | fascinating [protoreflect](https://github.com/jhump/protoreflect) package.
24 |
25 | I would also like to mention some of the amazing libraries and packages I used for building Trubka:
26 |
27 | - [sarama](https://github.com/Shopify/sarama) by the Shopify team
28 |
29 | - [kingpin](https://github.com/alecthomas/kingpin) and [chroma](https://github.com/alecthomas/chroma) by Alec Thomas
30 |
31 | - [go-pretty](https://github.com/jedib0t/go-pretty) by Naveen Mahalingam
32 |
33 | - [diskv](https://github.com/peterbourgon/diskv) by Peter Bourgon
34 |
35 | - [gofakeit](https://github.com/brianvoe/gofakeit/) by Brian Voelker
36 |
37 | - [go-humanize](https://github.com/dustin/go-humanize) by Dustin Sallings
38 |
39 | - [confdir](https://github.com/kirsle/configdir) by Noah Petherbridge
40 |
41 | - [go-homedir](https://github.com/mitchellh/go-homedir) by Mitchell Hashimoto
42 |
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .DEFAULT_GOAL := all
2 |
3 | EXECUTABLE=trubka
4 | WINDOWS=./bin/windows_amd64
5 | LINUX=./bin/linux_amd64
6 | DARWIN=./bin/darwin_amd64
7 | VERSION=$(shell git describe --tags --abbrev=0)
8 | COMMIT=$(shell git rev-parse HEAD)
9 | BUILT := $(shell date -u '+%a %d %b %Y %H:%M:%S GMT')
10 | RUNTIME=$(shell go version | cut -d' ' -f 3)
11 |
12 | prepare:
13 | @echo Cleaning the bin directory
14 | @rm -rfv ./bin/*
15 |
16 | windows:
17 | @echo Building Windows amd64 binaries
18 | @env GOOS=windows GOARCH=amd64 go build -v -o $(WINDOWS)/$(EXECUTABLE).exe -ldflags="-s -w -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.runtimeVer=$(RUNTIME) -X 'main.built=$(BUILT)'" *.go
19 |
20 | linux:
21 | @echo Building Linux amd64 binaries
22 | @env GOOS=linux GOARCH=amd64 go build -v -o $(LINUX)/$(EXECUTABLE) -ldflags="-s -w -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.runtimeVer=$(RUNTIME) -X 'main.built=$(BUILT)'" *.go
23 |
24 | darwin:
25 | @echo Building Mac amd64 binaries
26 | @env GOOS=darwin GOARCH=amd64 go build -v -o $(DARWIN)/$(EXECUTABLE) -ldflags="-s -w -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.runtimeVer=$(RUNTIME) -X 'main.built=$(BUILT)'" *.go
27 |
28 | ## Builds the binaries.
29 | build: windows linux darwin
30 | @echo Version: $(VERSION)
31 |
32 | test: ## Runs the unit tests.
33 | @echo Running unit tests
34 | @go test -v -race ./...
35 |
36 | package:
37 | @echo Creating the zip file
38 | @tar -C $(DARWIN) -cvzf ./bin/$(EXECUTABLE)_darwin-$(VERSION).tar.gz $(EXECUTABLE)
39 | @zip -j ./bin/$(EXECUTABLE)_windows-$(VERSION).zip $(WINDOWS)/$(EXECUTABLE).exe
40 | @tar -C $(LINUX) -cvzf ./bin/$(EXECUTABLE)_linux-$(VERSION).tar.gz $(EXECUTABLE)
41 | @echo Darwin Checksum:
42 | @shasum -a 256 ./bin/$(EXECUTABLE)_darwin-$(VERSION).tar.gz
43 |
# install copies the Darwin binary into /usr/local/bin. The original cp was
# missing its destination operand and always failed.
# NOTE(review): /usr/local/bin is the conventional target — confirm the
# intended install prefix.
install:
	@cp -pv $(DARWIN)/$(EXECUTABLE) /usr/local/bin/$(EXECUTABLE)
46 |
47 | help: ## Show this help.
48 | @fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##//'
49 |
50 | all: test prepare build package clean
51 |
52 | clean: ## Removes the artifacts.
53 | @rm -rf $(WINDOWS) $(LINUX) $(DARWIN)
54 |
55 | .PHONY: all
--------------------------------------------------------------------------------
/commands/produce/plain.go:
--------------------------------------------------------------------------------
1 | package produce
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "time"
7 |
8 | "gopkg.in/alecthomas/kingpin.v2"
9 |
10 | "github.com/xitonix/trubka/commands"
11 | "github.com/xitonix/trubka/commands/produce/template"
12 | "github.com/xitonix/trubka/internal"
13 | )
14 |
// plain holds the state of the "produce plain" sub-command.
type plain struct {
	kafkaParams *commands.KafkaParameters
	globalParams *commands.GlobalParameters
	// message is the raw content from the second CLI argument (may also be piped in).
	message string
	// key is the value passed through to the shared produce loop as the message key.
	key string
	// topic is the destination Kafka topic.
	topic string
	// count is the number of messages to publish.
	count uint64
	// parser renders the content template when random generation is enabled.
	parser *template.Parser
	// random enables template-based content generation in serialize.
	random bool
	// sleep is the delay passed to the shared produce loop.
	sleep time.Duration
}
26 |
// addPlainSubCommand registers the "produce plain" sub-command, its
// arguments (topic, content) and the shared producer flags.
func addPlainSubCommand(parent *kingpin.CmdClause, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
	cmd := &plain{
		kafkaParams: kafkaParams,
		globalParams: global,
		parser: template.NewParser(),
	}
	c := parent.Command("plain", "Publishes plain text messages to Kafka. The content can be arbitrary text, json, base64 or hex encoded strings.").Action(cmd.run)
	c.Arg("topic", "The topic to publish to.").Required().StringVar(&cmd.topic)
	c.Arg("content", "The message content. You can pipe the content in, or pass it as the command's second argument.").StringVar(&cmd.message)
	// Shared flags: --sleep, --key, random generation toggle and message count.
	addProducerFlags(c, &cmd.sleep, &cmd.key, &cmd.random, &cmd.count)
}
38 |
// run executes the plain text producer: it resolves the message content
// (argument or piped stdin via getValue), wires Ctrl+C to context
// cancellation and delegates to the shared produce loop.
func (c *plain) run(_ *kingpin.ParseContext) error {
	value, err := getValue(c.message)
	if err != nil {
		return err
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		// Cancel the producer context when SIGINT/SIGTERM arrives.
		internal.WaitForCancellationSignal()
		cancel()
	}()

	return produce(ctx, c.kafkaParams, c.globalParams, c.topic, c.key, value, c.serialize, c.count, c.sleep)
}
54 |
// serialize converts the (possibly templated) message value into bytes.
// Without --random the value is passed through as-is; otherwise it is run
// through the template parser first and echoed in verbose mode.
func (c *plain) serialize(value string) ([]byte, error) {
	if !c.random {
		return []byte(value), nil
	}
	value, err := c.parser.Parse(value)
	if err != nil {
		return nil, err
	}
	if c.globalParams.Verbosity >= internal.Verbose {
		fmt.Printf("%s\n", value)
	}
	return []byte(value), nil
}
68 |
--------------------------------------------------------------------------------
/commands/deletion/local_offset.go:
--------------------------------------------------------------------------------
1 | package deletion
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "strings"
7 |
8 | "gopkg.in/alecthomas/kingpin.v2"
9 |
10 | "github.com/xitonix/trubka/commands"
11 | "github.com/xitonix/trubka/internal"
12 | "github.com/xitonix/trubka/kafka"
13 | )
14 |
// localOffsets holds the state of the "delete local-offsets" sub-command.
type localOffsets struct {
	globalParams *commands.GlobalParameters
	// topic to delete the offsets of; empty or "ALL" means every topic.
	topic string
	// environment whose local offsets will be deleted.
	environment string
}

// addDeleteLocalOffsetsSubCommand registers the "delete local-offsets"
// sub-command and its environment/topic arguments.
func addDeleteLocalOffsetsSubCommand(parent *kingpin.CmdClause, params *commands.GlobalParameters) {
	cmd := &localOffsets{
		globalParams: params,
	}
	c := parent.Command("local-offsets", "Deletes the local offsets from the given environment.").Action(cmd.run)
	c.Arg("environment", "The case-sensitive environment of which the local offsets will be deleted.").
		Required().
		StringVar(&cmd.environment)
	c.Arg("topic", "The case-sensitive topic name to delete the local offsets of. Set to ALL to delete all the topics within the specified environment.").StringVar(&cmd.topic)
}
31 |
// run resolves the on-disk offset path for the environment/topic and, after
// confirmation, deletes either a single topic's offset file or the whole
// environment directory.
func (c *localOffsets) run(_ *kingpin.ParseContext) error {
	offsetManager := kafka.NewLocalOffsetManager(internal.NewPrinter(c.globalParams.Verbosity, os.Stdout))
	path, err := offsetManager.GetOffsetFileOrRoot(c.environment, c.topic)
	if err != nil {
		return err
	}

	// Single-topic mode: a topic was given and it is not the ALL keyword.
	topicMode := !internal.IsEmpty(c.topic) && !strings.EqualFold(c.topic, "all")

	var msg string
	if topicMode {
		msg = fmt.Sprintf("The local offsets of %s topic will be deleted from %s environment. Are you sure", c.topic, c.environment)
	} else {
		msg = fmt.Sprintf("The local offsets of all the topics will be deleted from %s environment. Are you sure", c.environment)
	}
	return confirmAndDelete(msg, path, !topicMode)
}
49 |
50 | func confirmAndDelete(message, path string, all bool) error {
51 | if commands.AskForConfirmation(message) {
52 | var err error
53 | if all {
54 | err = os.RemoveAll(path)
55 | } else {
56 | err = os.Remove(path)
57 | }
58 | if err != nil {
59 | return err
60 | }
61 | fmt.Println("The local offsets have been removed.")
62 | }
63 | return nil
64 | }
65 |
--------------------------------------------------------------------------------
/commands/create/topic.go:
--------------------------------------------------------------------------------
1 | package create
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "time"
7 |
8 | "gopkg.in/alecthomas/kingpin.v2"
9 |
10 | "github.com/xitonix/trubka/commands"
11 | "github.com/xitonix/trubka/internal"
12 | )
13 |
// topic holds the state of the "create topic" sub-command.
type topic struct {
	globalParams *commands.GlobalParameters
	kafkaParams *commands.KafkaParameters
	// topic is the name of the topic to create.
	topic string
	numberOfPartitions int32
	replicationFactor int16
	// retention is the optional topic retention period.
	retention time.Duration
	// validateOnly asks the server to validate the request without creating the topic.
	validateOnly bool
}
23 |
// addCreateTopicSubCommand registers the "create topic" sub-command with its
// topic argument and partition/replication/validation/retention flags.
func addCreateTopicSubCommand(parent *kingpin.CmdClause, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
	cmd := &topic{
		globalParams: global,
		kafkaParams: kafkaParams,
	}
	c := parent.Command("topic", "Creates a new topic.").Action(cmd.run)
	c.Arg("topic", "The topic name.").
		Required().
		StringVar(&cmd.topic)

	c.Flag("number-of-partitions", "Number of partitions.").
		Short('p').
		Required().
		NoEnvar().
		Int32Var(&cmd.numberOfPartitions)

	c.Flag("replication-factor", "Replication factor.").
		Short('r').
		Required().
		NoEnvar().
		Int16Var(&cmd.replicationFactor)

	c.Flag("validate-only", "Validates the request instead of creating the topic.").
		Short('A').
		NoEnvar().
		BoolVar(&cmd.validateOnly)

	c.Flag("retention", "Topic retention period. Examples 300ms, 150s, 1.5h or 2h45m.").
		NoEnvar().
		DurationVar(&cmd.retention)
}
55 |
56 | func (c *topic) run(_ *kingpin.ParseContext) error {
57 | manager, _, cancel, err := commands.InitKafkaManager(c.globalParams, c.kafkaParams)
58 |
59 | if err != nil {
60 | return err
61 | }
62 |
63 | defer func() {
64 | manager.Close()
65 | cancel()
66 | }()
67 |
68 | if internal.IsEmpty(c.topic) {
69 | return errors.New("topic cannot be empty")
70 | }
71 |
72 | err = manager.CreateTopic(c.topic, c.numberOfPartitions, c.replicationFactor, c.validateOnly, c.retention)
73 | if err == nil {
74 | if c.validateOnly {
75 | fmt.Println("The server WILL ACCEPT the request.")
76 | } else {
77 | fmt.Printf("Topic %s has been created successfully.", c.topic)
78 | }
79 | }
80 |
81 | return err
82 | }
83 |
--------------------------------------------------------------------------------
/protobuf/file_finder.go:
--------------------------------------------------------------------------------
1 | package protobuf
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "os"
7 | "path/filepath"
8 | "strings"
9 |
10 | "github.com/mitchellh/go-homedir"
11 |
12 | "github.com/xitonix/trubka/internal"
13 | )
14 |
// fileFinder walks a directory tree looking for protobuf contracts.
type fileFinder struct {
	// root is the expanded, validated root directory of the search.
	root string
	// verbosity controls the progress output of ls/dirs.
	verbosity internal.VerbosityLevel
}

// newFileFinder creates a file finder rooted at the given directory.
// A leading "~" is expanded to the user's home directory, and the root must
// exist and be a directory.
func newFileFinder(verbosity internal.VerbosityLevel, root string) (*fileFinder, error) {
	if strings.HasPrefix(root, "~") {
		expanded, err := homedir.Expand(root)
		if err != nil {
			return nil, err
		}
		root = expanded
	}
	dir, err := os.Stat(root)
	if err != nil {
		return nil, err
	}
	if !dir.IsDir() {
		return nil, fmt.Errorf("%s is not a directory", root)
	}
	return &fileFinder{
		root: root,
		verbosity: verbosity,
	}, nil
}
40 |
41 | func (f *fileFinder) ls(ctx context.Context) ([]string, error) {
42 | var files []string
43 | if f.verbosity >= internal.VeryVerbose {
44 | fmt.Printf("Looking for proto contracts in %s\n", f.root)
45 | }
46 | err := filepath.Walk(f.root, func(path string, fileInfo os.FileInfo, err error) error {
47 | select {
48 | case <-ctx.Done():
49 | return ctx.Err()
50 | default:
51 | if err != nil {
52 | return err
53 | }
54 | isDir := fileInfo.IsDir()
55 | if f.verbosity >= internal.VeryVerbose && isDir {
56 | fmt.Printf("Loading %s\n", path)
57 | }
58 | if !isDir && strings.HasSuffix(strings.ToLower(fileInfo.Name()), ".proto") {
59 | if f.verbosity >= internal.Chatty {
60 | fmt.Printf("Proto file loaded %s\n", path)
61 | }
62 | files = append(files, path)
63 | }
64 | return nil
65 | }
66 | })
67 | if err != nil {
68 | return nil, err
69 | }
70 | return files, nil
71 | }
72 |
73 | func (f *fileFinder) dirs(ctx context.Context) ([]string, error) {
74 | var dirs []string
75 | err := filepath.Walk(f.root, func(path string, fileInfo os.FileInfo, err error) error {
76 | select {
77 | case <-ctx.Done():
78 | return ctx.Err()
79 | default:
80 | if err != nil {
81 | return err
82 | }
83 | if fileInfo.IsDir() {
84 | if f.verbosity >= internal.Chatty {
85 | fmt.Printf("Import path detected %s\n", path)
86 | }
87 | dirs = append(dirs, path)
88 | }
89 | return nil
90 | }
91 | })
92 | if err != nil {
93 | return nil, err
94 | }
95 | return dirs, nil
96 | }
97 |
--------------------------------------------------------------------------------
/internal/output/format/tabular/table.go:
--------------------------------------------------------------------------------
1 | package tabular
2 |
3 | import (
4 | "os"
5 | "runtime"
6 |
7 | "github.com/jedib0t/go-pretty/table"
8 | "github.com/jedib0t/go-pretty/text"
9 | )
10 |
// Table represents a new table to print Tabular output.
type Table struct {
	// writer is the underlying go-pretty table writer (mirrored to stdout).
	writer table.Writer
	// style points at the writer's style so the setters below can mutate it.
	style *table.Style
}
16 |
// NewTable creates a new table that renders to stdout.
//
// The "light" style is used on Windows (presumably for console
// compatibility) and the "rounded" style elsewhere. Each Column supplies
// its header text and per-column configuration; enableColor is forwarded
// to the column configuration.
func NewTable(enableColor bool, columns ...*Column) *Table {
	t := table.NewWriter()
	if runtime.GOOS == "windows" {
		t.SetStyle(table.StyleLight)
	} else {
		t.SetStyle(table.StyleRounded)
	}
	t.SetOutputMirror(os.Stdout)
	headers := make(table.Row, len(columns))
	configs := make([]table.ColumnConfig, len(columns))
	for i, column := range columns {
		headers[i] = column.Header
		configs[i] = column.configuration(enableColor)
	}
	t.AppendHeader(headers)
	t.SetColumnConfigs(configs)
	style := t.Style()
	// Defaults: left-aligned title, separated rows, headers/footers printed
	// verbatim (no upper-casing).
	style.Title.Align = text.AlignLeft
	style.Options.SeparateRows = true
	style.Format.Header = text.FormatDefault
	style.Format.Footer = text.FormatDefault
	return &Table{
		writer: t,
		style: style,
	}
}
44 |
// TitleAlignment sets the alignment of the title.
func (t *Table) TitleAlignment(alignment Alignment) {
	t.style.Title.Align = text.Align(alignment)
}
49 |
50 | // AddRow adds a new row to the table.
51 | func (t *Table) AddRow(values ...interface{}) {
52 | row := make(table.Row, len(values))
53 | for i, value := range values {
54 | row[i] = value
55 | }
56 | t.writer.AppendRow(row)
57 | }
58 |
// SetTitle sets the title of the table.
func (t *Table) SetTitle(title string) {
	t.writer.SetTitle(title)
}

// SetCaption sets the caption of the table.
// A leading space is added so the caption lines up under the table border.
func (t *Table) SetCaption(caption string) {
	t.writer.SetCaption(" " + caption)
}

// DisableRowSeparators disables the separator lines between the table rows.
func (t *Table) DisableRowSeparators() {
	t.style.Options.SeparateRows = false
}
73 |
74 | // AddFooter use "" for the columns without any footer value.
75 | func (t *Table) AddFooter(values ...interface{}) {
76 | row := make(table.Row, len(values))
77 | for i, value := range values {
78 | row[i] = value
79 | }
80 | t.writer.AppendFooter(row)
81 | }
82 |
// Render renders the table into stdout.
func (t *Table) Render() {
	t.writer.Render()
}
87 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/xitonix/trubka
2 |
3 | go 1.24.2
4 |
5 | require (
6 | github.com/Shopify/sarama v1.27.2
7 | github.com/alecthomas/chroma v0.7.3
8 | github.com/araddon/dateparse v0.0.0-20200409225146-d820a6159ab1
9 | github.com/brianvoe/gofakeit/v4 v4.3.0
10 | github.com/dustin/go-humanize v1.0.0
11 | github.com/golang/protobuf v1.4.2
12 | github.com/jedib0t/go-pretty v1.0.1-0.20200513162803-d24d83bda5d4
13 | github.com/jhump/protoreflect v1.7.0
14 | github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f
15 | github.com/mitchellh/go-homedir v1.1.0
16 | github.com/peterbourgon/diskv v2.0.1+incompatible
17 | github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
18 | github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
19 | gopkg.in/alecthomas/kingpin.v2 v2.2.6
20 | )
21 |
22 | require (
23 | github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
24 | github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
25 | github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
26 | github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 // indirect
27 | github.com/davecgh/go-spew v1.1.1 // indirect
28 | github.com/dlclark/regexp2 v1.2.0 // indirect
29 | github.com/eapache/go-resiliency v1.2.0 // indirect
30 | github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect
31 | github.com/eapache/queue v1.1.0 // indirect
32 | github.com/go-openapi/errors v0.19.6 // indirect
33 | github.com/go-openapi/strfmt v0.19.5 // indirect
34 | github.com/go-stack/stack v1.8.0 // indirect
35 | github.com/golang/snappy v0.0.1 // indirect
36 | github.com/google/btree v1.0.0 // indirect
37 | github.com/hashicorp/go-uuid v1.0.2 // indirect
38 | github.com/jcmturner/gofork v1.0.0 // indirect
39 | github.com/klauspost/compress v1.11.0 // indirect
40 | github.com/mattn/go-runewidth v0.0.9 // indirect
41 | github.com/mitchellh/mapstructure v1.3.3 // indirect
42 | github.com/pierrec/lz4 v2.5.2+incompatible // indirect
43 | github.com/xdg/stringprep v1.0.0 // indirect
44 | go.mongodb.org/mongo-driver v1.3.5 // indirect
45 | golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a // indirect
46 | golang.org/x/net v0.0.0-20200904194848-62affa334b73 // indirect
47 | golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c // indirect
48 | golang.org/x/text v0.3.3 // indirect
49 | google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7 // indirect
50 | google.golang.org/protobuf v1.25.0 // indirect
51 | gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
52 | gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
53 | gopkg.in/jcmturner/gokrb5.v7 v7.5.0 // indirect
54 | gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
55 | )
56 |
--------------------------------------------------------------------------------
/kafka/partition_offset.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "bytes"
5 | "encoding/gob"
6 | "fmt"
7 | "sort"
8 | "strings"
9 | )
10 |
// PartitionOffset represents a map of partition offset pairs.
type PartitionOffset map[int32]Offset

// ToJSON returns an object ready to be serialised into json string.
// A nil receiver is returned as nil so it serialises to JSON null.
func (p PartitionOffset) ToJSON() interface{} {
	if p == nil {
		return nil
	}
	// Local wrapper type fixes the JSON field names for each entry.
	type offset struct {
		Current int64 `json:"current_offset"`
		Latest int64 `json:"latest_offset"`
		Lag int64 `json:"lag"`
	}
	output := make(map[int32]offset, len(p))
	for partition, off := range p {
		output[partition] = offset{
			Current: off.Current,
			Latest: off.Latest,
			Lag: off.Lag(),
		}
	}
	return output
}
34 |
35 | // SortPartitions returns a list of sorted partitions.
36 | func (p PartitionOffset) SortPartitions() []int {
37 | sorted := make([]int, 0)
38 | if len(p) == 0 {
39 | return sorted
40 | }
41 | for partition := range p {
42 | sorted = append(sorted, int(partition))
43 | }
44 | sort.Ints(sorted)
45 | return sorted
46 | }
47 |
// serialises the offset map and returns the bytes as well as the checksum string of the current values.
// Only entries with a valid (>= 0) current offset are written. The checksum
// string is the fmt rendering of the map with the "map" prefix stripped;
// fmt prints map keys in sorted order, so it is deterministic.
func (p PartitionOffset) marshal() (string, []byte, error) {
	if len(p) == 0 {
		return "", []byte{}, nil
	}
	toWrite := make(map[int32]int64)
	for pt, of := range p {
		if of.Current >= 0 {
			toWrite[pt] = of.Current
		}
	}
	// Nothing valid to persist.
	if len(toWrite) == 0 {
		return "", nil, nil
	}
	buff := bytes.Buffer{}
	enc := gob.NewEncoder(&buff)
	err := enc.Encode(toWrite)
	if err != nil {
		return "", nil, err
	}
	return strings.Replace(fmt.Sprintf("%v", toWrite), "map", "", 1), buff.Bytes(), nil
}
70 |
71 | func (p PartitionOffset) copyTo(dest PartitionOffset) {
72 | if len(p) == 0 {
73 | return
74 | }
75 | if dest == nil {
76 | dest = make(PartitionOffset)
77 | }
78 | for partition, offset := range p {
79 | if offset.Current >= 0 {
80 | dest[partition] = offset
81 | }
82 | }
83 | }
84 |
85 | // ToPartitionOffset creates a new PartitionOffset map from a raw map.
86 | //
87 | // Set latest parameter to true, if you would like to set the Latest offset value instead of the Current value.
88 | func ToPartitionOffset(po map[int32]int64, latest bool) PartitionOffset {
89 | result := make(PartitionOffset)
90 | for partition, offset := range po {
91 | off := Offset{}
92 | if latest {
93 | off.Latest = offset
94 | } else {
95 | off.Current = offset
96 | }
97 | result[partition] = off
98 | }
99 | return result
100 | }
101 |
--------------------------------------------------------------------------------
/internal/json_message_processor.go:
--------------------------------------------------------------------------------
1 | package internal
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "io"
8 | "time"
9 | )
10 |
// JSONIndentation the indentation of JSON output.
const JSONIndentation = "  "

// JSONMessageProcessor prepares json output for printing.
type JSONMessageProcessor struct {
	// outputEncoding is the requested output format.
	outputEncoding string
	enableColor bool
	// highlighter colourises indented JSON output.
	highlighter *JSONHighlighter
	// indent is true when the output format requests indented JSON.
	indent bool
	// inclusions selects which message metadata to inject into the output.
	inclusions *MessageMetadata
}
22 |
// NewJSONMessageProcessor creates a new instance of JSON message processor.
// Indented output is enabled when outputFormat equals JSONIndentEncoding.
func NewJSONMessageProcessor(
	outputFormat string,
	inclusions *MessageMetadata,
	enableColor bool,
	highlightStyle string) *JSONMessageProcessor {
	return &JSONMessageProcessor{
		outputEncoding: outputFormat,
		inclusions: inclusions,
		enableColor: enableColor,
		highlighter: NewJSONHighlighter(highlightStyle, enableColor),
		indent: outputFormat == JSONIndentEncoding,
	}
}
37 |
// Process prepares json output for printing.
//
// The method injects the metadata into the json object if required. When no
// metadata is requested the message is passed through (highlighted only in
// indented mode). Requested fields are wrapped around the raw message;
// omitempty hides the ones that were not asked for.
func (j *JSONMessageProcessor) Process(message, key []byte, ts time.Time, topic string, partition int32, offset int64) ([]byte, error) {
	if !j.inclusions.IsRequested() {
		return j.highlight(message), nil
	}

	output := struct {
		Topic string `json:"topic,omitempty"`
		Timestamp string `json:"timestamp,omitempty"`
		Partition *int32 `json:"partition,omitempty"`
		PartitionKey string `json:"key,omitempty"`
		Offset *int64 `json:"offset,omitempty"`
		Message json.RawMessage `json:"message"`
	}{
		Message: message,
	}

	if j.inclusions.Topic {
		output.Topic = topic
	}

	if j.inclusions.Partition {
		output.Partition = &partition
	}

	if j.inclusions.Offset {
		output.Offset = &offset
	}

	if j.inclusions.Key {
		// The partition key is rendered as upper-case hex.
		output.PartitionKey = fmt.Sprintf("%X", key)
	}

	if j.inclusions.Timestamp {
		output.Timestamp = FormatTime(ts)
	}
	var err error
	if j.indent {
		message, err = json.MarshalIndent(output, "", JSONIndentation)
		if err != nil {
			return nil, err
		}
	} else {
		// NOTE(review): json.Marshal already emits compact JSON, so the
		// Compact + ReadAll round trip below appears redundant — confirm
		// before simplifying (removing it would also orphan the bytes/io
		// imports of this file).
		marshal, err := json.Marshal(output)
		if err != nil {
			return nil, err
		}
		var compact bytes.Buffer
		if err = json.Compact(&compact, marshal); err != nil {
			return nil, err
		}
		message, err = io.ReadAll(&compact)
		if err != nil {
			return nil, err
		}
	}

	return j.highlight(message), nil
}
99 |
// highlight colourises the input when indented output is enabled;
// compact output is returned untouched.
func (j *JSONMessageProcessor) highlight(input []byte) []byte {
	if j.indent {
		return j.highlighter.Highlight(input)
	}
	return input
}
106 |
--------------------------------------------------------------------------------
/protobuf/marshaller.go:
--------------------------------------------------------------------------------
1 | package protobuf
2 |
3 | import (
4 | "encoding/base64"
5 | "fmt"
6 | "strings"
7 | "time"
8 |
9 | //nolint:staticcheck
10 | "github.com/golang/protobuf/jsonpb"
11 | "github.com/jhump/protoreflect/dynamic"
12 |
13 | "github.com/xitonix/trubka/internal"
14 | )
15 |
// Marshaller protocol buffers output serializer.
type Marshaller struct {
	// outputFormat is the normalised (lower-case, trimmed) output encoding.
	outputFormat string
	// inclusions selects which message metadata to render alongside the payload.
	inclusions *internal.MessageMetadata
	enableColor bool
	// jsonMarshaller serialises dynamic proto messages to JSON.
	jsonMarshaller *jsonpb.Marshaler
	// jsonProcessor injects metadata into JSON output.
	jsonProcessor *internal.JSONMessageProcessor
}
24 |
// NewMarshaller creates a new protocol buffer Marshaller.
// The output format is normalised to lower case; indentation is enabled
// only for the JSON-indent encoding.
func NewMarshaller(
	outputFormat string,
	inclusions *internal.MessageMetadata,
	enableColor bool,
	highlightStyle string) *Marshaller {
	outputFormat = strings.TrimSpace(strings.ToLower(outputFormat))
	m := &Marshaller{
		outputFormat: outputFormat,
		inclusions: inclusions,
		enableColor: enableColor,
		jsonProcessor: internal.NewJSONMessageProcessor(
			outputFormat,
			inclusions,
			enableColor,
			highlightStyle),
	}

	var indentation string
	if m.outputFormat == internal.JSONIndentEncoding {
		indentation = internal.JSONIndentation
	}
	m.jsonMarshaller = newJSONMarshaller(indentation)
	return m
}
50 |
51 | // Marshal serialises the proto message into bytes.
52 | func (m *Marshaller) Marshal(msg *dynamic.Message, key []byte, ts time.Time, topic string, partition int32, offset int64) ([]byte, error) {
53 | var (
54 | result []byte
55 | err error
56 | )
57 |
58 | switch m.outputFormat {
59 | case internal.Base64Encoding:
60 | result, err = m.marshalBase64(msg)
61 | case internal.HexEncoding:
62 | result, err = m.marshalHex(msg)
63 | default:
64 | message, err := msg.MarshalJSONPB(m.jsonMarshaller)
65 | if err != nil {
66 | return nil, err
67 | }
68 | return m.jsonProcessor.Process(message, key, ts, topic, partition, offset)
69 | }
70 |
71 | if err != nil {
72 | return nil, err
73 | }
74 |
75 | result = m.inclusions.Render(key, result, ts, topic, partition, offset, m.outputFormat == internal.Base64Encoding)
76 |
77 | return result, nil
78 | }
79 |
80 | func (m *Marshaller) marshalBase64(msg *dynamic.Message) ([]byte, error) {
81 | output, err := msg.Marshal()
82 | if err != nil {
83 | return nil, err
84 | }
85 | buf := make([]byte, base64.StdEncoding.EncodedLen(len(output)))
86 | base64.StdEncoding.Encode(buf, output)
87 | return buf, nil
88 | }
89 |
90 | func newJSONMarshaller(indent string) *jsonpb.Marshaler {
91 | return &jsonpb.Marshaler{
92 | EnumsAsInts: false,
93 | EmitDefaults: false,
94 | Indent: indent,
95 | OrigName: true,
96 | AnyResolver: nil,
97 | }
98 | }
99 |
100 | func (m *Marshaller) marshalHex(msg *dynamic.Message) ([]byte, error) {
101 | output, err := msg.Marshal()
102 | if err != nil {
103 | return nil, err
104 | }
105 | out := []byte(fmt.Sprintf("%X", output))
106 | return out, nil
107 | }
108 |
--------------------------------------------------------------------------------
/commands/list/local_topics.go:
--------------------------------------------------------------------------------
1 | package list
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "regexp"
7 | "sort"
8 |
9 | "gopkg.in/alecthomas/kingpin.v2"
10 |
11 | "github.com/xitonix/trubka/commands"
12 | "github.com/xitonix/trubka/internal"
13 | "github.com/xitonix/trubka/internal/output"
14 | "github.com/xitonix/trubka/internal/output/format"
15 | "github.com/xitonix/trubka/internal/output/format/list"
16 | "github.com/xitonix/trubka/internal/output/format/tabular"
17 | "github.com/xitonix/trubka/kafka"
18 | )
19 |
// listLocalTopics implements the `list local-topics` sub command.
type listLocalTopics struct {
	globalParams *commands.GlobalParameters
	// topicsFilter optional regular expression to narrow down the topics.
	topicsFilter *regexp.Regexp
	// envFilter optional case-insensitive regular expression to narrow down the environments.
	envFilter *regexp.Regexp
	// format the requested output format (json/table/tree/plain).
	format string
	// style the output style (used by the JSON printer).
	style string
}
27 |
28 | func addLocalTopicsSubCommand(parent *kingpin.CmdClause, params *commands.GlobalParameters) {
29 | cmd := &listLocalTopics{
30 | globalParams: params,
31 | }
32 | c := parent.Command("local-topics", "Lists the locally stored topics and the environments.").Action(cmd.run)
33 | c.Flag("topic-filter", "An optional regular expression to filter the topics by.").Short('t').RegexpVar(&cmd.topicsFilter)
34 | c.Flag("environment-filter", "An optional case-insensitive regular expression to filter the environments by.").Short('e').RegexpVar(&cmd.envFilter)
35 | commands.AddFormatFlag(c, &cmd.format, &cmd.style)
36 | }
37 |
38 | func (l *listLocalTopics) run(_ *kingpin.ParseContext) error {
39 | var err error
40 | l.envFilter, err = internal.IgnoreRegexCase(l.envFilter)
41 | if err != nil {
42 | return fmt.Errorf("invalid environment filter: %w", err)
43 | }
44 |
45 | offsetManager := kafka.NewLocalOffsetManager(internal.NewPrinter(l.globalParams.Verbosity, os.Stdout))
46 | localStore, err := offsetManager.List(l.topicsFilter, l.envFilter)
47 | if err != nil {
48 | return err
49 | }
50 |
51 | if len(localStore) == 0 {
52 | fmt.Println("No topic offsets have been stored locally.")
53 | }
54 |
55 | switch l.format {
56 | case commands.JSONFormat:
57 | return output.PrintAsJSON(localStore, l.style, l.globalParams.EnableColor)
58 | case commands.TableFormat:
59 | return l.printAsTable(localStore)
60 | case commands.TreeFormat:
61 | return l.printAsList(localStore, false)
62 | case commands.PlainTextFormat:
63 | return l.printAsList(localStore, true)
64 | }
65 | return nil
66 | }
67 |
68 | func (l *listLocalTopics) printAsTable(store map[string][]string) error {
69 | for env, topics := range store {
70 | table := tabular.NewTable(l.globalParams.EnableColor, tabular.C(format.WithCount(env, len(topics))).Align(tabular.AlignLeft).MinWidth(60))
71 | sort.Strings(topics)
72 | for _, topic := range topics {
73 | table.AddRow(format.SpaceIfEmpty(topic))
74 | }
75 | table.AddFooter(fmt.Sprintf("Total: %d", len(topics)))
76 | table.Render()
77 | output.NewLines(1)
78 | }
79 | return nil
80 | }
81 |
82 | func (l *listLocalTopics) printAsList(store map[string][]string, plain bool) error {
83 | ls := list.New(plain)
84 | for env, topics := range store {
85 | ls.AddItem(env)
86 | ls.Indent()
87 | sort.Strings(topics)
88 | for _, topic := range topics {
89 | ls.AddItem(topic)
90 | }
91 | ls.UnIndent()
92 | }
93 | ls.Render()
94 | return nil
95 | }
96 |
--------------------------------------------------------------------------------
/commands/list/topics.go:
--------------------------------------------------------------------------------
1 | package list
2 |
3 | import (
4 | "fmt"
5 | "regexp"
6 | "sort"
7 |
8 | "github.com/dustin/go-humanize"
9 | "gopkg.in/alecthomas/kingpin.v2"
10 |
11 | "github.com/xitonix/trubka/commands"
12 | "github.com/xitonix/trubka/internal"
13 | "github.com/xitonix/trubka/internal/output"
14 | "github.com/xitonix/trubka/internal/output/format"
15 | "github.com/xitonix/trubka/internal/output/format/list"
16 | "github.com/xitonix/trubka/internal/output/format/tabular"
17 | "github.com/xitonix/trubka/kafka"
18 | )
19 |
// topics implements the `list topics` sub command.
type topics struct {
	kafkaParams  *commands.KafkaParameters
	globalParams *commands.GlobalParameters
	// topicFilter optional regular expression to narrow down the topics.
	topicFilter *regexp.Regexp
	// format the requested output format (json/table/tree/plain).
	format string
	// style the output style (used by the JSON printer).
	style string
}
27 |
28 | func addTopicsSubCommand(parent *kingpin.CmdClause, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
29 | cmd := &topics{
30 | kafkaParams: kafkaParams,
31 | globalParams: global,
32 | }
33 | c := parent.Command("topics", "Loads the existing topics from the server.").Action(cmd.run)
34 | c.Flag("topic-filter", "An optional regular expression to filter the topics by.").
35 | Short('t').
36 | RegexpVar(&cmd.topicFilter)
37 | commands.AddFormatFlag(c, &cmd.format, &cmd.style)
38 | }
39 |
40 | func (c *topics) run(_ *kingpin.ParseContext) error {
41 | manager, ctx, cancel, err := commands.InitKafkaManager(c.globalParams, c.kafkaParams)
42 |
43 | if err != nil {
44 | return err
45 | }
46 |
47 | defer func() {
48 | manager.Close()
49 | cancel()
50 | }()
51 |
52 | topics, err := manager.GetTopics(ctx, c.topicFilter)
53 | if err != nil {
54 | return err
55 | }
56 |
57 | if len(topics) == 0 {
58 | return internal.NotFoundError("topic", "topic", c.topicFilter)
59 | }
60 |
61 | sort.Sort(kafka.TopicsByName(topics))
62 |
63 | switch c.format {
64 | case commands.JSONFormat:
65 | return output.PrintAsJSON(topics, c.style, c.globalParams.EnableColor)
66 | case commands.TableFormat:
67 | return c.printAsTable(topics)
68 | case commands.TreeFormat:
69 | return c.printAsList(topics, false)
70 | case commands.PlainTextFormat:
71 | return c.printAsList(topics, true)
72 | default:
73 | return nil
74 | }
75 | }
76 |
77 | func (c *topics) printAsList(topics []kafka.Topic, plain bool) error {
78 | l := list.New(plain)
79 | for _, topic := range topics {
80 | l.AddItem(topic.Name)
81 | }
82 | l.Render()
83 | return nil
84 | }
85 |
86 | func (c *topics) printAsTable(topics []kafka.Topic) error {
87 | table := tabular.NewTable(c.globalParams.EnableColor,
88 | tabular.C("Topic").Align(tabular.AlignLeft),
89 | tabular.C("Number of Partitions").FAlign(tabular.AlignCenter),
90 | tabular.C("Replication Factor"),
91 | )
92 | table.SetTitle(format.WithCount("Topics", len(topics)))
93 |
94 | var totalPartitions int64
95 | for _, topic := range topics {
96 | totalPartitions += int64(topic.NumberOfPartitions)
97 | table.AddRow(topic.Name, topic.NumberOfPartitions, topic.ReplicationFactor)
98 | }
99 | table.AddFooter(fmt.Sprintf("Total: %s", humanize.Comma(int64(len(topics)))), humanize.Comma(totalPartitions), " ")
100 | table.Render()
101 | return nil
102 | }
103 |
--------------------------------------------------------------------------------
/release/windows/release.wxs:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
11 |
19 |
20 |
21 |
22 |
25 |
26 |
27 |
28 |
29 |
30 | ((VersionNT > 601) OR (VersionNT = 601 AND ServicePackLevel >= 1))
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
51 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
--------------------------------------------------------------------------------
/internal/output/format/tabular/column.go:
--------------------------------------------------------------------------------
1 | package tabular
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 |
7 | "github.com/dustin/go-humanize"
8 | "github.com/jedib0t/go-pretty/table"
9 | "github.com/jedib0t/go-pretty/text"
10 | )
11 |
// Alignment content horizontal alignment.
//
// The values (starting at 1) are converted directly to go-pretty's
// text.Align via text.Align(alignment) elsewhere in this package.
// NOTE(review): this assumes the ordinals line up with the text.Align
// constants of the go-pretty version in use — verify, since some
// versions define AlignJustify between AlignCenter and AlignRight.
type Alignment int

const (
	// AlignLeft align the content to the left.
	AlignLeft Alignment = iota + 1
	// AlignCenter align the content to the centre.
	AlignCenter
	// AlignRight align the content to the right.
	AlignRight
)
23 |
// Column represents a table column.
type Column struct {
	// Header the column's header text.
	Header string
	// humanize when true, numeric values are rendered with comma separators.
	humanize bool
	// greenOtherwise when true, values at/below the warning level are rendered in green.
	greenOtherwise bool
	// warningLevel values above this threshold are rendered in yellow (nil disables the check).
	warningLevel *int64
	// config the underlying go-pretty column configuration.
	config table.ColumnConfig
}
32 |
33 | // C creates a new table column.
34 | func C(header string) *Column {
35 | return &Column{
36 | Header: header,
37 | config: table.ColumnConfig{
38 | Name: header,
39 | Align: text.Align(AlignCenter),
40 | AlignHeader: text.Align(AlignCenter),
41 | VAlign: text.VAlignMiddle,
42 | AlignFooter: text.Align(AlignRight),
43 | },
44 | }
45 | }
46 |
47 | func (c *Column) configuration(enableColor bool) table.ColumnConfig {
48 | if c.humanize || c.warningLevel != nil {
49 | transformer := func(val interface{}) string {
50 | switch value := val.(type) {
51 | case int:
52 | return c.renderNumber(enableColor, int64(value))
53 | case int64:
54 | return c.renderNumber(enableColor, value)
55 | }
56 |
57 | return fmt.Sprint(val)
58 | }
59 | c.config.Transformer = transformer
60 | c.config.TransformerFooter = transformer
61 | }
62 | return c.config
63 | }
64 |
// Humanize enables comma separation of the digits for numeric columns.
// It returns the receiver to allow call chaining.
func (c *Column) Humanize() *Column {
	c.humanize = true
	return c
}
70 |
// Warn sets the warning level for the numeric columns. Values above
// the level are highlighted in yellow; when greenOtherwise is set,
// remaining values are highlighted in green (colour mode only).
// It returns the receiver to allow call chaining.
func (c *Column) Warn(level int64, greenOtherwise bool) *Column {
	c.warningLevel = &level
	c.greenOtherwise = greenOtherwise
	return c
}
77 |
// HAlign sets the horizontal alignment of the header.
// It returns the receiver to allow call chaining.
func (c *Column) HAlign(alignment Alignment) *Column {
	c.config.AlignHeader = text.Align(alignment)
	return c
}
83 |
// FAlign sets the horizontal alignment of the footer.
// It returns the receiver to allow call chaining.
func (c *Column) FAlign(alignment Alignment) *Column {
	c.config.AlignFooter = text.Align(alignment)
	return c
}
89 |
// Align sets the horizontal alignment of the cell content.
// It returns the receiver to allow call chaining.
func (c *Column) Align(alignment Alignment) *Column {
	c.config.Align = text.Align(alignment)
	return c
}
95 |
// MinWidth sets the column's minimum width.
// It returns the receiver to allow call chaining.
func (c *Column) MinWidth(width int) *Column {
	c.config.WidthMin = width
	return c
}
101 |
// MaxWidth sets the column's maximum width.
// It returns the receiver to allow call chaining.
func (c *Column) MaxWidth(width int) *Column {
	c.config.WidthMax = width
	return c
}
107 |
108 | func (c *Column) renderNumber(enableColor bool, value int64) string {
109 | var rendered string
110 | if c.humanize {
111 | rendered = humanize.Comma(value)
112 | } else {
113 | rendered = strconv.FormatInt(value, 10)
114 | }
115 | if enableColor && c.warningLevel != nil {
116 | if value > *c.warningLevel {
117 | return text.Colors{text.FgHiYellow, text.Bold}.Sprint(rendered)
118 | }
119 | if c.greenOtherwise {
120 | return text.Colors{text.FgHiGreen, text.Bold}.Sprint(rendered)
121 | }
122 | }
123 |
124 | return rendered
125 | }
126 |
--------------------------------------------------------------------------------
/internal/output/format/formatters.go:
--------------------------------------------------------------------------------
1 | package format
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 |
7 | "github.com/dustin/go-humanize"
8 | "github.com/jedib0t/go-pretty/text"
9 | )
10 |
11 | const stableGroupLabel = "Stable"
12 |
13 | // GreenLabel returns a decorated green label if colours are enabled, otherwise returns "[val]".
14 | func GreenLabel(val interface{}, enableColor bool) interface{} {
15 | if !enableColor {
16 | return fmt.Sprintf("[%v]", val)
17 | }
18 | return text.Colors{text.Bold, text.BgGreen, text.FgWhite}.Sprintf(" %v ", val)
19 | }
20 |
21 | // Warn returns a yellow warning message if colours are enabled.
22 | func Warn(input int64, colorEnabled, greenOtherwise bool) interface{} {
23 | humanised := humanize.Comma(input)
24 | if !colorEnabled {
25 | return humanised
26 | }
27 | if input > 0 {
28 | return text.Colors{text.FgHiYellow, text.Bold}.Sprint(humanised)
29 | }
30 | if greenOtherwise {
31 | return text.Colors{text.FgHiGreen, text.Bold}.Sprint(humanised)
32 | }
33 | return humanised
34 | }
35 |
36 | // GroupStateLabel returns a decorated consumer group state label if colours are enabled.
37 | func GroupStateLabel(state string, enableColor bool) string {
38 | if strings.EqualFold(state, stableGroupLabel) {
39 | return fmt.Sprint(GreenLabel(stableGroupLabel, enableColor))
40 | }
41 | return state
42 | }
43 |
// BoldGreen returns a bold green string if colours are enabled,
// otherwise the input unchanged.
func BoldGreen(val interface{}, enableColor bool) interface{} {
	return colorIfEnabled(val, enableColor, text.Bold, text.FgHiGreen)
}
48 |
// SpaceIfEmpty returns a single whitespace if the input is an empty string, otherwise returns the input.
func SpaceIfEmpty(in string) string {
	if in == "" {
		return " "
	}
	return in
}
56 |
// Yellow returns the input in yellow if coloring is enabled.
func Yellow(input interface{}, colorEnabled bool) interface{} {
	return colorIfEnabled(input, colorEnabled, text.FgHiYellow)
}
61 |
// Red returns the input in red if coloring is enabled.
func Red(input interface{}, colorEnabled bool) interface{} {
	return colorIfEnabled(input, colorEnabled, text.FgHiRed)
}
66 |
// RedIfTrue highlights the input in red, if coloring is enabled and the evaluation function returns true.
func RedIfTrue(input interface{}, eval func() bool, colorEnabled bool) interface{} {
	return colorIfEnabled(input, colorEnabled && eval(), text.FgHiRed)
}
71 |
// GreenIfTrue highlights the input in green, if coloring is enabled and the evaluation function returns true.
func GreenIfTrue(input interface{}, eval func() bool, colorEnabled bool) interface{} {
	return colorIfEnabled(input, colorEnabled && eval(), text.FgHiGreen)
}
76 |
// Underline returns the input followed by a horizontal rule of the
// same (byte) length on the next line.
func Underline(input string) string {
	return underlineLen(input, len(input))
}
81 |
// WithCount returns the input in "title [count]" format.
func WithCount(title string, count int) string {
	return titleWithCount(title, count)
}
86 |
// titleWithCount renders a title followed by its count in square brackets.
func titleWithCount(title string, count int) string {
	return fmt.Sprint(title, " [", count, "]")
}
90 |
// underline builds a horizontal rule of the given length using the
// box-drawing dash character.
func underline(n int) string {
	return strings.Repeat("─", n)
}
94 |
// underlineLen returns the input followed, on a new line, by a
// horizontal rule of the given length.
func underlineLen(input string, length int) string {
	return fmt.Sprintf("%s\n%s", input, underline(length))
}
98 |
99 | func colorIfEnabled(input interface{}, colorEnabled bool, color ...text.Color) interface{} {
100 | if colorEnabled {
101 | return append(text.Colors{}, color...).Sprint(input)
102 | }
103 | return input
104 | }
105 |
--------------------------------------------------------------------------------
/kafka/checkpoint.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "time"
7 |
8 | "github.com/Shopify/sarama"
9 | "github.com/araddon/dateparse"
10 |
11 | "github.com/xitonix/trubka/internal"
12 | )
13 |
// checkpointMode describes how a checkpoint's position is interpreted.
type checkpointMode int8

const (
	// predefinedMode uses one of Sarama's predefined values (eg. newest, oldest etc).
	predefinedMode checkpointMode = iota
	// explicitMode uses a user-supplied numeric offset.
	explicitMode
	// localMode reads the position from the local offset store.
	localMode
	// timestampMode stores a point in time; the offset field holds its
	// Unix-millisecond representation (see newTimeCheckpoint).
	timestampMode
)
// checkpointPair holds a start (from) and an optional stop (to) checkpoint.
type checkpointPair struct {
	from *checkpoint
	to   *checkpoint
}
// checkpoint represents a consumption boundary. In timestampMode, at
// holds the point in time and offset its millisecond representation;
// in every other mode offset holds a Sarama offset value.
type checkpoint struct {
	offset int64
	at     time.Time
	mode   checkpointMode
}
35 | func newPredefinedCheckpoint(rewind bool) *checkpoint {
36 | offset := sarama.OffsetNewest
37 | if rewind {
38 | offset = sarama.OffsetOldest
39 | }
40 | return &checkpoint{
41 | offset: offset,
42 | mode: predefinedMode,
43 | }
44 | }
45 |
// newLocalCheckpoint creates a checkpoint whose position is to be read
// from the local offset store.
func newLocalCheckpoint() *checkpoint {
	return &checkpoint{
		mode: localMode,
		// Fallback offset, in case no offsets are stored locally!
		offset: sarama.OffsetNewest,
	}
}
53 |
54 | func newExplicitCheckpoint(offset int64) *checkpoint {
55 | if offset < sarama.OffsetOldest {
56 | offset = sarama.OffsetOldest
57 | }
58 | return &checkpoint{
59 | offset: offset,
60 | mode: explicitMode,
61 | }
62 | }
63 |
64 | // newTimeCheckpoint creates a new checkpoint and sets the offset to the milliseconds of the given time and the mode to MillisecondsOffsetMode.
65 | func newTimeCheckpoint(at time.Time) *checkpoint {
66 | var offsetMilliSeconds int64
67 | switch {
68 | case at.IsZero():
69 | offsetMilliSeconds = sarama.OffsetOldest
70 | default:
71 | offsetMilliSeconds = at.UnixNano() / 1000000
72 | }
73 | return &checkpoint{
74 | mode: timestampMode,
75 | offset: offsetMilliSeconds,
76 | at: at,
77 | }
78 | }
79 |
80 | func (c *checkpoint) String() string {
81 | if c.mode == timestampMode {
82 | return internal.FormatTime(c.at)
83 | }
84 | switch c.offset {
85 | case sarama.OffsetNewest:
86 | return "'newest'"
87 | case sarama.OffsetOldest:
88 | return "'oldest'"
89 | default:
90 | return strconv.FormatInt(c.offset, 10)
91 | }
92 | }
93 |
// after reports whether checkpoint c points to a later position than cp.
// It returns false when either side is nil. A timestamp checkpoint is
// compared chronologically and only against another timestamp
// checkpoint; it is never considered to be after an offset-based one.
// All other combinations fall through to a raw offset comparison
// (note that in timestamp mode the offset field holds milliseconds —
// see newTimeCheckpoint — so callers are expected to compare like modes).
func (c *checkpoint) after(cp *checkpoint) bool {
	if c == nil || cp == nil {
		return false
	}

	if c.mode == timestampMode {
		if cp.mode == timestampMode {
			return c.at.After(cp.at)
		}
		return false
	}

	return c.offset > cp.offset
}
108 |
// parseCheckpoint parses a user-supplied start/stop condition into a
// checkpoint. The value is interpreted, in order, as:
//  1. a timestamp (anything dateparse understands),
//  2. a predefined alias (local/stored, newest/latest/end,
//     oldest/earliest/beginning/start) — aliases are rejected when
//     isStopOffset is true,
//  3. an explicit numeric offset.
func parseCheckpoint(value string, isStopOffset bool) (*checkpoint, error) {
	if len(value) == 0 {
		return nil, fmt.Errorf("start/stop checkpoint value cannot be empty")
	}
	// Timestamps take precedence over the named aliases and raw offsets.
	t, err := dateparse.ParseAny(value)
	if err == nil {
		return newTimeCheckpoint(t), nil
	}

	switch value {
	case "local", "stored":
		if isStopOffset {
			return nil, invalidStopValue(value)
		}
		return newLocalCheckpoint(), nil
	case "newest", "latest", "end":
		if isStopOffset {
			return nil, invalidStopValue(value)
		}
		return newPredefinedCheckpoint(false), nil
	case "oldest", "earliest", "beginning", "start":
		if isStopOffset {
			return nil, invalidStopValue(value)
		}
		return newPredefinedCheckpoint(true), nil
	}

	offset, err := parseInt(value, "offset")
	if err != nil {
		return nil, err
	}

	return newExplicitCheckpoint(offset), nil
}
143 |
// invalidStopValue builds the error returned when a start-only alias is
// used as a stop condition.
func invalidStopValue(value string) error {
	return fmt.Errorf("'%s' is not an acceptable stop condition", value)
}
147 |
--------------------------------------------------------------------------------
/kafka/consumer_group_details.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "strings"
5 |
6 | "github.com/Shopify/sarama"
7 | )
8 |
// GroupMembers represents a map to hold consumer group members, keyed by member ID.
type GroupMembers map[string]*GroupMemberDetails
11 |
// ConsumerGroupDetails represents consumer group details.
type ConsumerGroupDetails struct {
	// Name group name.
	Name string `json:"name"`
	// State group state.
	State string `json:"state"`
	// Protocol group protocol.
	Protocol string `json:"protocol"`
	// ProtocolType group protocol type.
	ProtocolType string `json:"protocol_type"`
	// Coordinator the group's coordinating broker.
	Coordinator Broker `json:"coordinator"`
	// Members consumer group members.
	Members GroupMembers `json:"members,omitempty"`
}
27 |
// ToJSON returns an object ready to be serialised into json string.
// Empty coordinator/protocol values are omitted, and members are only
// populated when includeMembers is set. A nil receiver yields nil.
// Note: member and assignment ordering follows Go map iteration and is
// therefore not deterministic.
func (c *ConsumerGroupDetails) ToJSON(includeMembers bool) interface{} {
	if c == nil {
		return nil
	}
	// Local throwaway types shape the JSON output independently of the
	// internal representation.
	type assignment struct {
		Topic      string  `json:"topic"`
		Partitions []int32 `json:"partitions"`
	}
	type member struct {
		ID          string        `json:"id"`
		Host        string        `json:"host"`
		Assignments []*assignment `json:"assignments"`
	}
	type protocol struct {
		Name string `json:"name,omitempty"`
		Type string `json:"type,omitempty"`
	}
	output := struct {
		Name        string    `json:"name"`
		State       string    `json:"state,omitempty"`
		Protocol    *protocol `json:"protocol,omitempty"`
		Coordinator *Broker   `json:"coordinator,omitempty"`
		Members     []*member `json:"members,omitempty"`
	}{
		Name:    c.Name,
		State:   c.State,
		Members: []*member{},
	}

	// Only attach the coordinator when one has been resolved.
	if len(c.Coordinator.Host) > 0 {
		output.Coordinator = &c.Coordinator
	}

	if len(c.Protocol) > 0 {
		output.Protocol = &protocol{
			Name: c.Protocol,
			Type: c.ProtocolType,
		}
	}

	if includeMembers {
		for id, gMember := range c.Members {
			m := &member{
				ID:          id,
				Host:        gMember.ClientHost,
				Assignments: []*assignment{},
			}
			for topic, partitions := range gMember.Assignments {
				m.Assignments = append(m.Assignments, &assignment{
					Topic:      topic,
					Partitions: partitions,
				})
			}
			output.Members = append(output.Members, m)
		}
	}
	return output
}
87 |
// ConsumerGroupDetailsByName sorts a list of consumer group details by group name.
type ConsumerGroupDetailsByName []*ConsumerGroupDetails

// Len implements sort.Interface.
func (c ConsumerGroupDetailsByName) Len() int {
	return len(c)
}

// Swap implements sort.Interface.
func (c ConsumerGroupDetailsByName) Swap(i, j int) {
	c[i], c[j] = c[j], c[i]
}

// Less implements sort.Interface (ascending, case-sensitive, by name).
func (c ConsumerGroupDetailsByName) Less(i, j int) bool {
	return c[i].Name < c[j].Name
}
102 |
// GroupMemberDetails represents consumer group member details.
type GroupMemberDetails struct {
	// ClientHost the host name of the group member.
	ClientHost string `json:"client_host"`
	// Assignments partitions assigned to the group member for each topic.
	Assignments TopicPartitions `json:"assignments"`
}
110 |
111 | func fromGroupMemberDescription(md *sarama.GroupMemberDescription) (*GroupMemberDetails, error) {
112 | assignments, err := md.GetMemberAssignment()
113 | if err != nil {
114 | return nil, err
115 | }
116 | return &GroupMemberDetails{
117 | ClientHost: strings.Trim(md.ClientHost, "/"),
118 | Assignments: assignments.Topics,
119 | }, nil
120 | }
121 |
--------------------------------------------------------------------------------
/internal/message_metadata.go:
--------------------------------------------------------------------------------
1 | package internal
2 |
3 | import (
4 | "encoding/base64"
5 | "fmt"
6 | "strings"
7 | "time"
8 | )
9 |
// Prefix labels used when prepending metadata lines to the output.
const (
	topicPrefix     = "Topic"
	partitionPrefix = "Partition"
	offsetPrefix    = "Offset"
	keyPrefix       = "Key"
	timePrefix      = "Time"
)
17 |
// MessageMetadata represents the message metadata which should be included in the output.
type MessageMetadata struct {
	// Partition enables printing partition to the output.
	Partition bool
	// Offset enables printing offset to the output.
	Offset bool
	// Key enables printing partition key to the output.
	Key bool
	// Timestamp enables printing message timestamp to the output.
	Timestamp bool
	// Topic enables printing topic name to the output.
	Topic bool

	// maxPrefixLength the length of the longest enabled prefix label,
	// used to right-align the labels (see SetIndentation/getPrefix).
	maxPrefixLength int
}
33 |
34 | // IsRequested returns true if any piece of metadata has been requested by the user to be included in the output.
35 | func (m *MessageMetadata) IsRequested() bool {
36 | return m.Timestamp || m.Key || m.Offset || m.Partition || m.Topic
37 | }
38 |
// Render prepends the requested metadata to the message. The prepends
// run in reverse display order, so the final output reads (top to
// bottom): Topic, Partition, Offset, Key, Time, then the message body.
// b64 controls whether the key is rendered as base64 or hex.
func (m *MessageMetadata) Render(key, message []byte, ts time.Time, topic string, partition int32, offset int64, b64 bool) []byte {
	if m.Timestamp {
		message = m.prependTimestamp(ts, message)
	}

	if m.Key {
		message = m.prependKey(key, message, b64)
	}

	if m.Offset {
		message = m.prependOffset(offset, message)
	}

	if m.Partition {
		message = m.prependPartition(partition, message)
	}

	if m.Topic {
		message = m.prependTopic(topic, message)
	}

	return message
}
63 |
64 | // SetIndentation sets the indentation for metadata.
65 | func (m *MessageMetadata) SetIndentation() {
66 | // MAKE sure the if clauses are in descending order by prefix length.
67 | if m.Partition {
68 | m.maxPrefixLength = len(partitionPrefix)
69 | return
70 | }
71 |
72 | if m.Offset {
73 | m.maxPrefixLength = len(offsetPrefix)
74 | return
75 | }
76 |
77 | if m.Topic {
78 | m.maxPrefixLength = len(topicPrefix)
79 | return
80 | }
81 |
82 | if m.Timestamp {
83 | m.maxPrefixLength = len(timePrefix)
84 | return
85 | }
86 |
87 | if m.Key {
88 | m.maxPrefixLength = len(keyPrefix)
89 | }
90 | }
91 |
92 | func (m *MessageMetadata) getPrefix(prefix string) string {
93 | indentation := m.maxPrefixLength - len(prefix)
94 | if indentation > 0 {
95 | return fmt.Sprintf("%s%s", strings.Repeat(" ", indentation), prefix)
96 | }
97 | return prefix
98 | }
99 |
// prependTimestamp prepends the formatted message timestamp line.
func (m *MessageMetadata) prependTimestamp(ts time.Time, in []byte) []byte {
	return append([]byte(fmt.Sprintf("%s: %s\n", m.getPrefix(timePrefix), FormatTime(ts))), in...)
}
103 |
// prependTopic prepends the topic name line.
func (m *MessageMetadata) prependTopic(topic string, in []byte) []byte {
	return append([]byte(fmt.Sprintf("%s: %s\n", m.getPrefix(topicPrefix), topic)), in...)
}
107 |
108 | func (m *MessageMetadata) prependKey(key []byte, in []byte, b64 bool) []byte {
109 | prefix := m.getPrefix(keyPrefix)
110 | if b64 {
111 | return append([]byte(fmt.Sprintf("%s: %s\n", prefix, base64.StdEncoding.EncodeToString(key))), in...)
112 | }
113 | return append([]byte(fmt.Sprintf("%s: %X\n", prefix, key)), in...)
114 | }
115 |
// prependOffset prepends the message offset line.
func (m *MessageMetadata) prependOffset(offset int64, in []byte) []byte {
	return append([]byte(fmt.Sprintf("%s: %d\n", m.getPrefix(offsetPrefix), offset)), in...)
}
119 |
// prependPartition prepends the partition number line.
func (m *MessageMetadata) prependPartition(partition int32, in []byte) []byte {
	return append([]byte(fmt.Sprintf("%s: %d\n", m.getPrefix(partitionPrefix), partition)), in...)
}
123 |
--------------------------------------------------------------------------------
/commands/describe/cluster.go:
--------------------------------------------------------------------------------
1 | package describe
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "sort"
7 |
8 | "gopkg.in/alecthomas/kingpin.v2"
9 |
10 | "github.com/xitonix/trubka/commands"
11 | "github.com/xitonix/trubka/internal/output"
12 | "github.com/xitonix/trubka/internal/output/format"
13 | "github.com/xitonix/trubka/internal/output/format/list"
14 | "github.com/xitonix/trubka/internal/output/format/tabular"
15 | "github.com/xitonix/trubka/kafka"
16 | )
17 |
const (
	// controlNodeFlag the label attached to the cluster's controller broker in the output.
	controlNodeFlag = "CTRL"
)
// cluster implements the `describe cluster` sub command.
type cluster struct {
	globalParams *commands.GlobalParameters
	kafkaParams  *commands.KafkaParameters
	// format the requested output format (json/table/tree/plain).
	format string
	// style the output style (used by the JSON printer).
	style string
	// loadConfigs when true, the cluster's configuration entries are fetched too.
	loadConfigs bool
}
30 | func addClusterSubCommand(parent *kingpin.CmdClause, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
31 | cmd := &cluster{
32 | globalParams: global,
33 | kafkaParams: kafkaParams,
34 | }
35 | c := parent.Command("cluster", "Describes the Kafka cluster.").Action(cmd.run)
36 | c.Flag("load-config", "Loads the cluster's configurations from the server.").
37 | NoEnvar().
38 | Short('c').
39 | BoolVar(&cmd.loadConfigs)
40 | commands.AddFormatFlag(c, &cmd.format, &cmd.style)
41 | }
42 |
43 | func (c *cluster) run(_ *kingpin.ParseContext) error {
44 | manager, ctx, cancel, err := commands.InitKafkaManager(c.globalParams, c.kafkaParams)
45 |
46 | if err != nil {
47 | return err
48 | }
49 |
50 | defer func() {
51 | manager.Close()
52 | cancel()
53 | }()
54 |
55 | meta, err := manager.DescribeCluster(ctx, c.loadConfigs)
56 | if err != nil {
57 | return fmt.Errorf("failed to list the brokers: %w", err)
58 | }
59 |
60 | if len(meta.Brokers) == 0 {
61 | return errors.New("no brokers found")
62 | }
63 |
64 | sort.Sort(kafka.BrokersByID(meta.Brokers))
65 |
66 | switch c.format {
67 | case commands.JSONFormat:
68 | return output.PrintAsJSON(meta, c.style, c.globalParams.EnableColor)
69 | case commands.TableFormat:
70 | return c.printAsTable(meta)
71 | case commands.TreeFormat:
72 | return c.printAsList(meta, false)
73 | case commands.PlainTextFormat:
74 | return c.printAsList(meta, true)
75 | default:
76 | return nil
77 | }
78 | }
79 |
// printAsTable renders the brokers as a two-column (ID/Address) table.
// The controller node's row is highlighted in green and its address is
// tagged with the CTRL label. Configuration entries, when loaded, are
// printed in a separate table below.
func (c *cluster) printAsTable(meta *kafka.ClusterMetadata) error {
	table := tabular.NewTable(c.globalParams.EnableColor,
		tabular.C("ID").Align(tabular.AlignLeft),
		tabular.C("Address").Align(tabular.AlignLeft),
	)
	table.SetTitle(format.WithCount("Brokers", len(meta.Brokers)))
	for _, broker := range meta.Brokers {
		if broker.IsController {
			host := fmt.Sprintf("%v < %v",
				format.BoldGreen(broker.Host, c.globalParams.EnableColor),
				format.GreenLabel(controlNodeFlag, c.globalParams.EnableColor),
			)
			table.AddRow(format.BoldGreen(broker.ID, c.globalParams.EnableColor), host)
			continue
		}
		table.AddRow(broker.ID, broker.Host)
	}
	table.AddFooter("", fmt.Sprintf("Total: %d", len(meta.Brokers)))
	output.NewLines(1)
	table.Render()

	if len(meta.ConfigEntries) > 0 {
		output.NewLines(2)
		commands.PrintConfigTable(meta.ConfigEntries)
	}

	return nil
}
108 |
109 | func (c *cluster) printAsList(meta *kafka.ClusterMetadata, plain bool) error {
110 | l := list.New(plain)
111 | if len(meta.Brokers) > 0 {
112 | l.AddItem("Brokers")
113 | l.Indent()
114 | for _, broker := range meta.Brokers {
115 | host := broker.String()
116 | if broker.IsController {
117 | host = fmt.Sprintf("%s %v", host, format.GreenLabel(controlNodeFlag, c.globalParams.EnableColor && !plain))
118 | }
119 | l.AddItem(host)
120 | }
121 | l.UnIndent()
122 | }
123 |
124 | if len(meta.ConfigEntries) > 0 {
125 | commands.PrintConfigList(l, meta.ConfigEntries, plain)
126 | }
127 | l.Render()
128 | return nil
129 | }
130 |
--------------------------------------------------------------------------------
/release_notes.md:
--------------------------------------------------------------------------------
1 | ## Release Notes
2 |
3 | ### v3.3.1 (WIP)
4 |
5 | - Fixed missing quote in boolean parsing logic ([PR](https://github.com/xitonix/trubka/pull/22))
6 |
7 | ### v3.3.0
8 |
9 | - Fixed build pipeline
10 | - Upgraded runtime to Go 1.24
11 | - JSON compact is set as the default encoder
12 |
13 | ### v3.2.1
14 |
15 | **[New Features]**
16 |
17 | - Multi-platform packaging.
18 |
19 | **[Fixes]**
20 | - Print the parsed content instead of the raw template when publishing plain text with `-gv` flags.
21 |
22 | ### v3.2.0
23 |
24 | **[New Features]**
25 |
26 | - `produce plain` command now supports data templates to push randomly generated messages to Kafka.
27 | - Added `--sleep=Duration` parameter to `produce` commands so that we can put a gap between publish operations.
28 | - `produce` commands now support `--count=0` to allow publishing to Kafka indefinitely.
29 |
30 | ### v3.1.2
31 |
32 | **[New Features]**
33 | - The process of loading proto files from disk respects logging verbosity levels.
34 | - The offset of the consumed message can be optionally included in the output.
35 | - Different time-based starting offsets can be defined for different partitions.
36 |   - Predefined starting offsets (e.g. `oldest`, `newest`, `local`, etc.) can be defined for individual partitions.
37 | - The following new flags have been added to `consume` commands:
38 | - `--to`: To define a stop offset/timestamp.
39 |     - `--idle-timeout`: The amount of time the consumer will wait for a message to arrive before it stops consuming from a partition.
40 | - `--exclusive`: Only explicitly defined partitions (Partition#Offset|Timestamp) will be consumed. The rest will be excluded.
41 |
42 | **[Changes]**
43 | - `-U` (for SASL username) and `-P` (for SASL password) short flags have been removed.
44 | - `Partition` and `Key` metadata will be printed to the output as separate lines for non-json formats.
45 | - `UTC` suffix has been replaced with timezone offsets.
46 | - `--from` is now a repeatable flag instead of a single comma separated string.
47 | - Partition-Offset delimiter has been changed to `#` for `--from` and `--to` values.
48 | - Wildcard offset definition syntax (`:Offset`) has been replaced with `--exclusive` flag to reduce the complexity.
49 | - User will not be asked to provide `topic` (or proto `schema`) in the interactive mode, if it's already been provided via command arguments.
50 |
51 | **[Fixes]**
52 | - Loading proto files from disk respects termination signals received from the OS (Ctrl + C).
53 |
54 | ---
55 | ### v3.1.1
56 |
57 | **[New Features]**
58 | - `tree` output format added to administrative commands.
59 |
60 | **[Changes]**
61 | - Administrative Commands
62 | - `list` output format has been replaced with `tree`.
63 | - Colours are disabled by default for Json output (`--format json`)
64 | - Removed clutter from `plain` output.
65 | - Consume commands (plain/proto)
66 | - Timestamp (`-S, --include-timestamp`), partition key (`-K, --include-partition-key`) and topic name (`-T, --include-topic-name`) are injected into the Json output when consuming in `json` and `json-indent` modes.
67 |
68 | **[Fixes]**
69 |
70 | ---
71 |
72 | ### v3.1.0
73 |
74 | **[New Features]**
75 |
76 | - New list and json output formats for administrative commands.
77 | - `--style` support for json output.
78 | - Both `no-color` and `no-colour` flags have the same effect.
79 |
80 | **[Changes]**
81 | - Removed all the decorations from `--format=plain`.
82 |
83 | **[Fixes]**
84 |
85 | ---
86 |
87 | ### v3.0.3
88 |
89 | **[New Features]**
90 | - Tabular and plain text output format
91 |
92 | **[Changes]**
93 |
94 |
95 | **[Fixes]**
96 |
97 | ---
98 |
99 | ### v3.0.2
100 |
101 | **[New Features]**
102 |
103 |
104 | **[Changes]**
105 |
106 |
107 | **[Fixes]**
108 | - Random partition key generation logic for the producers.
109 |
--------------------------------------------------------------------------------
/commands/list/local_offsets.go:
--------------------------------------------------------------------------------
1 | package list
2 |
3 | import (
4 | "fmt"
5 | "os"
6 |
7 | "github.com/dustin/go-humanize"
8 | "gopkg.in/alecthomas/kingpin.v2"
9 |
10 | "github.com/xitonix/trubka/commands"
11 | "github.com/xitonix/trubka/internal"
12 | "github.com/xitonix/trubka/internal/output"
13 | "github.com/xitonix/trubka/internal/output/format"
14 | "github.com/xitonix/trubka/internal/output/format/list"
15 | "github.com/xitonix/trubka/internal/output/format/tabular"
16 | "github.com/xitonix/trubka/kafka"
17 | )
18 |
// listLocalOffsets represents the `list local-offsets` command which reports
// the locally stored offsets of a topic in a given environment against the
// latest offsets on the server.
type listLocalOffsets struct {
	globalParams *commands.GlobalParameters
	kafkaParams *commands.KafkaParameters
	// topic is the topic to load the local offsets of.
	topic string
	// environment is the environment the offsets were stored under.
	environment string
	// format is the requested output format (json, table, tree or plain).
	format string
	// style is the highlighting style for json output.
	style string
}
27 |
28 | func addLocalOffsetsSubCommand(parent *kingpin.CmdClause, params *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
29 | cmd := &listLocalOffsets{
30 | globalParams: params,
31 | kafkaParams: kafkaParams,
32 | }
33 | c := parent.Command("local-offsets", "Lists the locally stored offsets of the given topic and environment.").Action(cmd.run)
34 | c.Arg("topic", "The topic to loads the local offsets of.").Required().StringVar(&cmd.topic)
35 | c.Arg("environment", "The environment to load the topic offset from.").Required().StringVar(&cmd.environment)
36 | commands.AddFormatFlag(c, &cmd.format, &cmd.style)
37 | }
38 |
39 | func (l *listLocalOffsets) run(_ *kingpin.ParseContext) error {
40 | offsetManager := kafka.NewLocalOffsetManager(internal.NewPrinter(l.globalParams.Verbosity, os.Stdout))
41 | localOffsets, err := offsetManager.ReadTopicOffsets(l.topic, l.environment)
42 | if err != nil {
43 | return err
44 | }
45 | if len(localOffsets) == 0 {
46 | return fmt.Errorf("no offset has been stored locally for %s topic in %s", l.topic, l.environment)
47 | }
48 |
49 | manager, ctx, cancel, err := commands.InitKafkaManager(l.globalParams, l.kafkaParams)
50 |
51 | if err != nil {
52 | return err
53 | }
54 |
55 | defer func() {
56 | manager.Close()
57 | cancel()
58 | }()
59 |
60 | offsets, err := manager.GetTopicOffsets(ctx, l.topic, localOffsets)
61 | if err != nil {
62 | return err
63 | }
64 |
65 | switch l.format {
66 | case commands.JSONFormat:
67 | return output.PrintAsJSON(offsets.ToJSON(), l.style, l.globalParams.EnableColor)
68 | case commands.TableFormat:
69 | return l.printAsTable(offsets)
70 | case commands.TreeFormat:
71 | return l.printAsList(offsets, false)
72 | case commands.PlainTextFormat:
73 | return l.printAsList(offsets, true)
74 | default:
75 | return nil
76 | }
77 | }
78 |
79 | func (l *listLocalOffsets) printAsTable(offsets kafka.PartitionOffset) error {
80 | sortedPartitions := offsets.SortPartitions()
81 | table := tabular.NewTable(l.globalParams.EnableColor,
82 | tabular.C("Partition"),
83 | tabular.C("Latest").MinWidth(10),
84 | tabular.C("Current").MinWidth(10),
85 | tabular.C("Lag").MinWidth(10).Humanize().Warn(0, true).FAlign(tabular.AlignCenter),
86 | )
87 | table.SetTitle(format.WithCount("Partitions", len(sortedPartitions)))
88 | var totalLag int64
89 | for _, partition := range sortedPartitions {
90 | offsets := offsets[int32(partition)]
91 | lag := offsets.Lag()
92 | totalLag += lag
93 | latest := humanize.Comma(offsets.Latest)
94 | current := humanize.Comma(offsets.Current)
95 | table.AddRow(partition, latest, current, lag)
96 | }
97 | table.AddFooter(" ", " ", " ", totalLag)
98 | table.Render()
99 | return nil
100 | }
101 |
102 | func (l *listLocalOffsets) printAsList(offsets kafka.PartitionOffset, plain bool) error {
103 | partitions := offsets.SortPartitions()
104 | var totalLag int64
105 | ls := list.New(plain)
106 | for _, partition := range partitions {
107 | offsets := offsets[int32(partition)]
108 | lag := offsets.Lag()
109 | totalLag += lag
110 | ls.AddItemF("P%d", partition)
111 | ls.Indent()
112 | ls.AddItemF(" Latest: %s", humanize.Comma(offsets.Latest))
113 | ls.AddItemF("Current: %s", humanize.Comma(offsets.Current))
114 | ls.AddItemF(" Lag: %v", format.Warn(lag, l.globalParams.EnableColor && !plain, true))
115 | ls.UnIndent()
116 | }
117 | ls.Render()
118 | return nil
119 | }
120 |
--------------------------------------------------------------------------------
/commands/list/groups.go:
--------------------------------------------------------------------------------
1 | package list
2 |
3 | import (
4 | "fmt"
5 | "regexp"
6 | "sort"
7 |
8 | "github.com/dustin/go-humanize"
9 | "gopkg.in/alecthomas/kingpin.v2"
10 |
11 | "github.com/xitonix/trubka/commands"
12 | "github.com/xitonix/trubka/internal"
13 | "github.com/xitonix/trubka/internal/output"
14 | "github.com/xitonix/trubka/internal/output/format"
15 | "github.com/xitonix/trubka/internal/output/format/list"
16 | "github.com/xitonix/trubka/internal/output/format/tabular"
17 | "github.com/xitonix/trubka/kafka"
18 | )
19 |
// groups represents the `list groups` command which loads the consumer
// groups from the server.
type groups struct {
	kafkaParams *commands.KafkaParameters
	globalParams *commands.GlobalParameters
	// groupFilter is an optional regex to narrow down the groups (nil means all).
	groupFilter *regexp.Regexp
	// includeState indicates whether the groups' state details must be fetched too.
	includeState bool
	// format is the requested output format (json, table, tree or plain).
	format string
	// style is the highlighting style for json output.
	style string
}
28 |
29 | func addGroupsSubCommand(parent *kingpin.CmdClause, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
30 | cmd := &groups{
31 | kafkaParams: kafkaParams,
32 | globalParams: global,
33 | }
34 | c := parent.Command("groups", "Loads the consumer groups from the server.").Action(cmd.run)
35 | c.Flag("group-filter", "An optional regular expression to filter the groups by.").
36 | Short('g').
37 | RegexpVar(&cmd.groupFilter)
38 |
39 | c.Flag("include-states", "Include consumer groups' state information.").
40 | Short('s').
41 | BoolVar(&cmd.includeState)
42 |
43 | commands.AddFormatFlag(c, &cmd.format, &cmd.style)
44 | }
45 |
46 | func (c *groups) run(_ *kingpin.ParseContext) error {
47 | manager, ctx, cancel, err := commands.InitKafkaManager(c.globalParams, c.kafkaParams)
48 |
49 | if err != nil {
50 | return err
51 | }
52 |
53 | defer func() {
54 | manager.Close()
55 | cancel()
56 | }()
57 |
58 | groups, err := manager.GetGroups(ctx, c.groupFilter, c.includeState)
59 | if err != nil {
60 | return err
61 | }
62 |
63 | if len(groups) == 0 {
64 | return internal.NotFoundError("consumer group", "group", c.groupFilter)
65 | }
66 |
67 | sort.Sort(kafka.ConsumerGroupDetailsByName(groups))
68 |
69 | switch c.format {
70 | case commands.JSONFormat:
71 | data := make([]interface{}, len(groups))
72 | for i, g := range groups {
73 | data[i] = g.ToJSON(false)
74 | }
75 | return output.PrintAsJSON(data, c.style, c.globalParams.EnableColor)
76 | case commands.TableFormat:
77 | return c.printAsTable(groups)
78 | case commands.TreeFormat:
79 | return c.printAsList(groups, false)
80 | case commands.PlainTextFormat:
81 | return c.printAsList(groups, true)
82 | default:
83 | return nil
84 | }
85 | }
86 |
87 | func (c *groups) printAsList(groups []*kafka.ConsumerGroupDetails, plain bool) error {
88 | l := list.New(plain)
89 | for _, group := range groups {
90 | if c.includeState {
91 | l.AddItem(group.Name)
92 | l.Indent()
93 | l.AddItemF("State: %s", format.GroupStateLabel(group.State, c.globalParams.EnableColor && !plain))
94 | l.AddItemF("Protocol: %s/%s", group.Protocol, group.ProtocolType)
95 | l.AddItemF("Coordinator: %s", group.Coordinator.String())
96 | l.UnIndent()
97 | } else {
98 | l.AddItem(group.Name)
99 | }
100 | }
101 | l.Render()
102 | return nil
103 | }
104 |
105 | func (c *groups) printAsTable(groups []*kafka.ConsumerGroupDetails) error {
106 | var table *tabular.Table
107 | if c.includeState {
108 | table = tabular.NewTable(c.globalParams.EnableColor,
109 | tabular.C("Name").Align(tabular.AlignLeft),
110 | tabular.C("State"),
111 | tabular.C("Protocol"),
112 | tabular.C("Protocol Type"),
113 | tabular.C("Coordinator"),
114 | )
115 | } else {
116 | table = tabular.NewTable(c.globalParams.EnableColor, tabular.C("Consumer Group").Align(tabular.AlignLeft))
117 | }
118 |
119 | for _, group := range groups {
120 | if c.includeState {
121 | table.AddRow(group.Name,
122 | format.GroupStateLabel(group.State, c.globalParams.EnableColor),
123 | group.Protocol,
124 | group.ProtocolType,
125 | group.Coordinator.Host)
126 | } else {
127 | table.AddRow(group.Name)
128 | }
129 | }
130 | if c.includeState {
131 | table.AddFooter(fmt.Sprintf("Total: %s", humanize.Comma(int64(len(groups)))), " ", " ", " ", " ")
132 | } else {
133 | table.AddFooter(fmt.Sprintf("Total: %s", humanize.Comma(int64(len(groups)))))
134 | }
135 | table.Render()
136 | return nil
137 | }
138 |
--------------------------------------------------------------------------------
/protobuf/loader.go:
--------------------------------------------------------------------------------
1 | package protobuf
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "regexp"
7 |
8 | "github.com/jhump/protoreflect/desc"
9 | "github.com/jhump/protoreflect/desc/protoparse"
10 | "github.com/jhump/protoreflect/dynamic"
11 |
12 | "github.com/xitonix/trubka/internal"
13 | )
14 |
// Loader is the interface to load and list the protocol buffer message types.
type Loader interface {
	// Load loads the specified fully qualified message type into memory.
	Load(ctx context.Context, messageName string) error
	// Get creates a new instance of the specified (previously loaded) message type.
	Get(messageName string) (*dynamic.Message, error)
	// List returns the message types matching the optional filter.
	List(filter *regexp.Regexp) ([]string, error)
}
21 |
// FileLoader is an implementation of Loader interface to load the proto files from the disk.
type FileLoader struct {
	// files holds the descriptors of all the parsed *.proto files.
	files []*desc.FileDescriptor
	// cache maps fully qualified message names to their descriptors (populated by Load).
	cache map[string]*desc.MessageDescriptor
	// factory creates dynamic message instances (extension-registry aware).
	factory *dynamic.MessageFactory
	// root is the directory the proto files were loaded from (used in error messages).
	root string
}
29 |
30 | // LoadFiles creates a new instance of local file loader.
31 | func LoadFiles(ctx context.Context, verbosity internal.VerbosityLevel, root string) (*FileLoader, error) {
32 | finder, err := newFileFinder(verbosity, root)
33 | if err != nil {
34 | return nil, err
35 | }
36 |
37 | files, err := finder.ls(ctx)
38 | if err != nil {
39 | return nil, fmt.Errorf("failed to load the proto files: %w", err)
40 | }
41 |
42 | importPaths, err := finder.dirs(ctx)
43 | if err != nil {
44 | return nil, fmt.Errorf("failed to load the import paths: %w", err)
45 | }
46 |
47 | if len(files) == 0 {
48 | return nil, fmt.Errorf("no protocol buffer (*.proto) files found in %s", root)
49 | }
50 | resolved, err := protoparse.ResolveFilenames(importPaths, files...)
51 | if err != nil {
52 | return nil, fmt.Errorf("failed to resolve the protocol buffer (*.proto) files: %w", err)
53 | }
54 |
55 | parser := protoparse.Parser{
56 | ImportPaths: importPaths,
57 | IncludeSourceCodeInfo: true,
58 | }
59 |
60 | fileDescriptors, err := parser.ParseFiles(resolved...)
61 | if err != nil {
62 | return nil, fmt.Errorf("failed to parse the protocol buffer (*.proto) files: %w", err)
63 | }
64 |
65 | er := &dynamic.ExtensionRegistry{}
66 | for _, fd := range fileDescriptors {
67 | er.AddExtensionsFromFile(fd)
68 | }
69 |
70 | return &FileLoader{
71 | files: fileDescriptors,
72 | cache: make(map[string]*desc.MessageDescriptor),
73 | factory: dynamic.NewMessageFactoryWithExtensionRegistry(er),
74 | root: root,
75 | }, nil
76 | }
77 |
78 | // Load loads the specified message type into the local cache.
79 | //
80 | // The input parameter must be the fully qualified name of the message type.
81 | // The method will return an error if the specified message type does not exist in the path.
82 | //
83 | // Calling load is not thread safe.
84 | func (f *FileLoader) Load(ctx context.Context, messageName string) error {
85 | _, ok := f.cache[messageName]
86 | if ok {
87 | return nil
88 | }
89 | for _, fd := range f.files {
90 | select {
91 | case <-ctx.Done():
92 | return ctx.Err()
93 | default:
94 | md := fd.FindMessage(messageName)
95 | if md != nil {
96 | f.cache[messageName] = md
97 | return nil
98 | }
99 | }
100 | }
101 | return fmt.Errorf("%s has not been found in %s", messageName, f.root)
102 | }
103 |
104 | // Get creates a new instance of the specified protocol buffer message.
105 | //
106 | // The input parameter must be the fully qualified name of the message type.
107 | // The method will return an error if the specified message type does not exist in the path.
108 | func (f *FileLoader) Get(messageName string) (*dynamic.Message, error) {
109 | if md, ok := f.cache[messageName]; ok {
110 | return f.factory.NewDynamicMessage(md), nil
111 | }
112 | return nil, fmt.Errorf("%s has not been found in %s. Make sure you Load the message first", messageName, f.root)
113 | }
114 |
115 | // List returns a list of all the protocol buffer messages exist in the path.
116 | func (f *FileLoader) List(search *regexp.Regexp) ([]string, error) {
117 | result := make([]string, 0)
118 | for _, fd := range f.files {
119 | messages := fd.GetMessageTypes()
120 | for _, msg := range messages {
121 | name := msg.GetFullyQualifiedName()
122 | if search == nil {
123 | result = append(result, name)
124 | continue
125 | }
126 | if search.Match([]byte(name)) {
127 | result = append(result, name)
128 | }
129 | }
130 | }
131 | return result, nil
132 | }
133 |
--------------------------------------------------------------------------------
/commands/list/group_offsets.go:
--------------------------------------------------------------------------------
1 | package list
2 |
3 | import (
4 | "fmt"
5 | "regexp"
6 | "strconv"
7 |
8 | "github.com/dustin/go-humanize"
9 | "gopkg.in/alecthomas/kingpin.v2"
10 |
11 | "github.com/xitonix/trubka/commands"
12 | "github.com/xitonix/trubka/internal"
13 | "github.com/xitonix/trubka/internal/output"
14 | "github.com/xitonix/trubka/internal/output/format"
15 | "github.com/xitonix/trubka/internal/output/format/list"
16 | "github.com/xitonix/trubka/internal/output/format/tabular"
17 | "github.com/xitonix/trubka/kafka"
18 | )
19 |
// groupOffset represents the `list group-offsets` command which reports a
// consumer group's offsets for all the topics within the group.
type groupOffset struct {
	kafkaParams *commands.KafkaParameters
	globalParams *commands.GlobalParameters
	// group is the consumer group to fetch the offsets for.
	group string
	// topicFilter is an optional regex to narrow down the topics (nil means all).
	topicFilter *regexp.Regexp
	// format is the requested output format (json, table, tree or plain).
	format string
	// style is the highlighting style for json output.
	style string
}
28 |
29 | func addGroupOffsetsSubCommand(parent *kingpin.CmdClause, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
30 | cmd := &groupOffset{
31 | kafkaParams: kafkaParams,
32 | globalParams: global,
33 | }
34 | c := parent.Command("group-offsets", "Lists a consumer group's offsets for all the topics within the group.").Action(cmd.run)
35 | c.Arg("group", "The consumer group name to fetch the offsets for.").Required().StringVar(&cmd.group)
36 | c.Flag("topic-filter", "An optional regular expression to filter the topics by.").
37 | Short('t').
38 | RegexpVar(&cmd.topicFilter)
39 | commands.AddFormatFlag(c, &cmd.format, &cmd.style)
40 | }
41 |
42 | func (g *groupOffset) run(_ *kingpin.ParseContext) error {
43 | manager, ctx, cancel, err := commands.InitKafkaManager(g.globalParams, g.kafkaParams)
44 |
45 | if err != nil {
46 | return err
47 | }
48 |
49 | defer func() {
50 | manager.Close()
51 | cancel()
52 | }()
53 |
54 | topics, err := manager.GetGroupOffsets(ctx, g.group, g.topicFilter)
55 | if err != nil {
56 | return err
57 | }
58 |
59 | if len(topics) == 0 {
60 | return internal.NotFoundError("topic", "topic", g.topicFilter)
61 | }
62 |
63 | switch g.format {
64 | case commands.JSONFormat:
65 | return output.PrintAsJSON(topics.ToJSON(), g.style, g.globalParams.EnableColor)
66 | case commands.TableFormat:
67 | return g.printAsTable(topics)
68 | case commands.TreeFormat:
69 | return g.printAsList(topics, false)
70 | case commands.PlainTextFormat:
71 | return g.printAsList(topics, true)
72 | default:
73 | return nil
74 | }
75 | }
76 |
77 | func (g *groupOffset) printAsTable(topics kafka.TopicPartitionOffset) error {
78 | for topic, partitionOffsets := range topics {
79 | table := tabular.NewTable(g.globalParams.EnableColor,
80 | tabular.C("Partition").MinWidth(10),
81 | tabular.C("Latest").MinWidth(10).Align(tabular.AlignCenter),
82 | tabular.C("Current").MinWidth(10).Align(tabular.AlignCenter),
83 | tabular.C("Lag").MinWidth(10).Humanize().FAlign(tabular.AlignCenter).Warn(0, true),
84 | )
85 |
86 | table.SetTitle(fmt.Sprintf("Topic: %s", topic))
87 | if len(partitionOffsets) > 0 {
88 | partitions := partitionOffsets.SortPartitions()
89 | var totalLag int64
90 | for _, partition := range partitions {
91 | offsets := partitionOffsets[int32(partition)]
92 | lag := offsets.Lag()
93 | totalLag += lag
94 | latest := humanize.Comma(offsets.Latest)
95 | current := humanize.Comma(offsets.Current)
96 | part := strconv.FormatInt(int64(partition), 10)
97 | table.AddRow(part, latest, current, lag)
98 | }
99 | table.AddFooter(" ", " ", " ", totalLag)
100 | table.Render()
101 | }
102 | }
103 | return nil
104 | }
105 |
106 | func (g *groupOffset) printAsList(topics kafka.TopicPartitionOffset, plain bool) error {
107 | l := list.New(plain)
108 | if !plain {
109 | l.AddItem(g.group)
110 | l.Indent()
111 | }
112 | for topic, partitionOffsets := range topics {
113 | l.AddItem(topic)
114 | var totalLag int64
115 | if len(partitionOffsets) > 0 {
116 | partitions := partitionOffsets.SortPartitions()
117 | l.Indent()
118 | for _, partition := range partitions {
119 | offsets := partitionOffsets[int32(partition)]
120 | lag := offsets.Lag()
121 | totalLag += lag
122 | l.AddItemF("P%d", partition)
123 | l.Indent()
124 | l.AddItemF(" Latest: %s", humanize.Comma(offsets.Latest))
125 | l.AddItemF("Current: %s", humanize.Comma(offsets.Current))
126 | l.AddItemF(" Lag: %v", format.Warn(lag, g.globalParams.EnableColor && !plain, true))
127 | l.UnIndent()
128 | }
129 | l.UnIndent()
130 | }
131 | }
132 | l.UnIndent()
133 | l.Render()
134 | return nil
135 | }
136 |
--------------------------------------------------------------------------------
/kafka/client_mock.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "errors"
5 | "sync"
6 | "time"
7 |
8 | "github.com/Shopify/sarama"
9 | "github.com/araddon/dateparse"
10 | )
11 |
var (
	// errDeliberate is the error returned when a failure has been explicitly
	// requested by the test setup (see the force* flags on clientMock).
	errDeliberate = errors.New("asked by user")
)

const (
	// _endOfStream is the mocked offset of the latest message in each partition.
	_endOfStream = int64(100)
)
19 |
// clientMock is a mock Kafka client used by the consumer tests.
type clientMock struct {
	// mux guards counter and partitionConsumers (see ConsumePartition).
	mux sync.Mutex
	// counter tracks how many partition consumers have been created so far.
	counter int
	// partitionConsumers holds the mock consumer of each topic/partition pair.
	partitionConsumers map[string]map[int32]*partitionConsumerMock
	topics []string
	partitions []int32
	// topicNotFound simulates the topic not being found on the server.
	topicNotFound bool
	// The force*Failure flags make the corresponding query return errDeliberate.
	forceTopicsQueryFailure bool
	forcePartitionsQueryFailure bool
	forceOffsetQueryFailure bool
	// ready is closed once all the active partitions have a consumer attached.
	ready chan interface{}
	// availableOffsets maps partition -> (requested offset key -> available offset).
	availableOffsets map[int32]map[int64]int64
	numberOfActivePartitions int
}
34 |
35 | func newClientMock(
36 | topics []string,
37 | numberOfPartitions int,
38 | numberOfActivePartitions int,
39 | topicNotFound bool,
40 | forceTopicListFailure bool,
41 | forcePartitionsQueryFailure bool,
42 | forceOffsetQueryFailure bool) *clientMock {
43 | available := make(map[int32]map[int64]int64)
44 | partitions := make([]int32, numberOfPartitions)
45 | for i := 0; i < numberOfPartitions; i++ {
46 | partitions[i] = int32(i)
47 | available[int32(i)] = map[int64]int64{
48 | sarama.OffsetNewest: _endOfStream,
49 | sarama.OffsetOldest: 0,
50 | }
51 | }
52 | if numberOfActivePartitions == 0 {
53 | numberOfActivePartitions = numberOfPartitions
54 | }
55 | cm := &clientMock{
56 | topics: topics,
57 | partitionConsumers: make(map[string]map[int32]*partitionConsumerMock),
58 | partitions: partitions,
59 | topicNotFound: topicNotFound,
60 | forceTopicsQueryFailure: forceTopicListFailure,
61 | forcePartitionsQueryFailure: forcePartitionsQueryFailure,
62 | forceOffsetQueryFailure: forceOffsetQueryFailure,
63 | ready: make(chan interface{}),
64 | availableOffsets: available,
65 | numberOfActivePartitions: numberOfActivePartitions,
66 | }
67 |
68 | for _, topic := range topics {
69 | cm.partitionConsumers[topic] = make(map[int32]*partitionConsumerMock)
70 | for _, partition := range partitions {
71 | cm.partitionConsumers[topic][partition] = newPartitionConsumerMock(
72 | topic,
73 | partition,
74 | sarama.OffsetNewest)
75 | }
76 | }
77 | return cm
78 | }
79 |
80 | func (c *clientMock) Partitions(_ string) ([]int32, error) {
81 | if c.forcePartitionsQueryFailure {
82 | return nil, errDeliberate
83 | }
84 | return c.partitions, nil
85 | }
86 |
87 | func (c *clientMock) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
88 | c.mux.Lock()
89 | defer c.mux.Unlock()
90 | if offset == sarama.OffsetNewest {
91 | offset = _endOfStream
92 | }
93 | if offset == sarama.OffsetOldest {
94 | offset = 0
95 | }
96 | c.partitionConsumers[topic][partition] = newPartitionConsumerMock(topic, partition, offset)
97 | c.counter++
98 | if c.counter == c.numberOfActivePartitions {
99 | close(c.ready)
100 | }
101 |
102 | return c.partitionConsumers[topic][partition], nil
103 | }
104 |
105 | func (c *clientMock) Topics() ([]string, error) {
106 | if c.forceTopicsQueryFailure {
107 | return nil, errDeliberate
108 | }
109 | if c.topicNotFound {
110 | // Simulating the behaviour when the topic was not found on the server
111 | return []string{}, nil
112 | }
113 | return c.topics, nil
114 | }
115 |
116 | func (c *clientMock) GetOffset(_ string, partition int32, offset int64) (int64, error) {
117 | if c.forceOffsetQueryFailure {
118 | return unknownOffset, errDeliberate
119 | }
120 | return c.availableOffsets[partition][offset], nil
121 | }
122 |
// Close is a no-op, provided to satisfy the client interface.
func (c *clientMock) Close() error {
	return nil
}
126 |
// receive simulates the arrival of a message on the given topic/partition at
// the specified time. The time string is parsed with dateparse; the parse
// error is deliberately ignored (test inputs are assumed to be valid).
func (c *clientMock) receive(topic string, partition int32, at string) {
	t, _ := dateparse.ParseAny(at)
	c.partitionConsumers[topic][partition].receive(t)
}
131 |
132 | func (c *clientMock) setAvailableOffset(partition int32, at interface{}, offset int64) {
133 | switch value := at.(type) {
134 | case int64:
135 | c.availableOffsets[partition][value] = offset
136 | case time.Time:
137 | c.availableOffsets[partition][value.UnixNano()/1000000] = offset
138 | case string:
139 | t, _ := dateparse.ParseAny(value)
140 | c.availableOffsets[partition][t.UnixNano()/1000000] = offset
141 | }
142 | }
143 |
--------------------------------------------------------------------------------
/kafka/partition_checkpoints.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "strings"
7 | )
8 |
9 | const (
10 | allPartitions int32 = -1
11 | invalidPartition int32 = -2
12 | )
13 |
// PartitionCheckpoints holds a list of explicitly requested offsets (if applicable).
//
// If no partition offset has been explicitly asked by the user, the global checkpoint will be stored as
// the only value in the map under `allPartitions` key. The global checkpoint is applicable to all the partitions.
type PartitionCheckpoints struct {
	// partitionCheckpoints maps a partition to its start/stop checkpoint pair.
	partitionCheckpoints map[int32]*checkpointPair
	// exclusive indicates that only explicitly defined partitions must be consumed.
	exclusive bool
	// from and to hold the original comma separated user input (for display only).
	from string
	to string
}
24 |
// NewPartitionCheckpoints creates a new instance of partition checkpoints.
//
// Each `from`/`to` entry is either a bare checkpoint value (applies to every
// partition) or a `partition#value` pair (applies to a single partition).
// It returns an error if any entry fails to parse, or if a start checkpoint
// falls after its corresponding stop checkpoint.
func NewPartitionCheckpoints(from, to []string, exclusive bool) (*PartitionCheckpoints, error) {
	var (
		// Seed the map with the default global start checkpoint. It applies to
		// every partition unless overridden below.
		checkpoints = map[int32]*checkpointPair{
			allPartitions: {
				from: newPredefinedCheckpoint(false),
			},
		}
	)

	for _, raw := range from {
		cp, partition, err := parse(raw, false)
		if err != nil {
			return nil, err
		}
		checkpoints[partition] = &checkpointPair{
			from: cp,
		}
	}

	for _, raw := range to {
		cp, partition, err := parse(raw, true)
		if err != nil {
			return nil, err
		}

		// A stop checkpoint for a partition with no explicit start inherits
		// the global start checkpoint.
		if _, ok := checkpoints[partition]; !ok {
			checkpoints[partition] = &checkpointPair{
				from: checkpoints[allPartitions].from,
				to: cp,
			}
		}

		if partition == allPartitions {
			// A global stop checkpoint: apply it to the global pair and to any
			// pair that does not have an explicit stop checkpoint yet.
			for partition, pair := range checkpoints {
				if partition == allPartitions || pair.to == nil {
					pair.to = cp
				}
			}
			continue
		}
		checkpoints[partition].to = cp
	}

	// Sanity check: every start checkpoint must precede its stop checkpoint.
	// NOTE(review): pairs without a stop checkpoint reach here with a nil `to`;
	// presumably `after` tolerates nil — confirm in checkpoint.go.
	for _, cp := range checkpoints {
		if cp.from.after(cp.to) {
			return nil, fmt.Errorf("start offset '%v' must be before stop offset '%v'", cp.from.String(), cp.to.String())
		}
	}

	return &PartitionCheckpoints{
		partitionCheckpoints: checkpoints,
		exclusive: exclusive,
		from: strings.Join(from, ","),
		to: strings.Join(to, ","),
	}, nil
}
82 |
// From returns the comma separated string of all the start checkpoints, as
// originally provided by the user.
func (p *PartitionCheckpoints) From() string {
	return p.from
}
87 |
// To returns the comma separated string of all the stop checkpoints, as
// originally provided by the user.
func (p *PartitionCheckpoints) To() string {
	return p.to
}
92 |
93 | // get returns the checkpoint for the specified partition.
94 | //
95 | // In `exclusive` mode, if the partition checkpoint has not explicitly defined by the user (using # syntax) this function returns `nil`.
96 | func (p *PartitionCheckpoints) get(partition int32) *checkpointPair {
97 | if pair, ok := p.partitionCheckpoints[partition]; ok {
98 | return pair
99 | }
100 |
101 | // We are in exclusive mode and there are explicitly defined checkpoints in the map.
102 | if p.exclusive && len(p.partitionCheckpoints) > 1 {
103 | // User explicitly asked for other partitions, but not this one.
104 | // We don't want to start consuming from this partition if it has not been asked by the user.
105 | return nil
106 | }
107 |
108 | return p.partitionCheckpoints[allPartitions]
109 | }
110 |
111 | func parse(raw string, isStopOffset bool) (*checkpoint, int32, error) {
112 | parts := strings.Split(raw, "#")
113 | switch len(parts) {
114 | case 1:
115 | cp, err := parseCheckpoint(raw, isStopOffset)
116 | if err != nil {
117 | return nil, invalidPartition, err
118 | }
119 | return cp, allPartitions, nil
120 | case 2:
121 | partition, err := parseInt(strings.TrimSpace(parts[0]), "partition")
122 | if err != nil {
123 | return nil, invalidPartition, err
124 | }
125 |
126 | offset := strings.TrimSpace(parts[1])
127 | cp, err := parseCheckpoint(offset, isStopOffset)
128 | if err != nil {
129 | return nil, invalidPartition, err
130 | }
131 | return cp, int32(partition), nil
132 | default:
133 | return nil, invalidPartition, fmt.Errorf("invalid start/stop value: %s", raw)
134 | }
135 | }
136 |
// parseInt parses a non-negative base-10 integer, reporting failures using
// the provided entity name (e.g. "partition").
func parseInt(value string, entity string) (int64, error) {
	parsed, err := strconv.ParseInt(value, 10, 64)
	switch {
	case err != nil:
		return 0, fmt.Errorf("invalid %s value", entity)
	case parsed < 0:
		return 0, fmt.Errorf("%s cannot be a negative value", entity)
	default:
		return parsed, nil
	}
}
147 |
--------------------------------------------------------------------------------
/commands/produce/proto.go:
--------------------------------------------------------------------------------
1 | package produce
2 |
3 | import (
4 | "context"
5 | "encoding/base64"
6 | "encoding/hex"
7 | "fmt"
8 | "strings"
9 | "time"
10 |
11 | "github.com/jhump/protoreflect/dynamic"
12 | "gopkg.in/alecthomas/kingpin.v2"
13 |
14 | "github.com/xitonix/trubka/commands"
15 | "github.com/xitonix/trubka/commands/produce/template"
16 | "github.com/xitonix/trubka/internal"
17 | "github.com/xitonix/trubka/protobuf"
18 | )
19 |
// proto represents the `produce proto` command which publishes protobuf
// messages to Kafka.
type proto struct {
	kafkaParams *commands.KafkaParameters
	globalParams *commands.GlobalParameters
	// message is the raw message content (JSON/Base64/Hex) provided by the user.
	message string
	// key is the partition key of the message.
	key string
	topic string
	// proto is the fully qualified name of the message type to publish.
	proto string
	// count is the number of messages to publish (set via addProducerFlags).
	count uint64
	// protoRoot is the path to the folder where the *.proto files live.
	protoRoot string
	random bool
	// protoMessage is the loaded dynamic message instance (set in run).
	protoMessage *dynamic.Message
	// highlightStyle is the Json highlighting style of the output.
	highlightStyle string
	highlighter *internal.JSONHighlighter
	// decodeFrom is the encoding of the message content (json, base64 or hex).
	decodeFrom string
	parser *template.Parser
	// sleep is the optional delay between publish operations.
	sleep time.Duration
}
37 |
// addProtoSubCommand registers the `proto` sub command, along with all of its
// arguments and flags, with the parent `produce` command.
func addProtoSubCommand(parent *kingpin.CmdClause, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {

	cmd := &proto{
		kafkaParams: kafkaParams,
		globalParams: global,
		parser: template.NewParser(),
	}
	c := parent.Command("proto", "Publishes protobuf messages to Kafka.").Action(cmd.run)
	c.Arg("topic", "The topic to publish to.").Required().StringVar(&cmd.topic)
	c.Arg("proto", "The proto to publish to.").Required().StringVar(&cmd.proto)
	c.Arg("content", "The JSON/Base64/Hex representation of the message. You can pipe the content in, or pass it as the command's second argument.").StringVar(&cmd.message)
	c.Flag("decode-from", "The encoding of the message content. The default value is no encoding (json).").
		Short('D').
		Default(internal.JSONEncoding).
		EnumVar(&cmd.decodeFrom, internal.JSONEncoding, internal.Base64Encoding, internal.HexEncoding)
	c.Flag("proto-root", "The path to the folder where your *.proto files live.").
		Short('r').
		Required().
		StringVar(&cmd.protoRoot)
	// Flags shared with the other produce sub commands (sleep/key/random/count).
	addProducerFlags(c, &cmd.sleep, &cmd.key, &cmd.random, &cmd.count)
	c.Flag("style", fmt.Sprintf("The highlighting style of the Json message content. Applicable to --content-type=%s only. Set to 'none' to disable.", internal.JSONEncoding)).
		Default(internal.DefaultHighlightStyle).
		EnumVar(&cmd.highlightStyle,
			internal.HighlightStyles...)
}
63 |
// run is the kingpin action of the `produce proto` command. It loads the
// proto definitions from disk, resolves the requested message type and then
// hands over to the shared produce loop.
func (c *proto) run(_ *kingpin.ParseContext) error {
	// The content can be passed as an argument or piped in (see the `content`
	// argument's help text).
	value, err := getValue(c.message)
	if err != nil {
		return err
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Cancel the context as soon as the user interrupts the process.
	go func() {
		internal.WaitForCancellationSignal()
		cancel()
	}()

	loader, err := protobuf.LoadFiles(ctx, c.globalParams.Verbosity, c.protoRoot)
	if err != nil {
		return err
	}

	err = loader.Load(ctx, c.proto)
	if err != nil {
		return err
	}

	message, err := loader.Get(c.proto)
	if err != nil {
		return err
	}

	c.protoMessage = message
	c.highlighter = internal.NewJSONHighlighter(c.highlightStyle, c.globalParams.EnableColor)

	return produce(ctx, c.kafkaParams, c.globalParams, c.topic, c.key, value, c.serializeProto, c.count, c.sleep)
}
97 |
// serializeProto converts the raw input into the bytes to publish: base64 and
// hex content is simply decoded, while JSON content is (optionally) template
// parsed and then marshalled through the loaded proto message type. In
// verbose mode the content is also echoed to stdout on success.
func (c *proto) serializeProto(value string) (result []byte, err error) {
	var isJSON bool
	switch strings.ToLower(c.decodeFrom) {
	case internal.Base64Encoding:
		result, err = base64.StdEncoding.DecodeString(value)
	case internal.HexEncoding:
		// Tolerate spaced hex dumps (eg. "DE AD BE EF").
		value = strings.ReplaceAll(value, " ", "")
		result, err = hex.DecodeString(value)
	default:
		isJSON = true
		if c.random {
			// Substitute the random-generation templates before unmarshalling.
			value, err = c.parser.Parse(value)
			if err != nil {
				return nil, err
			}
		}

		err = c.protoMessage.UnmarshalJSON([]byte(value))
		if err != nil {
			if !c.random {
				return nil, fmt.Errorf("failed to parse the input as json. If the schema has been produced using -g flag, you must use the same flag (-g) to enable template parsing when publishing to Kafka: %w", err)
			}
			return nil, err
		}

		result, err = c.protoMessage.Marshal()
	}
	if err == nil {
		c.printContent(value, isJSON)
	}
	return
}
130 |
131 | func (c *proto) printContent(value string, json bool) {
132 | if c.globalParams.Verbosity < internal.Verbose {
133 | return
134 | }
135 | if json {
136 | fmt.Printf("%s\n", c.highlighter.Highlight([]byte(value)))
137 | } else {
138 | fmt.Printf("%s\n", value)
139 | }
140 | }
141 |
--------------------------------------------------------------------------------
/kafka/checkpoint_test.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/araddon/dateparse"
7 | )
8 |
// TestParseCheckpoint verifies parseCheckpoint for both start and stop
// offsets: the predefined aliases (oldest/newest/local and their synonyms),
// time-based values, explicit numeric offsets, empty and invalid input.
//
// NOTE(review): the "time based stop offset" and "explicit stop offset" cases
// do not set isStopCheckpoint, so they exercise the start-offset path; also
// two cases share the "invalid value" title. Confirm whether that is
// intentional.
func TestParseCheckpoint(t *testing.T) {
	timeValue := "2020-07-15T14:24:23.703+10:00"
	timestamp, _ := dateparse.ParseAny(timeValue)
	testCases := []struct {
		title string
		raw string
		expected *checkpoint
		expectError string
		isStopCheckpoint bool
	}{
		{
			title: "empty input",
			expectError: "checkpoint value cannot be empty",
		},
		{
			title: "oldest",
			raw: "oldest",
			expected: newPredefinedCheckpoint(true),
		},
		{
			title: "beginning",
			raw: "beginning",
			expected: newPredefinedCheckpoint(true),
		},
		{
			title: "earliest",
			raw: "earliest",
			expected: newPredefinedCheckpoint(true),
		},
		{
			title: "start",
			raw: "start",
			expected: newPredefinedCheckpoint(true),
		},
		{
			title: "newest",
			raw: "newest",
			expected: newPredefinedCheckpoint(false),
		},
		{
			title: "latest",
			raw: "latest",
			expected: newPredefinedCheckpoint(false),
		},
		{
			title: "end",
			raw: "end",
			expected: newPredefinedCheckpoint(false),
		},
		{
			title: "local",
			raw: "local",
			expected: newLocalCheckpoint(),
		},
		{
			title: "stored",
			raw: "stored",
			expected: newLocalCheckpoint(),
		},
		{
			title: "time based",
			raw: timeValue,
			expected: newTimeCheckpoint(timestamp),
		},
		{
			title: "explicit",
			raw: "100",
			expected: newExplicitCheckpoint(100),
		},
		{
			title: "invalid value",
			raw: "invalid",
			expectError: "invalid offset value",
		},
		{
			title: "empty input for stop offset",
			expectError: "checkpoint value cannot be empty",
		},
		{
			title: "oldest stop offset",
			raw: "oldest",
			expectError: "is not an acceptable stop condition",
			isStopCheckpoint: true,
		},
		{
			title: "beginning stop offset",
			raw: "beginning",
			expectError: "is not an acceptable stop condition",
			isStopCheckpoint: true,
		},
		{
			title: "earliest stop offset",
			raw: "earliest",
			expectError: "is not an acceptable stop condition",
			isStopCheckpoint: true,
		},
		{
			title: "start stop offset",
			raw: "start",
			expectError: "is not an acceptable stop condition",
			isStopCheckpoint: true,
		},
		{
			title: "newest stop offset",
			raw: "newest",
			expectError: "is not an acceptable stop condition",
			isStopCheckpoint: true,
		},
		{
			title: "latest stop offset",
			raw: "latest",
			expectError: "is not an acceptable stop condition",
			isStopCheckpoint: true,
		},
		{
			title: "end stop offset",
			raw: "end",
			expectError: "is not an acceptable stop condition",
			isStopCheckpoint: true,
		},
		{
			title: "local stop offset",
			raw: "local",
			expectError: "is not an acceptable stop condition",
			isStopCheckpoint: true,
		},
		{
			title: "stored stop offset",
			raw: "stored",
			expectError: "is not an acceptable stop condition",
			isStopCheckpoint: true,
		},
		{
			title: "time based stop offset",
			raw: timeValue,
			expected: newTimeCheckpoint(timestamp),
		},
		{
			title: "explicit stop offset",
			raw: "100",
			expected: newExplicitCheckpoint(100),
		},
		{
			title: "invalid value",
			raw: "invalid",
			expectError: "invalid offset value",
			isStopCheckpoint: true,
		},
	}
	for _, tC := range testCases {
		t.Run(tC.title, func(t *testing.T) {
			actual, err := parseCheckpoint(tC.raw, tC.isStopCheckpoint)
			// checkError and compareCheckpoints are test helpers defined
			// elsewhere in the package.
			if !checkError(err, tC.expectError) {
				t.Errorf("Expected error: %q, Actual: %s", tC.expectError, err)
			}
			if err := compareCheckpoints("parsed", actual, tC.expected); err != nil {
				t.Errorf("%s", err)
			}
		})
	}
}
170 |
--------------------------------------------------------------------------------
/commands/common.go:
--------------------------------------------------------------------------------
1 | package commands
2 |
3 | import (
4 | "bufio"
5 | "context"
6 | "fmt"
7 | "os"
8 | "os/signal"
9 | "sort"
10 | "strings"
11 | "syscall"
12 |
13 | "gopkg.in/alecthomas/kingpin.v2"
14 |
15 | "github.com/xitonix/trubka/internal"
16 | "github.com/xitonix/trubka/internal/output/format"
17 | "github.com/xitonix/trubka/internal/output/format/list"
18 | "github.com/xitonix/trubka/internal/output/format/tabular"
19 | "github.com/xitonix/trubka/kafka"
20 | )
21 |
// Output format names accepted by the --format flag (see AddFormatFlag).
const (
	// PlainTextFormat plain text format.
	PlainTextFormat = "plain"
	// TableFormat tabular format.
	TableFormat = "table"
	// TreeFormat tree format.
	TreeFormat = "tree"
	// JSONFormat json format.
	JSONFormat = "json"
)
32 |
33 | // InitKafkaManager initialises the Kafka manager.
34 | func InitKafkaManager(globalParams *GlobalParameters, kafkaParams *KafkaParameters) (*kafka.Manager, context.Context, context.CancelFunc, error) {
35 | brokers := GetBrokers(kafkaParams.Brokers)
36 | manager, err := kafka.NewManager(brokers,
37 | globalParams.Verbosity,
38 | kafka.WithClusterVersion(kafkaParams.Version),
39 | kafka.WithTLS(kafkaParams.TLS),
40 | kafka.WithClusterVersion(kafkaParams.Version),
41 | kafka.WithSASL(kafkaParams.SASLMechanism,
42 | kafkaParams.SASLUsername,
43 | kafkaParams.SASLPassword,
44 | kafkaParams.SASLHandshakeVersion))
45 |
46 | if err != nil {
47 | return nil, nil, nil, err
48 | }
49 |
50 | ctx, cancel := context.WithCancel(context.Background())
51 |
52 | go func() {
53 | signals := make(chan os.Signal, 1)
54 | signal.Notify(signals, os.Interrupt, syscall.SIGTERM)
55 | <-signals
56 | cancel()
57 | }()
58 |
59 | return manager, ctx, cancel, nil
60 | }
61 |
// GetBrokers splits a comma separated broker string into individual,
// whitespace-trimmed broker addresses.
func GetBrokers(commaSeparated string) []string {
	parts := strings.Split(commaSeparated, ",")
	result := make([]string, len(parts))
	for i, broker := range parts {
		result[i] = strings.TrimSpace(broker)
	}
	return result
}
70 |
// AddFormatFlag adds the output format flag and the Json highlighting style
// flag to the specified command.
func AddFormatFlag(c *kingpin.CmdClause, format *string, style *string) {
	c.Flag("format", "Sets the output format.").
		Default(TableFormat).
		NoEnvar().
		Short('f').
		EnumVar(format, PlainTextFormat, TableFormat, TreeFormat, JSONFormat)

	c.Flag("style", fmt.Sprintf("The highlighting style of the Json output. Applicable to --format=%s only. Disabled (none) by default.", JSONFormat)).
		Default("none").
		EnumVar(style, internal.HighlightStyles...)
}
83 |
// PrintConfigTable prints the configurations in tabular format, sorted by
// configuration name.
func PrintConfigTable(entries []*kafka.ConfigEntry) {
	sort.Sort(kafka.ConfigEntriesByName(entries))
	table := tabular.NewTable(true,
		tabular.C("Name").Align(tabular.AlignLeft).MaxWidth(100),
		tabular.C("Value").Align(tabular.AlignLeft).FAlign(tabular.AlignRight).MaxWidth(100),
	)
	table.SetTitle(format.WithCount("Configurations", len(entries)))
	for _, config := range entries {
		// Break comma separated values onto their own lines within the cell.
		parts := strings.Split(config.Value, ",")
		table.AddRow(config.Name, strings.Join(parts, "\n"))
	}
	table.AddFooter("", fmt.Sprintf("Total: %d", len(entries)))
	table.Render()
}
99 |
// PrintConfigList prints the configurations as a list, sorted by name. In
// plain mode each entry is printed on a single line; otherwise comma
// separated values are expanded into nested list items.
//
// NOTE(review): the top-level Indent() is never matched by an UnIndent()
// before returning — confirm whether callers rely on that.
func PrintConfigList(l list.List, entries []*kafka.ConfigEntry, plain bool) {
	sort.Sort(kafka.ConfigEntriesByName(entries))
	l.AddItem("Configurations")
	l.Indent()
	for _, config := range entries {
		if plain {
			l.AddItemF("%s: %v", config.Name, config.Value)
			continue
		}
		parts := strings.Split(config.Value, ",")
		if len(parts) == 1 {
			l.AddItemF("%s: %v", config.Name, config.Value)
			continue
		}
		// Multi-value configuration: render each non-empty value as a child item.
		l.AddItem(config.Name)
		l.Indent()
		for _, val := range parts {
			if !internal.IsEmpty(val) {
				l.AddItem(val)
			}
		}
		l.UnIndent()
	}
}
125 |
// AskForConfirmation asks the user for confirmation. The user must type in "yes/y", "no/n" or "exit/quit/q"
// and then press enter. It has fuzzy matching, so "y", "Y", "yes", "YES", and "Yes" all count as
// confirmations. If the input is not recognized, it will ask again. The function does not return
// until it gets a valid response from the user (or stdin is exhausted, which
// counts as "no").
func AskForConfirmation(s string) bool {
	prompt := fmt.Sprintf("%s [y/n]?: ", s)
	scanner := bufio.NewScanner(os.Stdin)
	for {
		fmt.Print(prompt)
		if !scanner.Scan() {
			return false
		}
		answer := strings.ToLower(strings.TrimSpace(scanner.Text()))
		switch answer {
		case "y", "yes":
			return true
		case "n", "no", "q", "quit", "exit":
			return false
		}
	}
}
144 |
--------------------------------------------------------------------------------
/internal/printer.go:
--------------------------------------------------------------------------------
1 | package internal
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "sync"
7 | "time"
8 | )
9 |
const (
	// loggingWriterKey the internal targets key reserved for the log writer channel.
	loggingWriterKey = "___trubka__logging__writer__key___"
	// loggingTimestampLayout the timestamp prefix layout applied to every log entry.
	loggingTimestampLayout = "2006/01/02 15:04:05 "
)
14 |
// Printer represents a printer type.
//
// Implementations write log entries (Info/Warning/Error families) gated by a
// verbosity level, and Kafka event payloads keyed by topic.
type Printer interface {
	Errorf(level VerbosityLevel, format string, args ...interface{})
	Error(level VerbosityLevel, msg string)
	Infof(level VerbosityLevel, format string, args ...interface{})
	Info(level VerbosityLevel, msg string)
	Warningf(level VerbosityLevel, format string, args ...interface{})
	Warning(level VerbosityLevel, msg string)
	// WriteEvent writes the event content for the given topic.
	WriteEvent(topic string, bytes []byte)
	// Close releases the printer's resources.
	Close()
	// Level returns the current verbosity level.
	Level() VerbosityLevel
}
27 |
// SyncPrinter is an implementation of Printer interface to synchronously write to specified io.Writer instances.
type SyncPrinter struct {
	currentLevel VerbosityLevel
	// wg tracks the background goroutine started for each unique writer.
	wg sync.WaitGroup
	// targets maps a topic (or the internal logging key) to its input channel.
	targets map[string]chan string
	// uniqueTargets ensures a single channel (and goroutine) per distinct
	// io.Writer, even when several topics share the same writer.
	uniqueTargets map[io.Writer]chan string
}
35 |
// NewPrinter creates a new synchronised writer.
//
// logOutput receives all log entries; per-topic message writers are
// registered later via Start.
func NewPrinter(currentLevel VerbosityLevel, logOutput io.Writer) *SyncPrinter {
	logInput := make(chan string, 100)
	return &SyncPrinter{
		currentLevel: currentLevel,
		uniqueTargets: map[io.Writer]chan string{
			logOutput: logInput,
		},
		targets: map[string]chan string{
			loggingWriterKey: logInput,
		},
	}
}
49 |
// Start starts the underlying message processors.
//
// One goroutine (and channel) is spawned per unique io.Writer, so topics
// sharing the same writer are serialised through the same channel.
func (p *SyncPrinter) Start(messageOutputs map[string]io.Writer) {
	for topic, writer := range messageOutputs {
		// Reuse the channel if this writer has already been registered.
		input, ok := p.uniqueTargets[writer]
		if !ok {
			input = make(chan string)
			p.uniqueTargets[writer] = input
		}

		p.targets[topic] = input
	}

	for w, in := range p.uniqueTargets {
		p.wg.Add(1)
		go func(writer io.Writer, input chan string) {
			defer p.wg.Done()
			// Drain the channel until Close closes it.
			for msg := range input {
				_, err := fmt.Fprintln(writer, msg)
				if err != nil {
					fmt.Printf("Failed to write the entry: %s\n", err)
				}
			}
		}(w, in)
	}
}
75 |
// Close closes the internal synchronisation channels and blocks until all the
// writer goroutines have drained them.
//
// Writing into a closed printer will panic.
func (p *SyncPrinter) Close() {
	for _, inputChannel := range p.uniqueTargets {
		close(inputChannel)
	}
	p.wg.Wait()
}
85 |
// Info writes a new line to the Logging io.Writer synchronously, provided that
// the current verbosity level is greater than or equal to the requested level.
func (p *SyncPrinter) Info(level VerbosityLevel, msg string) {
	p.log(level, msg)
}

// Infof formats according to a format specifier and writes a new line to the Logging io.Writer synchronously,
// provided that the current verbosity level is greater than or equal to the requested level.
func (p *SyncPrinter) Infof(level VerbosityLevel, format string, a ...interface{}) {
	p.log(level, fmt.Sprintf(format, a...))
}

// Warning writes a new line to the Logging io.Writer synchronously, provided that
// the current verbosity level is greater than or equal to the requested level.
func (p *SyncPrinter) Warning(level VerbosityLevel, msg string) {
	p.log(level, msg)
}

// Warningf formats according to a format specifier and writes a new line to the Logging io.Writer synchronously,
// provided that the current verbosity level is greater than or equal to the requested level.
func (p *SyncPrinter) Warningf(level VerbosityLevel, format string, a ...interface{}) {
	p.log(level, fmt.Sprintf(format, a...))
}

// Error writes a new line to the Logging io.Writer synchronously, provided that
// the current verbosity level is greater than or equal to the requested level.
func (p *SyncPrinter) Error(level VerbosityLevel, msg string) {
	p.log(level, msg)
}

// Errorf formats according to a format specifier and writes a new line to the Logging io.Writer synchronously,
// provided that the current verbosity level is greater than or equal to the requested level.
func (p *SyncPrinter) Errorf(level VerbosityLevel, format string, a ...interface{}) {
	p.log(level, fmt.Sprintf(format, a...))
}
118 |
119 | // WriteEvent writes the event content to the relevant message io.Writer.
120 | func (p *SyncPrinter) WriteEvent(topic string, bytes []byte) {
121 | if len(bytes) == 0 {
122 | return
123 | }
124 | p.targets[topic] <- string(bytes)
125 | }
126 |
// Level returns the current verbosity level.
func (p *SyncPrinter) Level() VerbosityLevel {
	return p.currentLevel
}
131 |
// log prefixes msg with a timestamp and sends it to the logging channel. The
// message is dropped when it is more verbose than the configured level.
func (p *SyncPrinter) log(level VerbosityLevel, msg string) {
	if p.currentLevel < level {
		return
	}
	p.targets[loggingWriterKey] <- time.Now().Format(loggingTimestampLayout) + msg
}
138 |
--------------------------------------------------------------------------------
/kafka/broker_meta.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "fmt"
5 | "sort"
6 |
7 | "github.com/xitonix/trubka/internal"
8 | )
9 |
// API represents a Kafka API method and the version range the broker supports
// for it.
type API struct {
	// Name method name.
	Name string `json:"name"`
	// Key the key of the API method.
	Key int16 `json:"key"`
	// MinVersion minimum version the broker supports.
	MinVersion int16 `json:"min_version"`
	// MaxVersion maximum version the broker supports.
	MaxVersion int16 `json:"max_version"`
}
21 |
22 | func newAPI(name string, key, minVer, maxVer int16) *API {
23 | if internal.IsEmpty(name) {
24 | name = "UNKNOWN"
25 | }
26 | return &API{
27 | Name: name,
28 | Key: key,
29 | MinVersion: minVer,
30 | MaxVersion: maxVer,
31 | }
32 | }
33 |
// String returns the string representation of the API, in the form
// "vMin ≤ [Key] Name ≤ vMax".
func (a *API) String() string {
	return fmt.Sprintf("v%d ≤ [%2d] %s ≤ v%d", a.MinVersion, a.Key, a.Name, a.MaxVersion)
}
38 |
// APIByCode sorts the API list by code.
type APIByCode []*API

// Len implements sort.Interface.
func (a APIByCode) Len() int {
	return len(a)
}

// Swap implements sort.Interface.
func (a APIByCode) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

// Less implements sort.Interface, ordering by ascending API key.
func (a APIByCode) Less(i, j int) bool {
	return a[i].Key < a[j].Key
}
53 |
// BrokerMeta holds a Kafka broker's metadata.
type BrokerMeta struct {
	// Details the broker details.
	Details *Broker
	// ConsumerGroups a list of the consumer groups being managed by the broker.
	ConsumerGroups []string
	// Logs broker logs.
	Logs []*LogFile
	// APIs a list of the APIs supported by the broker.
	APIs []*API
}
65 |
66 | // ToJSON returns an object ready to be serialised into json string.
67 | func (b *BrokerMeta) ToJSON(withLogs, withAPIs, includeZeros bool) interface{} {
68 | if b == nil {
69 | return nil
70 | }
71 | type log struct {
72 | Path string `json:"path"`
73 | Entries []*LogEntry `json:"entries"`
74 | }
75 | output := struct {
76 | Details *Broker `json:"details"`
77 | ConsumerGroups []string `json:"consumer_groups,omitempty"`
78 | Logs []*log `json:"logs,omitempty"`
79 | APIs []*API `json:"api,omitempty"`
80 | }{
81 | Details: b.Details,
82 | ConsumerGroups: b.ConsumerGroups,
83 | }
84 |
85 | if withLogs {
86 | for _, logs := range b.Logs {
87 | sorted := logs.SortByPermanentSize()
88 | log := &log{
89 | Path: logs.Path,
90 | Entries: []*LogEntry{},
91 | }
92 | for _, entry := range sorted {
93 | if !includeZeros && entry.Permanent == 0 {
94 | continue
95 | }
96 | log.Entries = append(log.Entries, entry)
97 | }
98 | output.Logs = append(output.Logs, log)
99 | }
100 | }
101 |
102 | if withAPIs {
103 | sort.Sort(APIByCode(b.APIs))
104 | output.APIs = b.APIs
105 | }
106 |
107 | return output
108 | }
109 |
// aggregatedTopicSize maps a topic name to its aggregated log sizes.
type aggregatedTopicSize map[string]*LogEntry

// LogFile represents a broker log file.
type LogFile struct {
	// Path the path on the server where the log is being stored.
	Path string `json:"path"`
	// Entries the log entries, keyed by topic.
	Entries aggregatedTopicSize `json:"entries"`
}
119 |
120 | func newLogFile(path string) *LogFile {
121 | return &LogFile{
122 | Path: path,
123 | Entries: make(aggregatedTopicSize),
124 | }
125 | }
126 |
127 | func (l *LogFile) set(topic string, size int64, isTemp bool) {
128 | if _, ok := l.Entries[topic]; !ok {
129 | l.Entries[topic] = &LogEntry{
130 | Topic: topic,
131 | }
132 | }
133 | if isTemp {
134 | l.Entries[topic].Temporary += uint64(size)
135 | } else {
136 | l.Entries[topic].Permanent += uint64(size)
137 | }
138 | }
139 |
// SortByPermanentSize sorts the log entries by permanent log size in descending order.
func (l *LogFile) SortByPermanentSize() []*LogEntry {
	// Maps cannot be sorted in place, so flatten into a slice first.
	result := l.toSlice()
	sort.Sort(logsByPermanentSize(result))
	return result
}
146 |
147 | func (l *LogFile) toSlice() []*LogEntry {
148 | result := make([]*LogEntry, len(l.Entries))
149 | var i int
150 | for _, l := range l.Entries {
151 | result[i] = l
152 | i++
153 | }
154 | return result
155 | }
156 |
157 | type logsByPermanentSize []*LogEntry
158 |
159 | func (l logsByPermanentSize) Len() int {
160 | return len(l)
161 | }
162 |
163 | func (l logsByPermanentSize) Swap(i, j int) {
164 | l[i], l[j] = l[j], l[i]
165 | }
166 |
167 | func (l logsByPermanentSize) Less(i, j int) bool {
168 | return l[i].Permanent > l[j].Permanent
169 | }
170 |
171 | // LogEntry represents a broker log entry.
172 | type LogEntry struct {
173 | // Topic the topic.
174 | Topic string `json:"topic"`
175 | // Permanent the size of the permanently stored logs in bytes.
176 | Permanent uint64 `json:"permanent"`
177 | // Temporary the size of the temporary logs in bytes.
178 | Temporary uint64 `json:"temporary"`
179 | }
180 |
--------------------------------------------------------------------------------
/commands/describe/group.go:
--------------------------------------------------------------------------------
1 | package describe
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 |
7 | "gopkg.in/alecthomas/kingpin.v2"
8 |
9 | "github.com/xitonix/trubka/commands"
10 | "github.com/xitonix/trubka/internal/output"
11 | "github.com/xitonix/trubka/internal/output/format"
12 | "github.com/xitonix/trubka/internal/output/format/list"
13 | "github.com/xitonix/trubka/internal/output/format/tabular"
14 | "github.com/xitonix/trubka/kafka"
15 | )
16 |
// group represents the `describe group` sub command and its CLI parameters.
type group struct {
	kafkaParams *commands.KafkaParameters
	globalParams *commands.GlobalParameters
	// includeMembers controls whether member/partition assignments are printed.
	includeMembers bool
	// group is the name of the consumer group to describe.
	group string
	format string
	style string
}
25 |
// addGroupSubCommand registers the `group` sub command, its argument and
// flags with the parent `describe` command.
func addGroupSubCommand(parent *kingpin.CmdClause, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
	cmd := &group{
		kafkaParams: kafkaParams,
		globalParams: global,
	}
	c := parent.Command("group", "Describes a consumer group.").Action(cmd.run)
	c.Arg("group", "The consumer group name to describe.").Required().StringVar(&cmd.group)
	c.Flag("include-members", "Lists the group members and partition assignments in the output.").
		NoEnvar().
		Short('m').
		BoolVar(&cmd.includeMembers)
	commands.AddFormatFlag(c, &cmd.format, &cmd.style)
}
39 |
// run describes the requested consumer group and renders the result in the
// selected output format (json, table, tree or plain text).
func (c *group) run(_ *kingpin.ParseContext) error {
	manager, ctx, cancel, err := commands.InitKafkaManager(c.globalParams, c.kafkaParams)

	if err != nil {
		return err
	}

	defer func() {
		manager.Close()
		cancel()
	}()

	cgd, err := manager.DescribeGroup(ctx, c.group, c.includeMembers)
	if err != nil {
		return err
	}

	switch c.format {
	case commands.JSONFormat:
		data := cgd.ToJSON(c.includeMembers)
		return output.PrintAsJSON(data, c.style, c.globalParams.EnableColor)
	case commands.TableFormat:
		return c.printAsTable(cgd)
	case commands.TreeFormat:
		return c.printAsList(cgd, false)
	case commands.PlainTextFormat:
		return c.printAsList(cgd, true)
	}
	return nil
}
70 |
// printAsList renders the group details as a tree (or, when plain is true, as
// plain text). The leading spaces in the State/Protocol labels keep the
// values vertically aligned with the Coordinator line.
func (c *group) printAsList(details *kafka.ConsumerGroupDetails, plain bool) error {
	l := list.New(plain)
	l.AddItemF("Coordinator: %s", details.Coordinator.String())
	l.AddItemF("      State: %s", format.GroupStateLabel(details.State, c.globalParams.EnableColor && !plain))
	l.AddItemF("   Protocol: %s/%s", details.Protocol, details.ProtocolType)
	if c.includeMembers && len(details.Members) > 0 {
		l.AddItemF("Members")
		l.Indent()
		for member, md := range details.Members {
			l.AddItemF("%s (%s)", member, md.ClientHost)
			if len(details.Members[member].Assignments) == 0 {
				continue
			}
			// Render the member's topic/partition assignments as a nested list.
			tps := details.Members[member].Assignments
			sortedTopics := tps.SortedTopics()
			l.Indent()
			for _, topic := range sortedTopics {
				l.AddItemF("%s: %s", topic, tps.SortedPartitionsString(topic))
			}
			l.UnIndent()
		}
		l.UnIndent()
	}
	l.Render()
	return nil
}
97 |
// printAsTable renders the group details in tabular format, followed by a
// members table when member details were requested and are available.
func (c *group) printAsTable(details *kafka.ConsumerGroupDetails) error {
	table := tabular.NewTable(c.globalParams.EnableColor,
		tabular.C("Coordinator"),
		tabular.C("State"),
		tabular.C("Protocol"),
		tabular.C("Protocol Type"),
	)

	table.AddRow(
		details.Coordinator.Host,
		format.GroupStateLabel(details.State, c.globalParams.EnableColor),
		details.Protocol,
		details.ProtocolType,
	)
	table.Render()

	if c.includeMembers && len(details.Members) > 0 {
		c.printMemberDetailsTable(details.Members)
	}
	return nil
}
119 |
// printMemberDetailsTable renders each group member's id, client host and
// topic/partition assignments as a table. Partition numbers are wrapped at 20
// per line to keep the Assignments column readable.
func (c *group) printMemberDetailsTable(members map[string]*kafka.GroupMemberDetails) {
	table := tabular.NewTable(c.globalParams.EnableColor,
		tabular.C("ID").HAlign(tabular.AlignLeft).FAlign(tabular.AlignRight),
		tabular.C("Client Host"),
		tabular.C("Assignments").Align(tabular.AlignLeft),
	)

	table.SetTitle(format.WithCount("Members", len(members)))
	for name, desc := range members {
		sortedTopics := desc.Assignments.SortedTopics()
		var buf strings.Builder
		for i, topic := range sortedTopics {
			buf.WriteString(format.Underline(topic))
			partitions := desc.Assignments.SortedPartitions(topic)
			for j, p := range partitions {
				// Start a new line after every 20 partitions.
				if j%20 == 0 {
					buf.WriteString("\n")
				}
				buf.WriteString(fmt.Sprintf("%d ", p))
			}
			// Blank line between topics, except after the last one.
			if i < len(sortedTopics)-1 {
				buf.WriteString("\n\n")
			}
		}
		table.AddRow(
			format.SpaceIfEmpty(name),
			format.SpaceIfEmpty(desc.ClientHost),
			format.SpaceIfEmpty(buf.String()),
		)
	}
	table.AddFooter(fmt.Sprintf("Total: %d", len(members)), " ", " ")
	table.Render()
}
153 |
--------------------------------------------------------------------------------
/internal/plain_text_marshaller.go:
--------------------------------------------------------------------------------
1 | package internal
2 |
3 | import (
4 | "bytes"
5 | "encoding/base64"
6 | "encoding/hex"
7 | "encoding/json"
8 | "fmt"
9 | "strings"
10 | "time"
11 | )
12 |
// Json output encodings.
const (
	// JSONEncoding compact Json output.
	JSONEncoding = "json"
	// JSONIndentEncoding indented Json output.
	JSONIndentEncoding = "json-indent"
)

// Non-Json message encodings.
const (
	// PlainTextEncoding plain text encoding.
	PlainTextEncoding = "plain"
	// Base64Encoding base64 encoding.
	Base64Encoding = "base64"
	// HexEncoding hex encoding.
	HexEncoding = "hex"
)
28 |
// HighlightStyles contains the available Json highlighting styles. The
// special "none" value disables highlighting.
var HighlightStyles = []string{
	"autumn",
	"dracula",
	"emacs",
	"friendly",
	"fruity",
	"github",
	"lovelace",
	"monokai",
	"monokailight",
	"native",
	"paraiso-dark",
	"paraiso-light",
	"pygments",
	"rrt",
	"solarized-dark",
	"solarized-light",
	"swapoff",
	"tango",
	"trac",
	"vim",
	"none",
}

// DefaultHighlightStyle default Json highlighting style across the app.
const DefaultHighlightStyle = "fruity"
56 |
// PlainTextMarshaller represents plain text marshaller.
type PlainTextMarshaller struct {
	// inclusions specifies which message metadata (key, timestamp, etc.) to render.
	inclusions *MessageMetadata
	enableColor bool
	// inputEncoding the encoding of the consumed message (lower-cased).
	inputEncoding string
	// outputEncoding the encoding of the marshalled output (lower-cased).
	outputEncoding string
	// jsonProcessor post-processes Json output (metadata, highlighting).
	jsonProcessor *JSONMessageProcessor
	// isJSON caches whether the output encoding is json or json-indent.
	isJSON bool
}
66 |
// NewPlainTextMarshaller creates a new instance of a plain text marshaller.
//
// Both encodings are normalised to lower case. The output encoding also
// configures the Json processor and the isJSON shortcut used by Marshal.
func NewPlainTextMarshaller(
	inputEncoding string,
	outputEncoding string,
	inclusions *MessageMetadata,
	enableColor bool,
	highlightStyle string) *PlainTextMarshaller {
	outputEncoding = strings.TrimSpace(strings.ToLower(outputEncoding))
	return &PlainTextMarshaller{
		inputEncoding: strings.TrimSpace(strings.ToLower(inputEncoding)),
		outputEncoding: outputEncoding,
		inclusions: inclusions,
		enableColor: enableColor,
		jsonProcessor: NewJSONMessageProcessor(
			outputEncoding,
			inclusions,
			enableColor,
			highlightStyle),
		isJSON: outputEncoding == JSONEncoding || outputEncoding == JSONIndentEncoding,
	}
}
88 |
// Marshal marshals the Kafka message into plain text.
//
// The payload is first decoded from the input encoding, then re-encoded into
// the output encoding. Json output is returned directly (the Json processor
// receives the metadata itself); for the other encodings the requested
// metadata is rendered around the payload afterwards.
func (m *PlainTextMarshaller) Marshal(msg, key []byte, ts time.Time, topic string, partition int32, offset int64) ([]byte, error) {
	result, mustEncode, err := m.decode(msg)
	if err != nil {
		return nil, err
	}

	if mustEncode {
		result, err = m.encode(result, key, ts, topic, partition, offset)
		if err != nil {
			return nil, err
		}
		if m.isJSON {
			return result, nil
		}
	}

	result = m.inclusions.Render(key, result, ts, topic, partition, offset, m.outputEncoding == Base64Encoding)

	return result, nil
}
110 |
// decode converts the incoming message from the input encoding into raw
// bytes. The second return value reports whether the caller still needs to
// run the encode step; it is false only when the input and output encodings
// are the same (hex→hex or base64→base64), in which case the message is
// passed through untouched.
func (m *PlainTextMarshaller) decode(msg []byte) ([]byte, bool, error) {
	switch m.inputEncoding {
	case HexEncoding:
		if m.outputEncoding == HexEncoding {
			return msg, false, nil
		}
		buf := make([]byte, hex.DecodedLen(len(msg)))
		_, err := hex.Decode(buf, msg)
		if err != nil {
			return nil, false, err
		}
		return buf, true, nil
	case Base64Encoding:
		if m.outputEncoding == Base64Encoding {
			return msg, false, nil
		}
		buf := make([]byte, base64.StdEncoding.DecodedLen(len(msg)))
		_, err := base64.StdEncoding.Decode(buf, msg)
		if err != nil {
			return nil, false, err
		}
		return buf, true, nil
	default:
		// Plain (or unknown) input encodings need no decoding.
		return msg, true, nil
	}
}
137 |
// encode serialises the decoded payload into the configured output encoding.
// For Json outputs the result is also run through the Json processor, which
// is handed the message metadata. Unknown encodings pass the payload through.
func (m *PlainTextMarshaller) encode(decoded, key []byte, ts time.Time, topic string, partition int32, offset int64) ([]byte, error) {
	switch m.outputEncoding {
	case HexEncoding:
		return m.marshalHex(decoded)
	case Base64Encoding:
		return m.marshalBase64(decoded)
	case JSONIndentEncoding, JSONEncoding:
		result, err := m.marshalJSON(decoded)
		if err != nil {
			return nil, err
		}
		return m.jsonProcessor.Process(result, key, ts, topic, partition, offset)
	default:
		return decoded, nil
	}
}
154 |
155 | func (m *PlainTextMarshaller) marshalHex(msg []byte) ([]byte, error) {
156 | out := []byte(fmt.Sprintf("%X", msg))
157 | return out, nil
158 | }
159 |
160 | func (m *PlainTextMarshaller) marshalBase64(msg []byte) ([]byte, error) {
161 | buf := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
162 | base64.StdEncoding.Encode(buf, msg)
163 | return buf, nil
164 | }
165 |
166 | func (m *PlainTextMarshaller) marshalJSON(msg []byte) ([]byte, error) {
167 | var (
168 | buf bytes.Buffer
169 | err error
170 | )
171 | if m.outputEncoding == JSONIndentEncoding {
172 | err = json.Indent(&buf, msg, "", JSONIndentation)
173 | } else {
174 | err = json.Compact(&buf, msg)
175 | }
176 | if err != nil {
177 | return nil, err
178 | }
179 | return buf.Bytes(), nil
180 | }
181 |
--------------------------------------------------------------------------------
/kafka/local_offset_manager.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "bytes"
5 | "encoding/gob"
6 | "fmt"
7 | "os"
8 | "path/filepath"
9 | "regexp"
10 | "strings"
11 |
12 | "github.com/kirsle/configdir"
13 | "github.com/peterbourgon/diskv"
14 |
15 | "github.com/xitonix/trubka/internal"
16 | )
17 |
const (
	// localOffsetRoot is the application folder name (under the OS specific
	// local config directory) where local offsets are stored.
	localOffsetRoot = "trubka"
	// offsetFileExtension is the file extension of local offset files.
	offsetFileExtension = ".tpo"
)
22 |
// LocalOffsetManager represents a type to manage local offset storage.
type LocalOffsetManager struct {
	// root is the base directory of the local offset store.
	root string
	// db is the disk-backed key-value store holding the offset files.
	db *diskv.Diskv
	internal.Printer
}
29 |
30 | // NewLocalOffsetManager creates a new instance of a local offset manager.
31 | func NewLocalOffsetManager(printer internal.Printer) *LocalOffsetManager {
32 | root := configdir.LocalConfig(localOffsetRoot)
33 | flatTransform := func(s string) []string { return []string{} }
34 | return &LocalOffsetManager{
35 | Printer: printer,
36 | root: root,
37 | db: diskv.New(diskv.Options{
38 | BasePath: root,
39 | Transform: flatTransform,
40 | CacheSizeMax: 1024 * 1024,
41 | }),
42 | }
43 | }
44 |
45 | // GetOffsetFileOrRoot returns the file path to store the topic offsets if a topic has been specified.
46 | //
47 | // If the topic value is empty, this method will return the root path for storing offsets under the specified environment.
48 | func (l *LocalOffsetManager) GetOffsetFileOrRoot(environment string, topic string) (string, error) {
49 | if internal.IsEmpty(environment) {
50 | return "", ErrEmptyEnvironment
51 | }
52 |
53 | singleTopicMode := !internal.IsEmpty(topic) && !strings.EqualFold(topic, "all")
54 | offsetPath := configdir.LocalConfig(localOffsetRoot, environment)
55 | if singleTopicMode {
56 | offsetPath = filepath.Join(offsetPath, topic+offsetFileExtension)
57 | }
58 | _, err := os.Stat(offsetPath)
59 | if err != nil {
60 | if os.IsNotExist(err) {
61 | return "", fmt.Errorf("no consumer offset has been found in %s", offsetPath)
62 | }
63 | return "", fmt.Errorf("failed to access the requested local offset : %w", err)
64 | }
65 | return offsetPath, nil
66 | }
67 |
68 | // ReadTopicOffsets returns the locally stored offsets of the given topic for the specified environment if exists.
69 | //
70 | // If there is no local offsets, the method will return an empty partition-offset map.
71 | func (l *LocalOffsetManager) ReadTopicOffsets(topic string, environment string) (PartitionOffset, error) {
72 | file, err := l.setDBPath(topic, environment)
73 | if err != nil {
74 | return nil, err
75 | }
76 |
77 | stored := make(map[int32]int64)
78 | l.Infof(internal.VeryVerbose, "Reading the local offsets of %s topic from %s", topic, l.db.BasePath)
79 | val, err := l.db.Read(file)
80 | if err != nil {
81 | if os.IsNotExist(err) {
82 | return PartitionOffset{}, nil
83 | }
84 | return nil, err
85 | }
86 |
87 | buff := bytes.NewBuffer(val)
88 | dec := gob.NewDecoder(buff)
89 | err = dec.Decode(&stored)
90 | if err != nil {
91 | return nil, fmt.Errorf("failed to deserialize the value from local offset store for topic %s: %w", topic, err)
92 | }
93 |
94 | return ToPartitionOffset(stored, false), nil
95 | }
96 |
97 | // List lists all the environments and the topics stored locally
98 | func (l *LocalOffsetManager) List(topicFilter *regexp.Regexp, envFilter *regexp.Regexp) (map[string][]string, error) {
99 | result := make(map[string][]string)
100 | root := configdir.LocalConfig(localOffsetRoot)
101 | l.Infof(internal.Verbose, "Searching for local offsets in %s", root)
102 | err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
103 | if err != nil {
104 | return err
105 | }
106 |
107 | if info.IsDir() || !strings.HasSuffix(path, offsetFileExtension) {
108 | return nil
109 | }
110 | environment := filepath.Base(filepath.Dir(path))
111 | if envFilter != nil && !envFilter.Match([]byte(environment)) {
112 | l.Infof(internal.SuperVerbose, "The provided environment filter (%s) does not match with %s environment", envFilter.String(), environment)
113 | return nil
114 | }
115 | file := filepath.Base(path)
116 | topic := strings.TrimSuffix(file, offsetFileExtension)
117 | if topicFilter != nil && !topicFilter.Match([]byte(topic)) {
118 | l.Infof(internal.SuperVerbose, "The provided topic filter (%s) does not match with %s topic", topicFilter.String(), topic)
119 | return nil
120 | }
121 | if _, ok := result[environment]; !ok {
122 | result[environment] = make([]string, 0)
123 | }
124 | result[environment] = append(result[environment], topic)
125 | return nil
126 | })
127 | if err != nil {
128 | return nil, err
129 | }
130 | return result, nil
131 | }
132 |
133 | func (l *LocalOffsetManager) setDBPath(topic string, environment string) (string, error) {
134 | if internal.IsEmpty(environment) {
135 | return "", ErrEmptyEnvironment
136 | }
137 | if internal.IsEmpty(topic) {
138 | return "", ErrEmptyTopic
139 | }
140 |
141 | l.db.BasePath = filepath.Join(l.root, environment)
142 |
143 | file := topic
144 | if !strings.HasSuffix(file, offsetFileExtension) {
145 | file += offsetFileExtension
146 | }
147 |
148 | return file, nil
149 | }
150 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
# Release workflow: builds, packages and uploads Trubka release artefacts for
# macOS, Linux and Windows whenever a GitHub release is published.
name: Release
env:
  BINARY: trubka
  MAINTAINER: Alex Golshani
  VENDOR: Alex Golshani
  URL: https://github.com/xitonix/trubka
  DESCRIPTION: A CLI tool for Kafka
  LICENSE: Apache License 2.0
on:
  release:
    types: [ published ]
jobs:
  # Builds the macOS binary, attaches it to the release and opens a Homebrew
  # formula bump PR for the new version.
  mac:
    name: Mac
    runs-on: macos-13
    steps:
    - name: Set up Go
      uses: actions/setup-go@v2
      with:
        go-version: 1.24.2
    - name: Check out code
      uses: actions/checkout@v2
    - name: Download dependencies
      run: go mod download -x
      env:
        GO111MODULE: on
    - name: Package release
      run: release/release_darwin.bash
      env:
        GO111MODULE: on
      id: package
    - name: Upload binary
      uses: actions/upload-release-asset@v1.0.2
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        upload_url: ${{ github.event.release.upload_url }}
        asset_path: ./${{ steps.package.outputs.file }}
        asset_name: ${{ steps.package.outputs.file }}
        asset_content_type: application/gzip
    # Requires a personal access token (BREW_TOKEN) with permission to open
    # PRs against the Homebrew tap repository.
    - name: Create Brew PR
      env:
        HOMEBREW_GITHUB_API_TOKEN: ${{ secrets.BREW_TOKEN }}
      run: |
        brew tap ${{ github.actor }}/${{ env.BINARY }}
        brew bump-formula-pr -f --version=${{ steps.package.outputs.version }} --no-browse --no-audit \
        --sha256=${{ steps.package.outputs.sha }} \
        --url="https://github.com/${{ github.actor }}/${{ env.BINARY }}/releases/download/${{steps.package.outputs.version}}/${{ steps.package.outputs.file }}" \
        ${{ github.actor }}/${{ env.BINARY }}/${{ env.BINARY }}
  # Builds the Linux binary plus RPM and DEB packages and attaches all three.
  linux:
    name: Linux
    runs-on: ubuntu-latest
    steps:
    - name: Set up Go
      uses: actions/setup-go@v2
      with:
        go-version: 1.24.2
    - name: Check out code
      uses: actions/checkout@v2
    - name: Download dependencies
      run: go mod download -x
      env:
        GO111MODULE: on
    - name: Package release
      run: release/release_linux.bash
      env:
        GO111MODULE: on
      id: package
    - name: Upload binary
      uses: actions/upload-release-asset@v1.0.2
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        upload_url: ${{ github.event.release.upload_url }}
        asset_path: ./${{ steps.package.outputs.file }}
        asset_name: ${{ steps.package.outputs.file }}
        asset_content_type: application/gzip
    - name: Upload RPM
      uses: actions/upload-release-asset@v1.0.2
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        upload_url: ${{ github.event.release.upload_url }}
        asset_path: ./${{ steps.package.outputs.rpm }}
        asset_name: ${{ steps.package.outputs.rpm }}
        asset_content_type: application/octet-stream
    - name: Upload DEB
      uses: actions/upload-release-asset@v1.0.2
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        upload_url: ${{ github.event.release.upload_url }}
        asset_path: ./${{ steps.package.outputs.deb }}
        asset_name: ${{ steps.package.outputs.deb }}
        asset_content_type: application/octet-stream
  # Builds the Windows binary (MSI + TAR archive) via the PowerShell packaging
  # script and attaches both artefacts.
  windows:
    name: Windows
    runs-on: windows-latest
    steps:
    - name: Set up Go
      uses: actions/setup-go@v2
      with:
        go-version: 1.24.2
    - name: Check out code
      uses: actions/checkout@v2
    - name: Download dependencies
      run: go mod download -x
      env:
        GO111MODULE: on
    - name: Package release
      run: .\release.ps1 -version ($Env:GITHUB_REF).trimstart('refs/tags/v') -binary $Env:BINARY
      working-directory: .\release\windows
      id: package
      env:
        GO111MODULE: on
    - name: Upload binary
      uses: actions/upload-release-asset@v1.0.2
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        upload_url: ${{ github.event.release.upload_url }}
        asset_path: .\release\windows\${{ steps.package.outputs.file }}
        asset_name: ${{ steps.package.outputs.file }}
        asset_content_type: application/gzip
    - name: Upload TAR Archive
      uses: actions/upload-release-asset@v1.0.2
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        upload_url: ${{ github.event.release.upload_url }}
        asset_path: .\release\windows\${{ steps.package.outputs.archive }}
        asset_name: ${{ steps.package.outputs.archive }}
        asset_content_type: application/gzip
134 |
--------------------------------------------------------------------------------
/kafka/local_offset_store.go:
--------------------------------------------------------------------------------
1 | package kafka
2 |
3 | import (
4 | "bytes"
5 | "encoding/gob"
6 | "errors"
7 | "fmt"
8 | "os"
9 | "strings"
10 | "sync"
11 | "time"
12 |
13 | "github.com/kirsle/configdir"
14 | "github.com/peterbourgon/diskv"
15 |
16 | "github.com/xitonix/trubka/internal"
17 | )
18 |
// progress is a single consumer offset update queued for asynchronous
// persistence by the background writer.
type progress struct {
	topic     string
	partition int32
	offset    int64
}
24 |
// LocalOffsetStore represents a type to manage partition offsets locally.
type LocalOffsetStore struct {
	db      *diskv.Diskv
	printer internal.Printer
	// wg tracks the background writer goroutine started by start().
	wg sync.WaitGroup
	// writeErrors receives asynchronous disk write failures (see errors()).
	writeErrors chan error
	// in is the queue of offset updates to be flushed to disk.
	in chan *progress
	// checksum remembers the hashes of already-written offset snapshots so
	// unchanged offsets are not rewritten.
	checksum map[string]interface{}
}
34 |
35 | // NewLocalOffsetStore creates a new instance of local offset store
36 | func NewLocalOffsetStore(printer internal.Printer, environment string) (*LocalOffsetStore, error) {
37 | environment = strings.ToLower(strings.TrimSpace(environment))
38 | if len(environment) == 0 {
39 | return nil, errors.New("empty environment value is not acceptable")
40 | }
41 | root := configdir.LocalConfig(localOffsetRoot, environment)
42 | err := configdir.MakePath(root)
43 | if err != nil {
44 | return nil, fmt.Errorf("failed to create the application cache folder: %w", err)
45 | }
46 | printer.Infof(internal.Verbose, "Initialising local offset store at %s", root)
47 |
48 | flatTransform := func(s string) []string { return []string{} }
49 |
50 | db := diskv.New(diskv.Options{
51 | BasePath: root,
52 | Transform: flatTransform,
53 | CacheSizeMax: 1024 * 1024,
54 | })
55 |
56 | return &LocalOffsetStore{
57 | db: db,
58 | printer: printer,
59 | writeErrors: make(chan error),
60 | in: make(chan *progress, 500),
61 | checksum: make(map[string]interface{}),
62 | }, nil
63 | }
64 |
65 | func (s *LocalOffsetStore) read(topic string) (PartitionOffset, error) {
66 | if internal.IsEmpty(topic) {
67 | return nil, ErrEmptyTopic
68 | }
69 | file := topic + offsetFileExtension
70 | stored := make(map[int32]int64)
71 | s.printer.Infof(internal.VeryVerbose, "Reading the local offsets of %s topic from %s", topic, s.db.BasePath)
72 | val, err := s.db.Read(file)
73 | if err != nil {
74 | if os.IsNotExist(err) {
75 | return PartitionOffset{}, nil
76 | }
77 | return nil, err
78 | }
79 |
80 | buff := bytes.NewBuffer(val)
81 | dec := gob.NewDecoder(buff)
82 | err = dec.Decode(&stored)
83 | if err != nil {
84 | return nil, fmt.Errorf("failed to deserialize the value from local offset store for topic %s: %w", topic, err)
85 | }
86 |
87 | return ToPartitionOffset(stored, false), nil
88 | }
89 |
// start launches the background goroutine that flushes the in-memory offsets
// to disk every three seconds. The snapshot is seeded with a copy of the
// already-loaded offsets so previously stored values are preserved. The
// goroutine performs one final flush and exits when the `in` channel is
// closed (see close()).
func (s *LocalOffsetStore) start(loaded TopicPartitionOffset) {
	s.wg.Add(1)
	ticker := time.NewTicker(3 * time.Second)
	offsets := make(TopicPartitionOffset)
	// Deep-copy the loaded offsets so the writer goroutine owns its snapshot.
	for t, lpo := range loaded {
		partOffsets := make(PartitionOffset)
		lpo.copyTo(partOffsets)
		offsets[t] = partOffsets
	}
	go func() {
		defer s.wg.Done()
		for {
			select {
			case <-ticker.C:
				// Periodic flush of the current snapshot.
				s.writeOffsetsToDisk(offsets)
			case p, more := <-s.in:
				if !more {
					// The store is closing: flush once more and stop.
					ticker.Stop()
					s.printer.Info(internal.Verbose, "Flushing the offsets to disk.")
					s.writeOffsetsToDisk(offsets)
					return
				}
				_, ok := offsets[p.topic]
				if !ok {
					offsets[p.topic] = make(PartitionOffset)
				}
				offsets[p.topic][p.partition] = Offset{Current: p.offset}
			}
		}
	}()
}
121 |
122 | func (s *LocalOffsetStore) commit(topic string, partition int32, offset int64) error {
123 | s.in <- &progress{
124 | topic: topic,
125 | partition: partition,
126 | offset: offset,
127 | }
128 | return nil
129 | }
130 |
// errors returns the channel on which the write errors will be received.
// You must listen to this channel to avoid deadlock: the channel is
// unbuffered, so the background writer blocks until each error is consumed.
func (s *LocalOffsetStore) errors() <-chan error {
	return s.writeErrors
}
136 |
// close shuts the local offset store down: it stops the background writer by
// closing the input channel, waits for the final flush to complete and only
// then closes the error channel. Calling it on a nil or uninitialised store
// is a no-op.
func (s *LocalOffsetStore) close() {
	if s == nil || s.db == nil {
		return
	}
	s.printer.Info(internal.SuperVerbose, "Closing the local offset store.")
	// Closing `in` signals the writer goroutine (see start) to flush and exit.
	close(s.in)
	s.wg.Wait()
	close(s.writeErrors)
	s.printer.Info(internal.SuperVerbose, "The local offset store has been closed successfully.")
}
147 |
148 | func (s *LocalOffsetStore) writeOffsetsToDisk(topicPartitionOffsets TopicPartitionOffset) {
149 | for topic, partitionOffsets := range topicPartitionOffsets {
150 | cs, buff, err := partitionOffsets.marshal()
151 | if err != nil {
152 | s.writeErrors <- fmt.Errorf("failed to serialise the offsets of topic %s: %w", topic, err)
153 | return
154 | }
155 | if cs == "" {
156 | return
157 | }
158 | if _, ok := s.checksum[cs]; ok {
159 | return
160 | }
161 | s.checksum[cs] = nil
162 | s.printer.Infof(internal.SuperVerbose, "Writing the offset(s) of topic %s to the disk.", topic)
163 | for p, offset := range partitionOffsets {
164 | if offset.Current >= 0 {
165 | s.printer.Infof(internal.Chatty, " P%02d: %d", p, offset.Current)
166 | }
167 | }
168 | err = s.db.Write(topic+offsetFileExtension, buff)
169 | if err != nil {
170 | s.writeErrors <- fmt.Errorf("failed to write the offsets of topic %s to the disk %s: %w", topic, cs, err)
171 | }
172 | }
173 | }
174 |
--------------------------------------------------------------------------------
/commands/describe/topic.go:
--------------------------------------------------------------------------------
1 | package describe
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "sort"
7 | "strings"
8 |
9 | "github.com/dustin/go-humanize"
10 | "gopkg.in/alecthomas/kingpin.v2"
11 |
12 | "github.com/xitonix/trubka/commands"
13 | "github.com/xitonix/trubka/internal/output"
14 | "github.com/xitonix/trubka/internal/output/format"
15 | "github.com/xitonix/trubka/internal/output/format/list"
16 | "github.com/xitonix/trubka/internal/output/format/tabular"
17 | "github.com/xitonix/trubka/kafka"
18 | )
19 |
// topic holds the state of the `describe topic` sub-command.
type topic struct {
	kafkaParams  *commands.KafkaParameters
	globalParams *commands.GlobalParameters
	// topic is the name of the topic to describe (required argument).
	topic string
	// loadConfigs loads the topic configuration from the server (-c flag).
	loadConfigs bool
	// includeOffsets queries the latest offset of each partition (-o flag).
	includeOffsets bool
	// format and style control the output rendering (see AddFormatFlag).
	format string
	style  string
}
29 |
// addTopicSubCommand registers the `describe topic` sub-command along with
// its argument and flags on the given parent command.
func addTopicSubCommand(parent *kingpin.CmdClause, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
	cmd := &topic{
		kafkaParams:  kafkaParams,
		globalParams: global,
	}
	c := parent.Command("topic", "Describes a Kafka topic.").Action(cmd.run)
	c.Arg("topic", "The topic to describe.").Required().StringVar(&cmd.topic)
	c.Flag("load-config", "Loads the topic's configurations from the server.").
		NoEnvar().
		Short('c').BoolVar(&cmd.loadConfigs)
	c.Flag("include-offsets", "Queries the server to read the latest available offset of each partition.").
		NoEnvar().
		Short('o').BoolVar(&cmd.includeOffsets)
	commands.AddFormatFlag(c, &cmd.format, &cmd.style)
}
45 |
// run executes the `describe topic` command: it fetches the topic metadata
// from the Kafka cluster and renders it in the requested output format.
func (t *topic) run(_ *kingpin.ParseContext) error {
	manager, ctx, cancel, err := commands.InitKafkaManager(t.globalParams, t.kafkaParams)

	if err != nil {
		return err
	}

	defer func() {
		manager.Close()
		cancel()
	}()

	meta, err := manager.DescribeTopic(ctx, t.topic, t.loadConfigs, t.includeOffsets)
	if err != nil {
		return err
	}

	// A topic with no partitions does not exist on the server.
	if len(meta.Partitions) == 0 {
		return fmt.Errorf("topic %s not found", t.topic)
	}

	// Sort for deterministic output.
	sort.Sort(kafka.PartitionMetaByID(meta.Partitions))
	if t.loadConfigs {
		sort.Sort(kafka.ConfigEntriesByName(meta.ConfigEntries))
	}

	switch t.format {
	case commands.JSONFormat:
		return output.PrintAsJSON(meta, t.style, t.globalParams.EnableColor)
	case commands.TableFormat:
		return t.printAsTable(meta)
	case commands.TreeFormat:
		return t.printAsList(meta, false)
	case commands.PlainTextFormat:
		return t.printAsList(meta, true)
	default:
		return nil
	}
}
85 |
86 | func (t *topic) printAsList(meta *kafka.TopicMetadata, plain bool) error {
87 | var totalOffsets int64
88 | l := list.New(plain)
89 | l.AddItem("Partitions")
90 | l.Indent()
91 | for _, pm := range meta.Partitions {
92 | l.AddItemF("%d", pm.ID)
93 | l.Indent()
94 | if t.includeOffsets {
95 | l.AddItemF("Offset: %s", humanize.Comma(pm.Offset))
96 | totalOffsets += pm.Offset
97 | }
98 | l.AddItemF("Leader: %s", pm.Leader.String())
99 | l.AddItemF("ISRs: %s", t.brokersToLine(pm.ISRs...))
100 | l.AddItemF("Replicas: %s", t.brokersToLine(pm.Replicas...))
101 | if len(pm.OfflineReplicas) > 0 {
102 | l.AddItemF("Offline Replicas: %s", t.brokersToLine(pm.OfflineReplicas...))
103 | }
104 | l.UnIndent()
105 | }
106 | l.UnIndent()
107 | if t.loadConfigs {
108 | commands.PrintConfigList(l, meta.ConfigEntries, plain)
109 | }
110 | l.Render()
111 |
112 | return nil
113 | }
114 |
115 | func (t *topic) printAsTable(meta *kafka.TopicMetadata) error {
116 | table := tabular.NewTable(t.globalParams.EnableColor,
117 | tabular.C("Partition"),
118 | tabular.C("Offset").FAlign(tabular.AlignCenter),
119 | tabular.C("Leader").Align(tabular.AlignLeft),
120 | tabular.C("Replicas").Align(tabular.AlignLeft),
121 | tabular.C("Offline Replicas").Align(tabular.AlignLeft),
122 | tabular.C("ISRs").Align(tabular.AlignLeft),
123 | )
124 | table.SetTitle(format.WithCount("Partitions", len(meta.Partitions)))
125 | var totalOffsets int64
126 | for _, pm := range meta.Partitions {
127 | offset := "-"
128 | if t.includeOffsets {
129 | offset = humanize.Comma(pm.Offset)
130 | totalOffsets += pm.Offset
131 | }
132 | table.AddRow(
133 | pm.ID,
134 | offset,
135 | format.SpaceIfEmpty(pm.Leader.MarkedHostName()),
136 | format.SpaceIfEmpty(t.brokersToList(pm.Replicas...)),
137 | format.SpaceIfEmpty(t.brokersToList(pm.OfflineReplicas...)),
138 | format.SpaceIfEmpty(t.brokersToList(pm.ISRs...)),
139 | )
140 | }
141 |
142 | total := " "
143 | if t.includeOffsets {
144 | total = humanize.Comma(totalOffsets)
145 | }
146 | table.AddFooter(fmt.Sprintf("Total: %d", len(meta.Partitions)), total, " ", " ", " ", " ")
147 | table.SetCaption(kafka.ControllerBrokerLabel + " CONTROLLER NODES")
148 | table.Render()
149 |
150 | if t.loadConfigs {
151 | commands.PrintConfigTable(meta.ConfigEntries)
152 | }
153 |
154 | return nil
155 | }
156 |
157 | func (t *topic) brokersToList(brokers ...*kafka.Broker) string {
158 | if len(brokers) == 1 {
159 | return brokers[0].Host
160 | }
161 | var buf bytes.Buffer
162 | for i, b := range brokers {
163 | buf.WriteString(b.String())
164 | if i < len(brokers)-1 {
165 | buf.WriteString("\n")
166 | }
167 | }
168 | return buf.String()
169 | }
170 |
171 | func (t *topic) brokersToLine(brokers ...*kafka.Broker) string {
172 | result := make([]string, len(brokers))
173 | for i, b := range brokers {
174 | result[i] = b.String()
175 | }
176 | return strings.Join(result, ", ")
177 | }
178 |
--------------------------------------------------------------------------------
/commands/produce/produce.go:
--------------------------------------------------------------------------------
1 | package produce
2 |
3 | import (
4 | "bufio"
5 | "context"
6 | "errors"
7 | "fmt"
8 | "io"
9 | "os"
10 | "strings"
11 | "time"
12 |
13 | "gopkg.in/alecthomas/kingpin.v2"
14 |
15 | "github.com/xitonix/trubka/commands"
16 | "github.com/xitonix/trubka/internal"
17 | "github.com/xitonix/trubka/internal/output/format"
18 | "github.com/xitonix/trubka/kafka"
19 | )
20 |
21 | type valueSerializer func(raw string) ([]byte, error)
22 |
// AddCommands adds the produce command (and its plain/proto/schema
// sub-commands) to the app.
func AddCommands(app *kingpin.Application, global *commands.GlobalParameters, kafkaParams *commands.KafkaParameters) {
	parent := app.Command("produce", "A command to publish messages to kafka.")
	addPlainSubCommand(parent, global, kafkaParams)
	addProtoSubCommand(parent, global, kafkaParams)
	addSchemaSubCommand(parent, global)
}
30 |
// addProducerFlags binds the flags shared by all produce sub-commands
// (partition key, random data generation, message count and inter-message
// sleep) to the given command.
func addProducerFlags(cmd *kingpin.CmdClause, sleep *time.Duration, key *string, random *bool, count *uint64) {
	cmd.Flag("key", "The partition key of the message. If not set, a random value will be selected.").
		Short('k').
		StringVar(key)
	cmd.Flag("generate-random-data", "Replaces the random generator place holder functions in the content (if any) with random values.").
		Short('g').
		BoolVar(random)
	cmd.Flag("count", "The number of messages to publish. Set to zero to produce indefinitely.").
		Default("1").
		Short('c').
		Uint64Var(count)
	cmd.Flag("sleep", "The amount of time to wait before publishing each message to Kafka. Examples 500ms, 1s, 1m or 1h5m.").
		HintOptions("500ms", "1s", "1m").
		Default("0").
		Short('s').
		DurationVar(sleep)
}
48 |
49 | func initialiseProducer(kafkaParams *commands.KafkaParameters, verbosity internal.VerbosityLevel) (*kafka.Producer, error) {
50 |
51 | saramaLogWriter := io.Discard
52 | if verbosity >= internal.Chatty {
53 | saramaLogWriter = os.Stdout
54 | }
55 |
56 | brokers := commands.GetBrokers(kafkaParams.Brokers)
57 | producer, err := kafka.NewProducer(
58 | brokers,
59 | kafka.WithClusterVersion(kafkaParams.Version),
60 | kafka.WithTLS(kafkaParams.TLS),
61 | kafka.WithLogWriter(saramaLogWriter),
62 | kafka.WithSASL(kafkaParams.SASLMechanism,
63 | kafkaParams.SASLUsername,
64 | kafkaParams.SASLPassword,
65 | kafkaParams.SASLHandshakeVersion))
66 |
67 | if err != nil {
68 | return nil, err
69 | }
70 | return producer, nil
71 | }
72 |
// produce publishes `count` messages (zero means indefinitely) with the given
// key/value to the topic, serialising the value with the provided serializer
// before each publish. An optional sleep is applied between messages. The
// loop stops when the context is cancelled, the count is reached, or an
// error occurs.
func produce(ctx context.Context,
	kafkaParams *commands.KafkaParameters,
	globalParams *commands.GlobalParameters,
	topic string,
	key, value string,
	serialize valueSerializer,
	count uint64,
	sleep time.Duration) error {
	producer, err := initialiseProducer(kafkaParams, globalParams.Verbosity)
	if err != nil {
		return err
	}

	defer func() {
		if globalParams.Verbosity >= internal.VeryVerbose {
			fmt.Println("Closing the kafka publisher.")
		}
		err := producer.Close()
		if err != nil {
			fmt.Println(format.Red("Failed to close the publisher", globalParams.EnableColor))
		}
	}()

	if globalParams.Verbosity >= internal.Verbose {
		msg := "message"
		switch {
		case count == 0:
			msg = "indefinite number of messages"
		case count == 1:
			msg = "a single message"
		case count > 1:
			msg = fmt.Sprintf("%d messages", count)
		}
		fmt.Printf("Publishing %s to Kafka\n", msg)
	}

	// With no explicit key, a fresh pseudo-random key is generated per message.
	randomPk := len(key) == 0
	counter := uint64(1)
	capped := count > 0
	// Sleeping is pointless when only a single message is produced.
	mustSleep := sleep > 0 && (count == 0 || count > 1)
	for {
		select {
		case <-ctx.Done():
			return nil
		default:
			if randomPk {
				key = fmt.Sprintf("%d%d", time.Now().UnixNano(), counter)
			}
			// Serialise on every iteration: the serializer may substitute
			// random-data placeholders with new values each time.
			vBytes, err := serialize(value)
			if err != nil {
				return err
			}
			partition, offset, err := producer.Produce(topic, []byte(key), vBytes)
			if err != nil {
				return fmt.Errorf("failed to publish to kafka: %w", err)
			}
			if globalParams.Verbosity >= internal.VeryVerbose {
				fmt.Printf("Message has been published to the offset %d of partition %d (PK: %s).\n",
					offset,
					partition,
					key)
			}
			if capped && counter >= count {
				return nil
			}
			counter++
			if mustSleep {
				if globalParams.Verbosity >= internal.SuperVerbose {
					fmt.Printf("Waiting for %s before producing the next message.\n", sleep)
				}
				time.Sleep(sleep)
			}
		}
	}
}
148 |
149 | func getValue(flagValue string) (string, error) {
150 | if !internal.IsEmpty(flagValue) {
151 | return flagValue, nil
152 | }
153 | value, err := readFromShellPipe()
154 | if err != nil {
155 | return "", err
156 | }
157 | if internal.IsEmpty(value) {
158 | return "", errors.New("the message content cannot be empty. Either pipe the content in or pass it as the second argument")
159 | }
160 | return value, nil
161 | }
162 |
// readFromShellPipe reads the message content piped into stdin. It returns an
// empty string (and no error) when stdin is an interactive terminal or the
// pipe is empty. A trailing newline is stripped from the content.
//
// NOTE: the previous rune-by-rune loop only terminated on io.EOF; any other
// read error appended a garbage rune and could spin forever. io.ReadAll
// handles all terminal conditions correctly.
func readFromShellPipe() (string, error) {
	info, err := os.Stdin.Stat()
	if err != nil {
		return "", fmt.Errorf("failed to read the message content from shell: %w", err)
	}

	// Nothing is piped in when stdin is a character device (a terminal) or empty.
	if info.Mode()&os.ModeCharDevice != 0 || info.Size() <= 0 {
		return "", nil
	}
	content, err := io.ReadAll(bufio.NewReader(os.Stdin))
	if err != nil {
		return "", fmt.Errorf("failed to read the message content from shell: %w", err)
	}
	return strings.TrimRight(string(content), "\n"), nil
}
184 |
--------------------------------------------------------------------------------
/application.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "crypto/tls"
5 | "crypto/x509"
6 | "errors"
7 | "fmt"
8 | "os"
9 |
10 | "gopkg.in/alecthomas/kingpin.v2"
11 |
12 | "github.com/xitonix/trubka/commands"
13 | "github.com/xitonix/trubka/commands/consume"
14 | "github.com/xitonix/trubka/commands/create"
15 | "github.com/xitonix/trubka/commands/deletion"
16 | "github.com/xitonix/trubka/commands/describe"
17 | "github.com/xitonix/trubka/commands/list"
18 | "github.com/xitonix/trubka/commands/produce"
19 | "github.com/xitonix/trubka/internal"
20 | "github.com/xitonix/trubka/kafka"
21 | )
22 |
// newApplication wires up the Trubka CLI: it binds the global and Kafka
// flags, registers every command group and parses the command line.
func newApplication() error {
	app := kingpin.New("trubka", "A CLI tool for Kafka.").DefaultEnvars()
	global := &commands.GlobalParameters{}
	bindAppFlags(app, global)
	commands.AddVersionCommand(app, version, commit, built, runtimeVer)
	kafkaParams := bindKafkaFlags(app)
	list.AddCommands(app, global, kafkaParams)
	describe.AddCommands(app, global, kafkaParams)
	deletion.AddCommands(app, global, kafkaParams)
	consume.AddCommands(app, global, kafkaParams)
	create.AddCommands(app, global, kafkaParams)
	produce.AddCommands(app, global, kafkaParams)
	_, err := app.Parse(os.Args[1:])
	return err
}
38 |
// bindAppFlags binds the application-wide flags (colour output and verbosity)
// to the global parameters.
func bindAppFlags(app *kingpin.Application, global *commands.GlobalParameters) {
	app.Flag("colour", "Enables colours in the standard output. To disable, use --no-colour.").
		Default("true").
		BoolVar(&global.EnableColor)

	// Hidden US-spelling alias bound to the same variable as --colour.
	app.Flag("color", "Enables colours in the standard output. To disable, use --no-color.").
		Default("true").
		Hidden().
		BoolVar(&global.EnableColor)

	app.PreAction(func(context *kingpin.ParseContext) error {
		enabledColor = global.EnableColor
		return nil
	})

	// -v is a counter: repeating it (-vv, -vvv, ...) raises the verbosity.
	var verbosity int
	app.Flag("verbose", "The verbosity level of Trubka.").
		Short('v').
		NoEnvar().
		PreAction(func(context *kingpin.ParseContext) error {
			global.Verbosity = internal.ToVerbosityLevel(verbosity)
			return nil
		}).
		CounterVar(&verbosity)
}
64 |
// bindKafkaFlags binds the Kafka connection flags (brokers, cluster version,
// SASL and TLS) and returns the parameter struct they populate. The TLS
// config is materialised in a PreAction once the flags have been parsed.
func bindKafkaFlags(app *kingpin.Application) *commands.KafkaParameters {
	params := &commands.KafkaParameters{}
	app.Flag("brokers", "The comma separated list of Kafka brokers in server:port format.").
		Short('b').
		StringVar(&params.Brokers)
	app.Flag("kafka-version", "Kafka cluster version.").
		Default(kafka.DefaultClusterVersion).
		StringVar(&params.Version)

	bindSASLFlags(app, params)

	tlsParams := bindTLSFlags(app)
	app.PreAction(func(ctx *kingpin.ParseContext) error {
		if !tlsParams.Enabled {
			return nil
		}
		tlsConfig, err := configureTLS(tlsParams)
		if err != nil {
			return err
		}
		params.TLS = tlsConfig
		return nil
	})
	return params
}
90 |
// bindTLSFlags binds the TLS-related flags and returns the parameter struct
// they populate.
func bindTLSFlags(app *kingpin.Application) *commands.TLSParameters {
	t := &commands.TLSParameters{}
	app.Flag("tls", "Enables TLS (Unverified by default). Mutual authentication can also be enabled by providing client key and certificate.").
		BoolVar(&t.Enabled)
	app.Flag("ca-cert", `Trusted root certificates for verifying the server. If not set, Trubka will skip server certificate and domain verification.`).
		ExistingFileVar(&t.CACert)
	app.Flag("client-cert", `Client certification file to enable mutual TLS authentication. Client key must also be provided.`).
		ExistingFileVar(&t.ClientCert)
	app.Flag("client-key", `Client private key file to enable mutual TLS authentication. Client certificate must also be provided.`).
		ExistingFileVar(&t.ClientKey)
	return t
}
103 |
// bindSASLFlags binds the SASL authentication flags (mechanism, credentials
// and handshake version) to the Kafka parameters.
func bindSASLFlags(app *kingpin.Application, params *commands.KafkaParameters) {
	app.Flag("sasl-mechanism", "SASL authentication mechanism.").
		Default(kafka.SASLMechanismNone).
		EnumVar(&params.SASLMechanism,
			kafka.SASLMechanismNone,
			kafka.SASLMechanismPlain,
			kafka.SASLMechanismSCRAM256,
			kafka.SASLMechanismSCRAM512)
	app.Flag("sasl-username", "SASL authentication username. Will be ignored if --sasl-mechanism is set to none.").
		StringVar(&params.SASLUsername)
	app.Flag("sasl-password", "SASL authentication password. Will be ignored if --sasl-mechanism is set to none.").
		StringVar(&params.SASLPassword)
	app.Flag("sasl-version", "SASL handshake version. Will be ignored if --sasl-mechanism is set to none.").
		Default(string(kafka.SASLHandshakeV1)).
		EnumVar(&params.SASLHandshakeVersion, string(kafka.SASLHandshakeV0), string(kafka.SASLHandshakeV1))
}
120 |
// configureTLS builds a *tls.Config from the command line TLS parameters.
// When a client certificate is provided, the matching key is mandatory and
// mutual TLS is enabled. When no CA bundle is provided, server certificate
// and host name verification are skipped entirely (the connection is still
// encrypted).
func configureTLS(params *commands.TLSParameters) (*tls.Config, error) {
	tlsConf := tls.Config{}

	// Mutual authentication is enabled. Both client key and certificate are needed.
	if !internal.IsEmpty(params.ClientCert) {
		if internal.IsEmpty(params.ClientKey) {
			return nil, errors.New("TLS client key is missing. Mutual authentication cannot be used")
		}
		certificate, err := tls.LoadX509KeyPair(params.ClientCert, params.ClientKey)
		if err != nil {
			return nil, fmt.Errorf("failed to load the client TLS key pair: %w", err)
		}
		tlsConf.Certificates = []tls.Certificate{certificate}
	}

	if internal.IsEmpty(params.CACert) {
		// No CA bundle: disable server certificate verification altogether.
		tlsConf.InsecureSkipVerify = true
		return &tlsConf, nil
	}
	certPool := x509.NewCertPool()
	ca, err := os.ReadFile(params.CACert)
	if err != nil {
		return nil, fmt.Errorf("failed to read the CA certificate: %w", err)
	}

	if ok := certPool.AppendCertsFromPEM(ca); !ok {
		return nil, errors.New("failed to append the CA certificate to the pool")
	}

	tlsConf.RootCAs = certPool

	return &tlsConf, nil
}
156 |
--------------------------------------------------------------------------------