├── .codecov.yml ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── codecov.yaml │ └── go.yaml ├── .gitignore ├── .golangci.yml ├── LICENSE ├── Makefile ├── README.md ├── TESTING.md ├── client.go ├── client_int_test.go ├── client_options.go ├── client_test.go ├── commands.go ├── conn.go ├── consumer.go ├── consumer_multitopic.go ├── consumer_multitopic_test.go ├── consumer_registry.go ├── consumer_test.go ├── dialer.go ├── go.mod ├── go.sum ├── helper.go ├── logger.go ├── logger_test.go ├── message.go ├── message_test.go ├── messaging_test.go ├── producer.go ├── producer_registry.go ├── producer_test.go ├── proto ├── PulsarApi.pb.go ├── PulsarApi.proto └── generate.go ├── requests.go ├── sequencer.go ├── topic.go └── topic_test.go /.codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | project: 4 | default: 5 | target: 25% 6 | threshold: 5% 7 | patch: 8 | default: 9 | informational: true 10 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 18 | 19 | **Screenshots** 20 | If applicable, add screenshots to help explain your problem. 21 | 22 | **Software:** 23 | - Pulsar Server Version: 24 | - Pulsar-Go Version: 25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/workflows/codecov.yaml: -------------------------------------------------------------------------------- 1 | name: codecov 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: 9 | - opened 10 | - reopened 11 | - synchronize 12 | - ready_for_review 13 | 14 | jobs: 15 | codecov: 16 | timeout-minutes: 15 17 | 18 | services: 19 | pulsar: 20 | image: apachepulsar/pulsar-standalone:2.8.4 21 | ports: 22 | - 6650:6650 23 | - 8080:8080 24 | 25 | name: Coverage 26 | runs-on: ubuntu-latest 27 | steps: 28 | - name: Set up Go 1.x 29 | uses: actions/setup-go@v4 30 | with: 31 | go-version: "1.24" 32 | id: go 33 | 34 | - name: Check out code into the Go module directory 35 | uses: actions/checkout@v3 36 | with: 37 | fetch-depth: 0 38 | 39 | - name: Get dependencies 40 | run: go version && go mod download 41 | 42 | # sleep to make sure that pulsar container is ready 43 | - name: Run tests with coverage 44 | run: sleep 20 && make test-integration-coverage 45 | 46 | - name: Upload coverage to Codecov 47 | uses: codecov/codecov-action@v3 48 | with: 49 | token: ${{ secrets.CODECOV_TOKEN }} 50 | file: ./.testCoverage 51 | -------------------------------------------------------------------------------- /.github/workflows/go.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | jobs: 10 | build: 11 | if: "!contains(github.event.commits[0].message, '[skip ci]')" 12 | timeout-minutes: 15 13 | 14 | name: Build 15 | runs-on: ubuntu-latest 16 | strategy: 17 | matrix: 18 | go: ['1.24'] 19 | 20 | steps: 21 | - name: Set up Go 1.x 22 | uses: actions/setup-go@v4 23 | with: 24 | go-version: ${{ matrix.go }} 25 | id: go 26 | 27 | - name: Check out code into the Go module directory 28 | uses: actions/checkout@v3 29 | 30 | - name: Install linters 31 | run: make install-linters 32 | 33 | - name: Get dependencies 34 | run: go mod download 35 | 36 | - name: Run tests 37 | run: make test 38 | 39 | - name: Run linter 40 | run: make lint 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.exe 2 | .idea 3 | .vscode 4 | *.iml 5 | *.local 6 | /*.log 7 | *.out 8 | *.prof 9 | *.test 10 | .DS_Store 11 | *.dmp 12 | *.db 13 | 14 | .bench* 15 | .testCoverage 16 | 17 | data 18 | docker-compose.yml 19 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | timeout: 5m 3 | modules-download-mode: readonly 4 | build-tags: 5 | - integration 6 | 7 | linters: 8 | enable: 9 | - asasalint # check for pass []any as any in variadic func(...any) 10 | - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers 11 | - bidichk # Checks for dangerous unicode character sequences 12 | - bodyclose # checks whether HTTP response body is closed successfully 13 | - contextcheck # check the function whether use a non-inherited context 14 | - cyclop # checks function and package cyclomatic complexity 15 | - decorder # check declaration order and count of types, constants, variables and functions 16 | - dogsled # Checks assignments with too many blank identifiers (e.g. 
x, _, _, _, := f()) 17 | - durationcheck # check for two durations multiplied together 18 | - err113 # Golang linter to check the errors handling expressions 19 | - errcheck # checking for unchecked errors 20 | - errname # Checks that errors are prefixed with the `Err` and error types are suffixed with the `Error` 21 | - errorlint # finds code that will cause problems with the error wrapping scheme introduced in Go 1.13 22 | - gci # controls golang package import order and makes it always deterministic 23 | - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid 24 | - gocognit # Computes and checks the cognitive complexity of functions 25 | - goconst # Finds repeated strings that could be replaced by a constant 26 | - gocritic # Provides diagnostics that check for bugs, performance and style issues 27 | - gocyclo # Computes and checks the cyclomatic complexity of functions 28 | - godot # Check if comments end in a period 29 | - gofmt # checks whether code was gofmt-ed 30 | - goimports # Check import statements are formatted according to the 'goimport' command 31 | - gosimple # Linter for Go source code that specializes in simplifying a code 32 | - govet # reports suspicious constructs, such as Printf calls with wrong arguments 33 | - grouper # An analyzer to analyze expression groups 34 | - ineffassign # Detects when assignments to existing variables are not used 35 | - ireturn # accept Interfaces, Return Concrete Types 36 | - maintidx # measures the maintainability index of each function 37 | - makezero # Finds slice declarations with non-zero initial length 38 | - mirror # reports wrong mirror patterns of bytes/strings usage 39 | - misspell # Finds commonly misspelled English words in comments 40 | - nakedret # Finds naked returns in functions 41 | - nilerr # Finds the code that returns nil even if it checks that the error is not nil 42 | - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value 43 | - noctx # Finds sending http request without context.Context 44 | - perfsprint # Checks that fmt.Sprintf can be replaced with a faster alternative 45 | - prealloc # Finds slice declarations that could potentially be preallocated 46 | - predeclared # find code that shadows one of Go's predeclared identifiers 47 | - reassign # Checks that package variables are not reassigned 48 | - revive # drop-in replacement of golint 49 | - staticcheck # drop-in replacement of go vet 50 | - stylecheck # Stylecheck is a replacement for golint 51 | - testifylint # Checks usage of github.com/stretchr/testify 52 | - thelper # checks the consistency of test helpers 53 | - tparallel # detects inappropriate usage of t.Parallel() 54 | - typecheck # parses and type-checks Go code 55 | - unconvert # Remove unnecessary type conversions 56 | - unparam # Reports unused function parameters 57 | - unused # Checks Go code for unused constants, variables, functions and types 58 | - usestdlibvars # detect the possibility to use variables/constants from the Go standard library 59 | - usetesting # Reports uses of functions with replacement inside the testing package 60 | - wastedassign # finds wasted assignment statements 61 | - whitespace # detects leading and trailing whitespace 62 | 63 | # TODO - containedctx # detects struct contained context.Context field 64 | # TODO - funlen # Tool for detection of long functions 65 | # TODO - nestif # Reports deeply nested if statements 66 | # TODO - wrapcheck # Checks that errors returned from external packages are 
wrapped 67 | 68 | issues: 69 | exclude-rules: 70 | - linters: 71 | - err113 72 | text: "do not define dynamic errors" 73 | 74 | linters-settings: 75 | cyclop: 76 | max-complexity: 13 77 | revive: 78 | rules: 79 | - name: var-naming 80 | disabled: true 81 | stylecheck: 82 | # should not use underscores in package names 83 | checks: [ "all", "-ST1003" ] 84 | whitespace: 85 | multi-if: true # Enforces newlines (or comments) after every multi-line if statement 86 | multi-func: true # Enforces newlines (or comments) after every multi-line function signature 87 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Cornel 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | GOLANGCI_VERSION = v1.64.6 2 | 3 | help: ## show help, shown by default if no target is specified 4 | @grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' 5 | 6 | lint: ## run code linters 7 | golangci-lint run 8 | 9 | test: ## run tests 10 | go test -race ./... 11 | 12 | test-integration: ## run integration tests 13 | go test -tags integration -race -p 1 ./... 14 | 15 | test-integration-coverage: ## run integration tests with coverage 16 | go test -tags integration -race -p 1 ./... -coverprofile .testCoverage -covermode=atomic -coverpkg=./... 17 | go tool cover -func .testCoverage | grep total | awk '{print "Total coverage: "$$3}' 18 | 19 | test-coverage: ## run unit tests and create test coverage 20 | CGO_ENABLED=0 go test ./... -coverprofile .testCoverage -covermode=atomic -coverpkg=./... 
21 | 	go tool cover -func .testCoverage | grep total | awk '{print "Total coverage: "$$3}'
22 | 
23 | test-coverage-web: test-coverage ## run unit tests and show test coverage in browser
24 | 	go tool cover -func .testCoverage | grep total | awk '{print "Total coverage: "$$3}'
25 | 	go tool cover -html=.testCoverage
26 | 
27 | install-linters: ## install all used linters
28 | 	go install github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_VERSION}
29 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # Apache Pulsar Golang Client Library
 2 | 
 3 | [![Build status](https://github.com/cornelk/pulsar-go/actions/workflows/go.yaml/badge.svg?branch=main)](https://github.com/cornelk/pulsar-go/actions)
 4 | [![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/cornelk/pulsar-go)
 5 | [![Go Report Card](https://goreportcard.com/badge/github.com/cornelk/pulsar-go)](https://goreportcard.com/report/github.com/cornelk/pulsar-go)
 6 | [![codecov](https://codecov.io/gh/cornelk/pulsar-go/branch/main/graph/badge.svg?token=NS5UY28V3A)](https://codecov.io/gh/cornelk/pulsar-go)
 7 | 
 8 | 
 9 | An alternative Golang client library for the [Apache Pulsar](https://pulsar.apache.org/) project.
10 | 
11 | ## Benefits over other Pulsar Go libraries
12 | 
13 | * Faster message processing
14 | * Pure Golang, works without the use of Cgo
15 | * Idiomatic and cleaner Go
16 | * Better stability
17 | * Allows specifying initial positions for topic pattern subscriptions
18 | * Higher test coverage
19 | * Pluggable logger interface
20 | 
21 | ## Status
22 | 
23 | The library is in an early state of development; the API is not yet stable.
24 | Any help or input is welcome.
25 | 
26 | ## Alternative libraries
27 | 
28 | * [apache/pulsar-client-go](https://github.com/apache/pulsar-client-go)
29 |   the official Golang client that inspired the creation of this alternative client.
30 | 
31 | * [apache/pulsar/pulsar-client-go](https://github.com/apache/pulsar/tree/master/pulsar-client-go)
32 |   a Cgo-based client library that will be deprecated.
33 | 
34 | * [Comcast/pulsar-client-go](https://github.com/Comcast/pulsar-client-go)
35 |   an older client that no longer appears to be maintained and lacks features such as batching.
--------------------------------------------------------------------------------
/TESTING.md:
--------------------------------------------------------------------------------
 1 | ## How to run the integration tests
 2 | 
 3 | Set up a `docker-compose.yml` with the following content:
 4 | 
 5 | ```yaml
 6 | version: '3.5'
 7 | 
 8 | services:
 9 |   pulsar:
10 |     image: apachepulsar/pulsar:2.5.2
11 |     container_name: pulsar_test
12 |     ports:
13 |       - "6650:6650"
14 |       - "9080:8080"
15 |     volumes:
16 |       - ./data/pulsar:/pulsar/data
17 |     command: "bin/pulsar standalone"
18 | ```
19 | 
20 | Start the container:
21 | 
22 | `docker-compose up -d`
23 | 
24 | Run the integration tests:
25 | 
26 | `go test -tags integration -v .`
27 | 
--------------------------------------------------------------------------------
/client.go:
--------------------------------------------------------------------------------
  1 | // Package pulsar implements an Apache Pulsar client.
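//
// A minimal end-to-end usage sketch, using only the API shown in this
// package (the broker address, topic, and subscription name are
// placeholders, and error handling is elided):
//
//	client, _ := pulsar.NewClient("localhost:6650")
//	_ = client.Dial(context.Background())
//	consumer, _ := client.NewConsumer(context.Background(), pulsar.ConsumerConfig{
//		Topic:        "my-topic",
//		Subscription: "my-subscription",
//		Durable:      true,
//	})
//	msg, _ := consumer.ReadMessage(context.Background())
//	_ = consumer.AckMessage(msg)
//	_ = client.Close()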
2 | package pulsar 3 | 4 | import ( 5 | "context" 6 | "errors" 7 | "fmt" 8 | "net/url" 9 | "reflect" 10 | "regexp" 11 | "strings" 12 | "sync" 13 | "sync/atomic" 14 | "time" 15 | 16 | pb "github.com/cornelk/pulsar-go/proto" 17 | "google.golang.org/protobuf/proto" 18 | ) 19 | 20 | // Client constants that get sent to Pulsar. 21 | const ( 22 | libraryVersion = "0.01" // TODO use git version tag 23 | protocolVersion = int32(pb.ProtocolVersion_v15) 24 | ) 25 | 26 | // Client implements a Pulsar client. 27 | type Client struct { 28 | log Logger 29 | host string 30 | cmds commands 31 | dialer dialer 32 | 33 | cancel context.CancelFunc 34 | ctx context.Context // passed to consumers/producers 35 | closing atomic.Bool 36 | 37 | conn *conn 38 | connMutex sync.RWMutex // protects conn init/close access 39 | 40 | req *requests 41 | 42 | consumers *consumerRegistry 43 | producers *producerRegistry 44 | 45 | connected chan struct{} 46 | stopped chan struct{} 47 | } 48 | 49 | // NewClient creates a new Pulsar client. 50 | func NewClient(serverURL string, opts ...ClientOption) (*Client, error) { 51 | conf := applyOptions(opts) 52 | 53 | if !strings.Contains(serverURL, "://") { 54 | serverURL = "pulsar://" + serverURL 55 | } 56 | 57 | u, err := url.Parse(serverURL) 58 | if err != nil { 59 | return nil, fmt.Errorf("parsing URL: %w", err) 60 | } 61 | 62 | if u.Port() == "" { 63 | // Use default port. 64 | u.Host += ":6650" 65 | } 66 | 67 | ctx, cancel := context.WithCancel(context.Background()) 68 | c := &Client{ 69 | log: conf.Logger, 70 | host: u.Host, 71 | dialer: conf.dialer, 72 | 73 | cancel: cancel, 74 | ctx: ctx, 75 | 76 | req: newRequests(), 77 | 78 | consumers: newConsumerRegistry(), 79 | producers: newProducerRegistry(), 80 | 81 | connected: make(chan struct{}, 1), 82 | stopped: make(chan struct{}, 1), 83 | } 84 | 85 | if c.log == nil || (reflect.ValueOf(c.log).Kind() == reflect.Ptr && reflect.ValueOf(c.log).IsNil()) { 86 | c.log = newLogger() 87 | } 88 | c.cmds = c.newCommandMap() 89 | 90 | return c, nil 91 | } 92 | 93 | // Dial connects to the Pulsar server. 94 | // This needs to be called before a Consumer or Producer can be created. 95 | func (c *Client) Dial(ctx context.Context) error { 96 | conn, err := c.dialer(ctx, c.log, c.host) 97 | if err != nil { 98 | c.log.Errorf("Dialing failed: %s", err.Error()) 99 | return err 100 | } 101 | 102 | c.connMutex.Lock() 103 | c.conn = conn 104 | c.connMutex.Unlock() 105 | 106 | if err = sendConnectCommand(conn); err != nil { 107 | return err 108 | } 109 | 110 | go c.readCommands() 111 | 112 | select { 113 | case <-ctx.Done(): 114 | return ctx.Err() 115 | case <-c.connected: 116 | return nil 117 | } 118 | } 119 | 120 | // NewProducer creates a new Producer, returning after the connection 121 | // has been made. 
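//
// A minimal usage sketch (the topic and producer name are placeholders):
//
//	producer, err := client.NewProducer(ctx, pulsar.ProducerConfig{
//		Topic: "my-topic",
//		Name:  "my-producer",
//	})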
122 | func (c *Client) NewProducer(ctx context.Context, config ProducerConfig) (*Producer, error) { 123 | if c.closing.Load() { 124 | return nil, ErrClientClosing 125 | } 126 | 127 | // TODO check connected state 128 | 129 | b := c.newBrokerConnection() 130 | 131 | id := c.producers.newID() 132 | prod, err := newProducer(c, b, config, id) 133 | if err != nil { 134 | return nil, err 135 | } 136 | 137 | c.producers.add(id, prod) 138 | c.topicLookup(prod.topic.CompleteName, prod.topicReady) 139 | 140 | select { 141 | case <-ctx.Done(): 142 | return nil, ctx.Err() 143 | case <-prod.connected: 144 | return prod, nil 145 | } 146 | } 147 | 148 | func (c *Client) createNewConsumer(config ConsumerConfig) (*consumer, error) { 149 | b := c.newBrokerConnection() 150 | 151 | id := c.consumers.newID() 152 | cons, err := newConsumer(c, b, config, id) 153 | if err != nil { 154 | return nil, err 155 | } 156 | 157 | c.consumers.add(id, cons) 158 | return cons, nil 159 | } 160 | 161 | // NewConsumer creates a new Consumer, returning after the connection 162 | // has been made. 163 | // nolint: ireturn 164 | func (c *Client) NewConsumer(ctx context.Context, config ConsumerConfig) (Consumer, error) { 165 | if err := config.Validate(); err != nil { 166 | return nil, fmt.Errorf("validating config: %w", err) 167 | } 168 | if c.closing.Load() { 169 | return nil, ErrClientClosing 170 | } 171 | 172 | // TODO check connected state 173 | 174 | if config.TopicPattern != "" { 175 | if config.TopicPatternDiscoveryInterval <= 0 { 176 | config.TopicPatternDiscoveryInterval = 30000 177 | } 178 | 179 | b := c.newBrokerConnection() 180 | multi, err := newMultiTopicConsumer(c, b, config) 181 | if err != nil { 182 | return nil, err 183 | } 184 | 185 | go c.nameSpaceTopicLookup(multi, config) 186 | return multi, nil 187 | } 188 | 189 | cons, err := c.createNewConsumer(config) 190 | if err != nil { 191 | return nil, err 192 | } 193 | c.topicLookup(cons.topic, cons.topicLookupFinished) 194 | 195 | select { 196 | case <-ctx.Done(): 197 | return nil, ctx.Err() 198 | case err = <-cons.connected: 199 | return cons, err 200 | } 201 | } 202 | 203 | func (c *Client) newBrokerConnection() brokerConnection { 204 | return brokerConnection{ 205 | ctx: c.ctx, 206 | log: c.log, 207 | conn: c.conn, 208 | req: c.req, 209 | } 210 | } 211 | 212 | func (c *Client) topicLookup(topic string, topicReady requestCallback) { 213 | reqID := c.req.newID() 214 | cmd := newPartitionedMetadataCommand(reqID, topic) 215 | respHandler := func(resp *command) error { 216 | if resp.err != nil { 217 | return resp.err 218 | } 219 | 220 | partitions := resp.PartitionMetadataResponse.GetPartitions() 221 | if partitions != 0 { 222 | return errors.New("partitioned topics are not supported") // TODO support 223 | } 224 | 225 | return nil 226 | } 227 | 228 | if err := c.conn.SendCallbackCommand(c.req, reqID, cmd, respHandler); err != nil { 229 | c.log.Errorf("Getting partitioned meta data failed: %s", err.Error()) 230 | return 231 | } 232 | 233 | reqID = c.req.newID() 234 | c.req.addCallbackCustom(reqID, topicReady, topic) 235 | if err := c.sendLookupTopicCommand(topic, reqID); err != nil { 236 | c.log.Errorf("Sending lookup topic command failed: %s", err.Error()) 237 | return 238 | } 239 | } 240 | 241 | func (c *Client) nameSpaceTopicLookup(multi *multiTopicConsumer, config ConsumerConfig) { 242 | topic, err := NewTopic(config.TopicPattern) 243 | if err != nil { 244 | c.log.Errorf("Processing topic name failed: %s", err.Error()) 245 | return 246 | } 247 | pattern, err := 
regexp.Compile(topic.CompleteName) 248 | if err != nil { 249 | c.log.Errorf("Compiling topic regexp pattern failed: %s", err.Error()) 250 | return 251 | } 252 | 253 | config.MessageChannel = multi.incomingMessages 254 | config.TopicPattern = "" 255 | knownTopics := map[string]struct{}{} 256 | 257 | tick := time.NewTicker(time.Duration(config.TopicPatternDiscoveryInterval) * time.Millisecond) 258 | defer tick.Stop() 259 | 260 | for { 261 | var newTopics []string 262 | 263 | reqID := c.req.newID() 264 | cmd := newGetTopicsOfNamespaceCommand(reqID, topic.Namespace) 265 | 266 | respHandler := func(resp *command) error { 267 | if resp.err != nil { 268 | return resp.err 269 | } 270 | 271 | for _, name := range resp.GetTopicsOfNamespaceResponse.Topics { 272 | t, err := NewTopic(name) 273 | if err != nil { 274 | c.log.Errorf("Processing topic name failed: %s", err.Error()) 275 | continue 276 | } 277 | 278 | if !pattern.MatchString(t.CompleteName) { 279 | continue 280 | } 281 | 282 | if _, ok := knownTopics[t.CompleteName]; !ok { 283 | newTopics = append(newTopics, t.CompleteName) 284 | knownTopics[t.CompleteName] = struct{}{} 285 | } 286 | } 287 | 288 | return nil 289 | } 290 | 291 | // TODO handle deleted topics 292 | 293 | if err = c.conn.SendCallbackCommand(c.req, reqID, cmd, respHandler); err != nil { 294 | c.log.Errorf("Getting topics of namespace failed: %s", err.Error()) 295 | return 296 | } 297 | 298 | if err = c.subscribeToTopics(multi, config, newTopics); err != nil { 299 | return 300 | } 301 | 302 | select { 303 | case <-tick.C: 304 | case <-c.ctx.Done(): 305 | return 306 | } 307 | } 308 | } 309 | 310 | func (c *Client) subscribeToTopics(multi *multiTopicConsumer, config ConsumerConfig, topics []string) error { 311 | var err error 312 | for _, topic := range topics { 313 | if config.InitialPositionCallback != nil { 314 | config.InitialPosition, config.StartMessageID, err = config.InitialPositionCallback(topic) 315 | if err != nil { 316 | c.log.Errorf("Initial position callback failed: %s", err.Error()) 317 | continue 318 | } 319 | } 320 | 321 | config.Topic = topic 322 | cons, err := c.createNewConsumer(config) 323 | if err != nil { 324 | c.log.Errorf("Creating consumer failed: %s", err.Error()) 325 | return err 326 | } 327 | cons.multi = multi 328 | multi.addConsumer(cons.consumerID, cons) 329 | c.topicLookup(cons.topic, cons.topicLookupFinished) 330 | } 331 | return nil 332 | } 333 | 334 | // CloseConsumer closes a specific consumer. 335 | func (c *Client) CloseConsumer(consumerID uint64) error { 336 | cons, ok := c.consumers.getAndDelete(consumerID) 337 | if !ok { 338 | return fmt.Errorf("consumer %d not found", consumerID) 339 | } 340 | 341 | var err error 342 | cons.stateMu.Lock() 343 | if cons.state == consumerReady || cons.state == consumerSubscribed { 344 | cons.state = consumerClosing 345 | cons.stateMu.Unlock() 346 | 347 | reqID := c.req.newID() 348 | cmd := newCloseConsumerCommand(consumerID, reqID) 349 | err = c.conn.SendCallbackCommand(c.req, reqID, cmd) 350 | 351 | cons.stateMu.Lock() 352 | cons.state = consumerClosed 353 | } 354 | cons.stateMu.Unlock() 355 | 356 | return err 357 | } 358 | 359 | // CloseProducer closes a specific producer. 
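// It returns an error if no producer with the given ID is registered with
// the client, or if sending the close command to the broker fails.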
360 | func (c *Client) CloseProducer(producerID uint64) error { 361 | _, ok := c.producers.getAndDelete(producerID) 362 | if !ok { 363 | return fmt.Errorf("producer %d not found", producerID) 364 | } 365 | 366 | reqID := c.req.newID() 367 | cmd := newCloseProducerCommand(producerID, reqID) 368 | return c.conn.SendCallbackCommand(c.req, reqID, cmd) 369 | } 370 | 371 | // Close closes all consumers, producers and the client connection. 372 | func (c *Client) Close() error { 373 | if !c.closing.CompareAndSwap(false, true) { 374 | return nil 375 | } 376 | 377 | c.cancel() 378 | 379 | c.connMutex.Lock() 380 | if c.conn == nil { 381 | c.connMutex.Unlock() 382 | return nil 383 | } 384 | c.connMutex.Unlock() 385 | 386 | for _, cons := range c.consumers.all() { 387 | _ = c.CloseConsumer(cons.consumerID) 388 | } 389 | 390 | for _, prods := range c.producers.all() { 391 | _ = c.CloseProducer(prods.producerID) 392 | } 393 | 394 | err := c.conn.close() 395 | 396 | <-c.stopped 397 | 398 | return err 399 | } 400 | 401 | func (c *Client) sendLookupTopicCommand(topic string, reqID uint64) error { 402 | base := &pb.BaseCommand{ 403 | Type: pb.BaseCommand_LOOKUP.Enum(), 404 | LookupTopic: &pb.CommandLookupTopic{ 405 | Topic: proto.String(topic), 406 | RequestId: proto.Uint64(reqID), 407 | Authoritative: proto.Bool(false), 408 | }, 409 | } 410 | return c.conn.WriteCommand(base, nil) 411 | } 412 | 413 | func (c *Client) readCommands() { 414 | defer close(c.stopped) 415 | 416 | for { 417 | cmd, err := c.conn.readCommand() 418 | if err != nil { 419 | if errors.Is(err, ErrNetClosing) { 420 | return 421 | } 422 | 423 | c.log.Errorf("Reading command failed: %s", err.Error()) 424 | return 425 | } 426 | 427 | if err = c.processReceivedCommand(cmd); err != nil { 428 | c.log.Errorf("Processing received command %+v failed: %s", cmd, err.Error()) 429 | } 430 | } 431 | } 432 | 433 | func (c *Client) processReceivedCommand(cmd *command) error { 434 | c.log.Debugf("Received command: %+v", cmd) 435 | 436 | handler, ok := c.cmds[*cmd.Type] 437 | if !ok { 438 | return fmt.Errorf("unsupported command %q", cmd.GetType()) 439 | } 440 | 441 | if handler == nil { 442 | return nil 443 | } 444 | 445 | return handler(cmd) 446 | } 447 | 448 | func newPartitionedMetadataCommand(reqID uint64, topic string) *pb.BaseCommand { 449 | return &pb.BaseCommand{ 450 | Type: pb.BaseCommand_PARTITIONED_METADATA.Enum(), 451 | PartitionMetadata: &pb.CommandPartitionedTopicMetadata{ 452 | Topic: proto.String(topic), 453 | RequestId: proto.Uint64(reqID), 454 | }, 455 | } 456 | } 457 | 458 | // Topics returns the topics of a namespace. 459 | // Defaults to DefaultNamespace if no namespace is given. 
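//
// A minimal usage sketch (client is a connected *Client, error handling
// elided):
//
//	topics, _ := client.Topics("") // lists the topics of DefaultNamespace
//	for _, t := range topics {
//		fmt.Println(t.CompleteName)
//	}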
460 | func (c *Client) Topics(namespace string) ([]*Topic, error) { 461 | if namespace == "" { 462 | namespace = DefaultNamespace 463 | } 464 | 465 | reqID := c.req.newID() 466 | cmd := newGetTopicsOfNamespaceCommand(reqID, namespace) 467 | 468 | var topics []*Topic 469 | respHandler := func(resp *command) error { 470 | if resp.err != nil { 471 | return resp.err 472 | } 473 | 474 | for _, name := range resp.GetTopicsOfNamespaceResponse.Topics { 475 | t, err := NewTopic(name) 476 | if err != nil { 477 | return err 478 | } 479 | topics = append(topics, t) 480 | } 481 | return nil 482 | } 483 | 484 | if err := c.conn.SendCallbackCommand(c.req, reqID, cmd, respHandler); err != nil { 485 | return nil, err 486 | } 487 | 488 | return topics, nil 489 | } 490 | -------------------------------------------------------------------------------- /client_int_test.go: -------------------------------------------------------------------------------- 1 | //go:build integration 2 | 3 | package pulsar 4 | 5 | import ( 6 | "context" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestNewClient(t *testing.T) { 14 | cases := map[string]struct { 15 | serverURL string 16 | expectedURL string 17 | }{ 18 | "full": { 19 | serverURL: "pulsar://example.com:12345", 20 | expectedURL: "example.com:12345", 21 | }, 22 | "host and port": { 23 | serverURL: "example.com:12345", 24 | expectedURL: "example.com:12345", 25 | }, 26 | "host only": { 27 | serverURL: "example.com", 28 | expectedURL: "example.com:6650", 29 | }, 30 | "port only": { 31 | // valid because this will connect to the local host 32 | serverURL: ":12345", 33 | expectedURL: ":12345", 34 | }, 35 | "empty": { 36 | serverURL: "", 37 | expectedURL: ":6650", 38 | }, 39 | } 40 | 41 | for name, c := range cases { 42 | t.Run(name, func(t *testing.T) { 43 | client, err := NewClient(c.serverURL) 44 | require.NoError(t, err) 45 | assert.Equal(t, c.expectedURL, client.host) 46 | }) 47 | } 48 | } 49 | 50 | func TestClientTopics(t *testing.T) { 51 | client := setup(t) 52 | defer func() { 53 | assert.NoError(t, client.Close()) 54 | }() 55 | 56 | topic := randomTopicName() 57 | prodConf := ProducerConfig{ 58 | Topic: topic, 59 | Name: "test-producer", 60 | } 61 | 62 | ctx := context.Background() 63 | _, err := client.NewProducer(ctx, prodConf) 64 | require.NoError(t, err) 65 | 66 | topics, err := client.Topics(DefaultNamespace) 67 | require.NoError(t, err) 68 | 69 | var found bool 70 | for _, t := range topics { 71 | if t.LocalName == topic { 72 | found = true 73 | break 74 | } 75 | } 76 | 77 | assert.True(t, found, "topic not found in list") 78 | } 79 | -------------------------------------------------------------------------------- /client_options.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | // ClientOption ... 4 | type ClientOption func(*clientConfig) 5 | 6 | type clientConfig struct { 7 | Logger Logger 8 | dialer dialer 9 | } 10 | 11 | // WithLogger sets a custom logger. 12 | func WithLogger(logger Logger) ClientOption { 13 | return func(conf *clientConfig) { 14 | conf.Logger = logger 15 | } 16 | } 17 | 18 | // withDialer sets a custom dialer. 19 | // Used for testing. 
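//
// For example, a test can construct a client with an explicit dialer, as
// the package's own tests do (any function matching the dialer type works,
// defaultDialer is shown here):
//
//	client, err := NewClient("localhost:6650", withDialer(defaultDialer))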
20 | func withDialer(dialer dialer) ClientOption { 21 | return func(conf *clientConfig) { 22 | conf.dialer = dialer 23 | } 24 | } 25 | 26 | func applyOptions(opts []ClientOption) clientConfig { 27 | conf := clientConfig{ 28 | dialer: defaultDialer, 29 | } 30 | for _, opt := range opts { 31 | opt(&conf) 32 | } 33 | return conf 34 | } 35 | -------------------------------------------------------------------------------- /client_test.go: -------------------------------------------------------------------------------- 1 | //go:build integration 2 | 3 | package pulsar 4 | 5 | import ( 6 | "testing" 7 | 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestClientTopicsCommunication(t *testing.T) { 12 | client, err := NewClient("pulsar://localhost:6650", 13 | WithLogger(newTestLogger(t)), 14 | withDialer(defaultDialer)) 15 | require.NoError(t, err) 16 | require.NotNil(t, client) 17 | 18 | /* 19 | TODO simulate communication 20 | === RUN TestClientTopics 21 | log.go:184: TestClientTopics 2021/10/01 01:08:52 conn.go:72: *** Sending command: type:CONNECT connect: 22 | log.go:184: TestClientTopics 2021/10/01 01:08:52 client.go:407: *** Received command: type:CONNECTED connected: 23 | log.go:184: TestClientTopics 2021/10/01 01:08:52 conn.go:72: *** Sending command: type:PARTITIONED_METADATA partitionMetadata: 24 | log.go:184: TestClientTopics 2021/10/01 01:08:52 client.go:407: *** Received command: type:PARTITIONED_METADATA_RESPONSE partitionMetadataResponse: 25 | log.go:184: TestClientTopics 2021/10/01 01:08:52 conn.go:72: *** Sending command: type:LOOKUP lookupTopic: 26 | log.go:184: TestClientTopics 2021/10/01 01:08:52 client.go:407: *** Received command: type:LOOKUP_RESPONSE lookupTopicResponse: 27 | log.go:184: TestClientTopics 2021/10/01 01:08:52 client.go:401: Processing received command type:LOOKUP_RESPONSE lookupTopicResponse: failed: %!w(*errors.errorString=&{topic lookup response not supported: Failed}) 28 | log.go:184: TestClientTopics 2021/10/01 01:09:22 client.go:407: *** Received command: type:PING ping:<> 29 | log.go:184: TestClientTopics 2021/10/01 01:09:22 conn.go:72: *** Sending command: type:PONG pong:<> 30 | log.go:184: TestClientTopics 2021/10/01 01:09:52 client.go:407: *** Received command: type:PING ping:<> 31 | log.go:184: TestClientTopics 2021/10/01 01:09:52 conn.go:72: *** Sending command: type:PONG pong:<> 32 | log.go:184: TestClientTopics 2021/10/01 01:10:22 client.go:407: *** Received command: type:PING ping:<> 33 | */ 34 | } 35 | -------------------------------------------------------------------------------- /commands.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "net/url" 7 | 8 | pb "github.com/cornelk/pulsar-go/proto" 9 | "google.golang.org/protobuf/proto" 10 | ) 11 | 12 | type command struct { 13 | *pb.BaseCommand 14 | payloadSize uint32 // may be zero 15 | err error // TODO: currently errors are logged multiple times (in callbacks and on client level) 16 | custom string // TODO: improve name 17 | } 18 | 19 | type ( 20 | commandHandler func(cmd *command) error 21 | commands map[pb.BaseCommand_Type]commandHandler 22 | ) 23 | 24 | // noRequestID is used inside this library to signal that a command has no request ID. 25 | const noRequestID = 0 26 | 27 | // ErrClientClosing is returned when a new consumer or producer should be 28 | // created while the client is closing all connections. 
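//
// Callers can detect this condition with errors.Is (config is a valid
// ConsumerConfig):
//
//	if _, err := client.NewConsumer(ctx, config); errors.Is(err, ErrClientClosing) {
//		// the client is shutting down, stop creating consumers
//	}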
29 | var ErrClientClosing = errors.New("client is closing connections")
30 | 
31 | func (c *Client) newCommandMap() commands {
32 | 	return commands{
33 | 		pb.BaseCommand_CONNECTED:                        c.handleConnectedCommand,
34 | 		pb.BaseCommand_SEND_RECEIPT:                     c.handleSendReceiptCommand,
35 | 		pb.BaseCommand_SEND_ERROR:                       c.handleSendErrorCommand,
36 | 		pb.BaseCommand_MESSAGE:                          c.handleMessage,
37 | 		pb.BaseCommand_SUCCESS:                          c.handleSuccessCommand,
38 | 		pb.BaseCommand_ERROR:                            c.handleErrorCommand,
39 | 		pb.BaseCommand_CLOSE_CONSUMER:                   c.handleCloseConsumerCommand,
40 | 		pb.BaseCommand_PRODUCER_SUCCESS:                 c.handleProducerSuccessCommand,
41 | 		pb.BaseCommand_PING:                             c.handlePingCommand,
42 | 		pb.BaseCommand_PARTITIONED_METADATA_RESPONSE:    c.handlePartitionedMetadataResponseCommand,
43 | 		pb.BaseCommand_LOOKUP_RESPONSE:                  c.handleTopicLookupResponse,
44 | 		pb.BaseCommand_GET_LAST_MESSAGE_ID_RESPONSE:     c.handleLastMessageIDResponse,
45 | 		pb.BaseCommand_GET_TOPICS_OF_NAMESPACE_RESPONSE: c.handleGetTopicsOfNamespaceResponse,
46 | 	}
47 | }
48 | 
49 | // TODO: write a unit test to prove that for every handled command we have a way to determine the request ID.
50 | func (cmd *command) requestID() uint64 {
51 | 	switch cmd.GetType() {
52 | 	case pb.BaseCommand_CONNECTED, pb.BaseCommand_MESSAGE, pb.BaseCommand_PING:
53 | 		return noRequestID
54 | 	case pb.BaseCommand_SEND_RECEIPT:
55 | 		return cmd.GetSendReceipt().GetSequenceId()
56 | 	case pb.BaseCommand_SEND_ERROR:
57 | 		return cmd.GetSendError().GetSequenceId()
58 | 	case pb.BaseCommand_SUCCESS:
59 | 		return cmd.GetSuccess().GetRequestId()
60 | 	case pb.BaseCommand_ERROR:
61 | 		return cmd.GetError().GetRequestId()
62 | 	case pb.BaseCommand_CLOSE_CONSUMER:
63 | 		return cmd.GetCloseConsumer().GetRequestId()
64 | 	case pb.BaseCommand_PRODUCER_SUCCESS:
65 | 		return cmd.GetProducerSuccess().GetRequestId()
66 | 	case pb.BaseCommand_PARTITIONED_METADATA_RESPONSE:
67 | 		return cmd.GetPartitionMetadataResponse().GetRequestId()
68 | 	case pb.BaseCommand_LOOKUP_RESPONSE:
69 | 		return cmd.GetLookupTopicResponse().GetRequestId()
70 | 	case pb.BaseCommand_GET_LAST_MESSAGE_ID_RESPONSE:
71 | 		return cmd.GetGetLastMessageIdResponse().GetRequestId()
72 | 	case pb.BaseCommand_GET_TOPICS_OF_NAMESPACE_RESPONSE:
73 | 		return cmd.GetGetTopicsOfNamespaceResponse().GetRequestId()
74 | 	default:
75 | 		return noRequestID
76 | 	}
77 | }
78 | 
79 | func (c *Client) handleRequestCallback(cmd *command) error {
80 | 	reqID := cmd.requestID()
81 | 	if reqID == noRequestID {
82 | 		return nil
83 | 	}
84 | 
85 | 	callback, custom := c.req.remove(reqID)
86 | 	if callback == nil {
87 | 		return nil
88 | 	}
89 | 
90 | 	cmd.custom = custom
91 | 	return callback(cmd)
92 | }
93 | 
94 | func (c *Client) handleConnectedCommand(*command) error {
95 | 	close(c.connected)
96 | 	return nil
97 | }
98 | 
99 | func (c *Client) handleTopicLookupResponse(cmd *command) error {
100 | 	resp := cmd.LookupTopicResponse
101 | 	if r := resp.GetResponse(); r != pb.CommandLookupTopicResponse_Connect {
102 | 		// TODO support redirect
103 | 		return fmt.Errorf("topic lookup response not supported: %v", r)
104 | 	}
105 | 
106 | 	// TODO callback in case of error?
107 | 	u, err := url.Parse(resp.GetBrokerServiceUrl())
108 | 	if err != nil {
109 | 		return fmt.Errorf("parsing URL: %w", err)
110 | 	}
111 | 
112 | 	if c.host != u.Host { // is this the same server the client is already connected to?
113 | 		_ = u // TODO support
114 | 	}
115 | 
116 | 	return c.handleRequestCallback(cmd)
117 | }
118 | 
119 | func (c *Client) handleMessage(base *command) error {
120 | 	cmd := base.Message
121 | 	msgMeta, payload, err := c.conn.readMessageMetaData(base.payloadSize)
122 | 	if err != nil {
123 | 		return err
124 | 	}
125 | 	if msgMeta.GetCompression() != pb.CompressionType_NONE {
126 | 		return errors.New("compressed messages not supported") // TODO support
127 | 	}
128 | 
129 | 	consumerID := cmd.GetConsumerId()
130 | 	cons, ok := c.consumers.get(consumerID)
131 | 	if !ok {
132 | 		if c.closing.Load() {
133 | 			return nil
134 | 		}
135 | 		// a message was received for a consumer that is now closed,
136 | 		// do not treat it as an error.
137 | 		c.log.Debugf("Consumer '%d' for message does not exist", consumerID)
138 | 		return nil
139 | 	}
140 | 
141 | 	id := cmd.MessageId
142 | 	permits := uint64(1)
143 | 	if num := msgMeta.GetNumMessagesInBatch(); num > 0 {
144 | 		// each batch message counts as 1 permit
145 | 		permits = uint64(num)
146 | 
147 | 		var msg []byte
148 | 		for i := int32(0); i < num; i++ {
149 | 			_, msg, payload, err = c.conn.readBatchedMessage(payload)
150 | 			if err != nil {
151 | 				return err
152 | 			}
153 | 			m := &Message{
154 | 				consumerID: consumerID,
155 | 				Body:       msg,
156 | 				Topic:      cons.topic,
157 | 				ID: &MessageID{
158 | 					LedgerId:   id.LedgerId,
159 | 					EntryId:    id.EntryId,
160 | 					Partition:  id.Partition,
161 | 					BatchIndex: proto.Int32(i),
162 | 				},
163 | 			}
164 | 			cons.incomingMessages <- m
165 | 		}
166 | 	} else {
167 | 		m := &Message{
168 | 			consumerID: consumerID, Body: payload,
169 | 			Topic: cons.topic, ID: (*MessageID)(id),
170 | 		}
171 | 		cons.incomingMessages <- m
172 | 	}
173 | 
174 | 	return cons.useMessagePermits(permits)
175 | }
176 | 
177 | func (c *Client) handleSendReceiptCommand(base *command) error {
178 | 	cmd := base.SendReceipt
179 | 	prod, ok := c.producers.get(cmd.GetProducerId())
180 | 	if !ok {
181 | 		return errors.New("producer not found")
182 | 	}
183 | 
184 | 	seq := cmd.GetSequenceId()
185 | 	return prod.processSendResult(seq, cmd.MessageId, nil)
186 | }
187 | 
188 | func (c *Client) handleSendErrorCommand(base *command) error {
189 | 	cmd := base.SendError
190 | 	prod, ok := c.producers.get(cmd.GetProducerId())
191 | 	if !ok {
192 | 		return errors.New("producer not found")
193 | 	}
194 | 
195 | 	err := fmt.Errorf("%s: %s", cmd.GetError(), cmd.GetMessage())
196 | 	seq := cmd.GetSequenceId()
197 | 	return prod.processSendResult(seq, nil, err)
198 | }
199 | 
200 | func (c *Client) handleSuccessCommand(cmd *command) error {
201 | 	return c.handleRequestCallback(cmd)
202 | }
203 | 
204 | func (c *Client) handleErrorCommand(cmd *command) error {
205 | 	cmd.err = fmt.Errorf("%s: %s", cmd.Error.GetError(), cmd.Error.GetMessage())
206 | 	return c.handleRequestCallback(cmd)
207 | }
208 | 
209 | func (c *Client) handleProducerSuccessCommand(cmd *command) error {
210 | 	return c.handleRequestCallback(cmd)
211 | }
212 | 
213 | func (c *Client) handlePingCommand(*command) error {
214 | 	return sendPongCommand(c.conn)
215 | }
216 | 
217 | func (c *Client) handlePartitionedMetadataResponseCommand(cmd *command) error {
218 | 	return c.handleRequestCallback(cmd)
219 | }
220 | 
221 | func (c *Client) handleLastMessageIDResponse(cmd *command) error {
222 | 	return c.handleRequestCallback(cmd)
223 | }
224 | 
225 | func (c *Client) handleGetTopicsOfNamespaceResponse(cmd *command) error {
226 | 	return c.handleRequestCallback(cmd)
227 | }
228 | 
229 | func sendConnectCommand(c clientConn) error {
230 | 	base := &pb.BaseCommand{
231 | 		Type: pb.BaseCommand_CONNECT.Enum(),
232 | 		Connect: &pb.CommandConnect{
233 | 			ClientVersion:   proto.String("Pulsar Go " + libraryVersion),
234 | 			AuthMethodName:  proto.String(""),
235 | 			ProtocolVersion: proto.Int32(protocolVersion),
236 | 		},
237 | 	}
238 | 	return c.WriteCommand(base, nil)
239 | }
240 | 
241 | func sendPongCommand(c clientConn) error {
242 | 	base := &pb.BaseCommand{
243 | 		Type: pb.BaseCommand_PONG.Enum(),
244 | 		Pong: &pb.CommandPong{},
245 | 	}
246 | 	return c.WriteCommand(base, nil)
247 | }
248 | 
249 | func newCloseConsumerCommand(consumerID, requestID uint64) *pb.BaseCommand {
250 | 	return &pb.BaseCommand{
251 | 		Type: pb.BaseCommand_CLOSE_CONSUMER.Enum(),
252 | 		CloseConsumer: &pb.CommandCloseConsumer{
253 | 			ConsumerId: proto.Uint64(consumerID),
254 | 			RequestId:  proto.Uint64(requestID),
255 | 		},
256 | 	}
257 | }
258 | 
259 | func newCloseProducerCommand(producerID, requestID uint64) *pb.BaseCommand {
260 | 	return &pb.BaseCommand{
261 | 		Type: pb.BaseCommand_CLOSE_PRODUCER.Enum(),
262 | 		CloseProducer: &pb.CommandCloseProducer{
263 | 			ProducerId: proto.Uint64(producerID),
264 | 			RequestId:  proto.Uint64(requestID),
265 | 		},
266 | 	}
267 | }
268 | 
269 | func newGetTopicsOfNamespaceCommand(requestID uint64, namespace string) *pb.BaseCommand {
270 | 	return &pb.BaseCommand{
271 | 		Type: pb.BaseCommand_GET_TOPICS_OF_NAMESPACE.Enum(),
272 | 		GetTopicsOfNamespace: &pb.CommandGetTopicsOfNamespace{
273 | 			RequestId: proto.Uint64(requestID),
274 | 			Namespace: proto.String(publicTenant + "/" + namespace),
275 | 			Mode:      pb.CommandGetTopicsOfNamespace_PERSISTENT.Enum(),
276 | 		},
277 | 	}
278 | }
279 | 
280 | func newGetLastMessageIDCommand(consumerID, requestID uint64) *pb.BaseCommand {
281 | 	return &pb.BaseCommand{
282 | 		Type: pb.BaseCommand_GET_LAST_MESSAGE_ID.Enum(),
283 | 		GetLastMessageId: &pb.CommandGetLastMessageId{
284 | 			ConsumerId: proto.Uint64(consumerID),
285 | 			RequestId:  proto.Uint64(requestID),
286 | 		},
287 | 	}
288 | }
289 | 
290 | // The server sent a close consumer command. Assign the Consumer a new ID
291 | // and reconnect it.
292 | func (c *Client) handleCloseConsumerCommand(base *command) error {
293 | 	cmd := base.CloseConsumer
294 | 
295 | 	consumerID := cmd.GetConsumerId()
296 | 	cons, ok := c.consumers.get(consumerID)
297 | 	if !ok {
298 | 		return fmt.Errorf("consumer %d not found", consumerID)
299 | 	}
300 | 
301 | 	c.consumers.delete(consumerID)
302 | 	newID := c.consumers.newID()
303 | 	c.consumers.add(newID, cons)
304 | 
305 | 	cons.clearReceiverQueue()
306 | 	cons.reset(newID)
307 | 
308 | 	if cons.multi != nil {
309 | 		cons.multi.changeConsumerID(cons, consumerID, newID)
310 | 	}
311 | 
312 | 	go c.topicLookup(cons.topic, cons.topicLookupFinished)
313 | 	return nil
314 | }
315 | 
--------------------------------------------------------------------------------
/conn.go:
--------------------------------------------------------------------------------
  1 | package pulsar
  2 | 
  3 | import (
  4 | 	"bufio"
  5 | 	"context"
  6 | 	"encoding/binary"
  7 | 	"errors"
  8 | 	"fmt"
  9 | 	"hash/crc32"
 10 | 	"io"
 11 | 	"net"
 12 | 	"sync"
 13 | 
 14 | 	pb "github.com/cornelk/pulsar-go/proto"
 15 | 	"google.golang.org/protobuf/proto"
 16 | )
 17 | 
 18 | const (
 19 | 	// maxFrameSize is the maximum size that Pulsar allows for messages.
 20 | 	maxFrameSize        = 5 * 1024 * 1024
 21 | 	magicCrc32c  uint16 = 0x0e01
 22 | )
 23 | 
 24 | var (
 25 | 	crcOnce  sync.Once // guards init of crcTable via newConn
 26 | 	crcTable *crc32.Table
 27 | )
 28 | 
 29 | // conn represents a connection to a Pulsar broker.
 30 | // The functions are not safe for concurrent use.
31 | type conn struct { 32 | log Logger 33 | 34 | closer io.Closer 35 | reader bufio.Reader 36 | writer bufio.Writer 37 | writeMutex sync.Mutex // protects writer writing 38 | } 39 | 40 | type clientConn interface { 41 | SendCallbackCommand(req *requests, reqID uint64, cmd proto.Message, callbacks ...requestCallback) error 42 | WriteCommand(cmd proto.Message, payload []byte) error 43 | } 44 | 45 | type brokerConnection struct { 46 | ctx context.Context 47 | log Logger 48 | conn clientConn 49 | req *requests 50 | } 51 | 52 | // ErrNetClosing is returned when a network descriptor is used after 53 | // it has been closed. 54 | var ErrNetClosing = errors.New("use of closed network connection") 55 | 56 | // newConn returns a new Pulsar broker connection. 57 | func newConn(log Logger, con io.ReadWriteCloser) *conn { 58 | crcOnce.Do(func() { 59 | crcTable = crc32.MakeTable(crc32.Castagnoli) 60 | }) 61 | return &conn{ 62 | log: log, 63 | closer: con, 64 | reader: *bufio.NewReader(con), 65 | writer: *bufio.NewWriter(con), 66 | } 67 | } 68 | 69 | // close the connection. 70 | // Any blocked Read or Write operations will be unblocked and return errors. 71 | func (c *conn) close() error { 72 | return c.closer.Close() 73 | } 74 | 75 | // WriteCommand sends a command to the Pulsar broker. 76 | func (c *conn) WriteCommand(cmd proto.Message, payload []byte) error { 77 | c.log.Debugf("Sending command: %+v", cmd) 78 | 79 | serialized, err := proto.Marshal(cmd) 80 | if err != nil { 81 | return fmt.Errorf("marshalling failed: %w", err) 82 | } 83 | 84 | cmdSize := uint32(len(serialized)) 85 | 86 | c.writeMutex.Lock() 87 | defer c.writeMutex.Unlock() 88 | 89 | b := make([]byte, 4) 90 | // write size of the frame, counting everything that comes after it 91 | binary.BigEndian.PutUint32(b, cmdSize+4+uint32(len(payload))) 92 | if _, err = c.writer.Write(b); err != nil { 93 | return fmt.Errorf("writing frame size failed: %w", err) 94 | } 95 | 96 | // write size of the protobuf-serialized command 97 | binary.BigEndian.PutUint32(b, cmdSize) 98 | if _, err = c.writer.Write(b); err != nil { 99 | return fmt.Errorf("writing command size failed: %w", err) 100 | } 101 | 102 | // write the protobuf-serialized command 103 | if _, err = c.writer.Write(serialized); err != nil { 104 | return fmt.Errorf("writing marshalled command failed: %w", err) 105 | } 106 | 107 | if len(payload) > 0 { 108 | // write the payload 109 | if _, err = c.writer.Write(payload); err != nil { 110 | return fmt.Errorf("writing command payload failed: %w", err) 111 | } 112 | } 113 | 114 | if err = c.writer.Flush(); err != nil { 115 | return fmt.Errorf("flushing connection failed: %w", err) 116 | } 117 | return nil 118 | } 119 | 120 | // readCommand reads a command and returns the optional payload size that can 121 | // be read after the command. 
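//
// A command frame on the wire is laid out as follows, matching what
// WriteCommand produces (both size fields are big-endian uint32 values):
//
//	[4 bytes] totalSize:   size of everything that follows this field
//	[4 bytes] commandSize: size of the protobuf-serialized BaseCommand
//	[commandSize bytes]    the protobuf-serialized BaseCommand
//	[remaining bytes]      optional payload (totalSize - commandSize - 4)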
122 | func (c *conn) readCommand() (*command, error) { 123 | b := make([]byte, 4+4) 124 | _, err := io.ReadFull(&c.reader, b) 125 | if err != nil { 126 | var e *net.OpError 127 | if errors.As(err, &e) { 128 | if e.Err.Error() == ErrNetClosing.Error() { 129 | return nil, ErrNetClosing 130 | } 131 | } 132 | 133 | return nil, fmt.Errorf("reading header failed: %w", err) 134 | } 135 | 136 | // 4 byte totalSize 137 | frameSize := binary.BigEndian.Uint32(b) 138 | if frameSize > maxFrameSize { 139 | return nil, fmt.Errorf("frame size exceeds maximum: %d", frameSize) 140 | } 141 | 142 | // 4 byte commandSize 143 | cmdSize := binary.BigEndian.Uint32(b[4:]) 144 | 145 | // read commandSize bytes of message 146 | data := make([]byte, cmdSize) 147 | _, err = io.ReadFull(&c.reader, data) 148 | if err != nil { 149 | return nil, fmt.Errorf("reading body failed: %w", err) 150 | } 151 | 152 | cmd := &command{BaseCommand: &pb.BaseCommand{}} 153 | if err = proto.Unmarshal(data, cmd.BaseCommand); err != nil { 154 | return nil, fmt.Errorf("unmarshalling failed: %w", err) 155 | } 156 | 157 | cmd.payloadSize = frameSize - cmdSize - 4 158 | return cmd, nil 159 | } 160 | 161 | // readMessageMetaData reads the message metadata with the given payload 162 | // size that has been returned from command header. 163 | func (c *conn) readMessageMetaData(payloadSize uint32) (msgMeta *pb.MessageMetadata, payload []byte, err error) { 164 | if payloadSize < 10 || payloadSize > maxFrameSize { 165 | return nil, nil, fmt.Errorf("invalid payload size %d", payloadSize) 166 | } 167 | 168 | b := make([]byte, payloadSize) 169 | _, err = io.ReadFull(&c.reader, b) 170 | if err != nil { 171 | return nil, nil, fmt.Errorf("reading data failed: %w", err) 172 | } 173 | 174 | // 2-byte byte array (0x0e01) identifying the current format 175 | magicNumber := binary.BigEndian.Uint16(b) 176 | if magicNumber != magicCrc32c { 177 | return nil, nil, errors.New("header does not contain magic") 178 | } 179 | 180 | // CRC32-C checksum of size and payload 181 | checksum := binary.BigEndian.Uint32(b[2:6]) 182 | computedChecksum := crc32.Checksum(b[2+4:], crcTable) 183 | if checksum != computedChecksum { 184 | return nil, nil, errors.New("checksum mismatch") 185 | } 186 | 187 | // size of the message metadata 188 | size := binary.BigEndian.Uint32(b[6:10]) 189 | if size == 0 || size > maxFrameSize { 190 | return nil, nil, fmt.Errorf("invalid message metadata size %d", size) 191 | } 192 | 193 | msgMeta = &pb.MessageMetadata{} 194 | if err = proto.Unmarshal(b[10:10+size], msgMeta); err != nil { 195 | return nil, nil, fmt.Errorf("unmarshalling failed: %w", err) 196 | } 197 | 198 | return msgMeta, b[10+size:], nil 199 | } 200 | 201 | // readBatchedMessage reads a batched message from the given payload buffer. 202 | func (c *conn) readBatchedMessage(b []byte) (meta *pb.SingleMessageMetadata, msg, remaining []byte, err error) { 203 | size := binary.BigEndian.Uint32(b) 204 | if int(size) > len(b)-4 { 205 | return nil, nil, nil, 206 | fmt.Errorf("message size %d exceeds buffer length %d", size, len(b)-4) 207 | } 208 | 209 | meta = &pb.SingleMessageMetadata{} 210 | if err = proto.Unmarshal(b[4:4+size], meta); err != nil { 211 | return nil, nil, nil, fmt.Errorf("unmarshalling failed: %w", err) 212 | } 213 | 214 | ps := uint32(meta.GetPayloadSize()) 215 | end := 4 + size + ps 216 | return meta, b[4+size : end], b[end:], nil 217 | } 218 | 219 | // SendCallbackCommand sends a command to the server that accepts callbacks. 
220 | // It will execute all optional callback handlers. 221 | // The function returns after the server responded to the command. 222 | func (c *conn) SendCallbackCommand(req *requests, reqID uint64, cmd proto.Message, callbacks ...requestCallback) error { 223 | // The server response will be processed asynchronously by the client. 224 | // In order to block this function until we have received the response and 225 | // executed all callbacks, we use a channel. The channel capacity is 1, 226 | // so we never block the response handling for other messages. 227 | callbackErr := make(chan error, 1) 228 | req.addCallback(reqID, func(resp *command) (err error) { 229 | defer func() { callbackErr <- err }() 230 | 231 | for _, callback := range callbacks { 232 | if err := callback(resp); err != nil { 233 | return err 234 | } 235 | } 236 | 237 | return resp.err 238 | }) 239 | 240 | if err := c.WriteCommand(cmd, nil); err != nil { 241 | return err 242 | } 243 | 244 | return <-callbackErr 245 | } 246 | -------------------------------------------------------------------------------- /consumer.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "math" 9 | "regexp" 10 | "sync" 11 | 12 | pb "github.com/cornelk/pulsar-go/proto" 13 | "google.golang.org/protobuf/proto" 14 | ) 15 | 16 | // SubscriptionType ... 17 | type SubscriptionType pb.CommandSubscribe_SubType 18 | 19 | // Subscription type options. 20 | const ( 21 | ExclusiveSubscription = SubscriptionType(pb.CommandSubscribe_Exclusive) 22 | SharedSubscription = SubscriptionType(pb.CommandSubscribe_Shared) 23 | ) 24 | 25 | // InitialPosition ... 26 | type InitialPosition pb.CommandSubscribe_InitialPosition 27 | 28 | // InitialPositionCallback declares a callback that allows a client to specify 29 | // a start position or message for every discovered topic when using topic 30 | // pattern subscriptions. 31 | type InitialPositionCallback func(topic string) (position InitialPosition, StartMessageID []byte, err error) 32 | 33 | // Subscription initial position options. 34 | const ( 35 | // LatestPosition starts reading from the topic end, only getting 36 | // messages published after the reader was created. 37 | LatestPosition = InitialPosition(pb.CommandSubscribe_Latest) 38 | // EarliestPosition starts reading from the earliest message 39 | // available in the topic. 40 | EarliestPosition = InitialPosition(pb.CommandSubscribe_Earliest) 41 | ) 42 | 43 | // ConsumerConfig is a configuration object used to create new instances 44 | // of Consumer. 45 | type ConsumerConfig struct { 46 | // The topic name to read messages from. 47 | Topic string 48 | 49 | // A regular expression for topics to read messages from. 50 | TopicPattern string 51 | 52 | // Interval in ms in which the client checks for topic changes 53 | // that match the set topic pattern and updates the subscriptions. 54 | // Default is 30000 55 | TopicPatternDiscoveryInterval int 56 | 57 | // A unique name for the subscription. If not specified, a random name 58 | // will be used. 59 | Subscription string 60 | 61 | // A unique name for the Consumer. If not specified, a random name 62 | // will be used. 63 | Name string 64 | 65 | // Select the subscription type to be used when subscribing to the topic. 66 | // Default is `Subscribe_Exclusive` 67 | Type SubscriptionType 68 | 69 | // Signal whether the subscription will initialize on latest 70 | // or earliest position. 
71 | 	InitialPosition InitialPosition
72 | 
73 | 	// Callback function for every discovered topic when using a topic
74 | 	// pattern to allow the client to specify an initial position and
75 | 	// start message ID for the topic.
76 | 	InitialPositionCallback InitialPositionCallback
77 | 
78 | 	// If specified, the subscription will position the cursor
79 | 	// on the particular message id and will send messages from
80 | 	// that point.
81 | 	StartMessageID []byte
82 | 
83 | 	// Include the message StartMessageID in the read messages.
84 | 	// If StartMessageID is not set but InitialPosition is set
85 | 	// to LatestPosition, the latest message ID of the topic
86 | 	// will be sent.
87 | 	StartMessageIDInclusive bool
88 | 
89 | 	// Signal whether the subscription should be backed by a
90 | 	// durable cursor or not. For Readers, set to false; for
91 | 	// Consumers, set Durable to true and specify a Subscription.
92 | 	// If Durable is true, StartMessageID will be ignored, as it
93 | 	// will be determined by the broker.
94 | 	Durable bool
95 | 
96 | 	// If true, the subscribe operation will cause a topic to be
97 | 	// created if it does not exist already (and if topic auto-creation
98 | 	// is allowed by the broker).
99 | 	// If false, the subscribe operation will fail if the topic
100 | 	// does not exist.
101 | 	ForceTopicCreation bool
102 | 
103 | 	// MessageChannel sets a channel that receives all messages that the
104 | 	// consumer receives. If not set, a default channel for 1000 messages
105 | 	// will be created.
106 | 	MessageChannel chan *Message
107 | }
108 | 
109 | // Consumer provides a high-level API for consuming messages from Pulsar.
110 | type Consumer interface {
111 | 	// Close closes the subscription and unregisters from the Client.
112 | 	Close() error
113 | 
114 | 	AckMessage(*Message) error
115 | 	// ReadMessage reads and returns the next message from Pulsar.
116 | 	ReadMessage(context.Context) (*Message, error)
117 | 	SeekMessage(*Message) error
118 | 	// HasNext returns whether there is a message available to read.
119 | 	HasNext() bool
120 | 
121 | 	// LastMessageID returns the last message ID of the topic.
122 | 	// If the topic is empty, EntryId will be math.MaxUint64.
123 | 	LastMessageID() (*MessageID, error)
124 | }
125 | 
126 | type consumerCloser interface {
127 | 	CloseConsumer(consumerID uint64) error
128 | }
129 | 
130 | type consumerState int
131 | 
132 | const (
133 | 	consumerInit consumerState = iota
134 | 	consumerReady
135 | 	consumerSubscribed
136 | 	consumerClosing
137 | 	consumerClosed
138 | )
139 | 
140 | type consumer struct {
141 | 	ctx  context.Context
142 | 	conn clientConn
143 | 	req  *requests
144 | 
145 | 	// subscribe options
146 | 	topic                   string
147 | 	subscription            *string
148 | 	name                    *string
149 | 	subType                 pb.CommandSubscribe_SubType
150 | 	initialPosition         pb.CommandSubscribe_InitialPosition
151 | 	startMessageID          *pb.MessageIdData
152 | 	startMessageIDInclusive bool
153 | 	startMessageIDSeekDone  bool
154 | 	forceTopicCreation      *bool
155 | 	durable                 *bool
156 | 	incomingMessages        chan *Message
157 | 
158 | 	consumerID uint64
159 | 	connected  chan error
160 | 	state      consumerState
161 | 	stateMu    sync.RWMutex
162 | 	closer     consumerCloser
163 | 	multi      *multiTopicConsumer // set if managed by a multi topic consumer
164 | 
165 | 	messagePermits uint32
166 | 	usedPermits    uint32
167 | 	permitsMu      sync.Mutex
168 | }
169 | 
170 | // Validate validates the config properties.
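//
// For example (topic names are placeholders), Topic and TopicPattern are
// mutually exclusive, so the following config fails validation:
//
//	cfg := ConsumerConfig{Topic: "events", TopicPattern: "events-.*"}
//	err := cfg.Validate() // returns "topic and topic pattern are exclusive"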
171 | func (config *ConsumerConfig) Validate() error { 172 | if config.StartMessageID != nil { 173 | id := &pb.MessageIdData{} 174 | if err := proto.Unmarshal(config.StartMessageID, id); err != nil { 175 | return fmt.Errorf("start message id unmarshalling: %w", err) 176 | } 177 | } 178 | if config.TopicPattern == "" { 179 | if config.Topic == "" { 180 | return errors.New("topic is not set") 181 | } 182 | } else { 183 | if config.Topic != "" { 184 | return errors.New("topic and topic pattern are exclusive") 185 | } 186 | if _, err := regexp.Compile(config.TopicPattern); err != nil { 187 | return fmt.Errorf("topic pattern regular expression compiling: %w", err) 188 | } 189 | if config.TopicPatternDiscoveryInterval < 0 { 190 | return errors.New("invalid topic pattern discovery interval set") 191 | } 192 | if config.StartMessageID != nil { 193 | return errors.New("start message id not valid for pattern consumer") 194 | } 195 | } 196 | return nil 197 | } 198 | 199 | // newConsumer creates and returns a new Consumer configured with config. 200 | func newConsumer(closer consumerCloser, conn brokerConnection, config ConsumerConfig, consumerID uint64) (*consumer, error) { 201 | c := &consumer{ 202 | ctx: conn.ctx, 203 | conn: conn.conn, 204 | req: conn.req, 205 | 206 | topic: config.Topic, 207 | subType: pb.CommandSubscribe_SubType(config.Type), 208 | initialPosition: pb.CommandSubscribe_InitialPosition(config.InitialPosition), 209 | startMessageIDInclusive: config.StartMessageIDInclusive, 210 | forceTopicCreation: proto.Bool(config.ForceTopicCreation), 211 | durable: proto.Bool(config.Durable), 212 | 213 | consumerID: consumerID, 214 | connected: make(chan error, 1), 215 | state: consumerInit, 216 | closer: closer, 217 | } 218 | 219 | if config.MessageChannel == nil { 220 | c.incomingMessages = make(chan *Message, 1000) 221 | } else { 222 | c.incomingMessages = config.MessageChannel 223 | } 224 | if cap(c.incomingMessages) > math.MaxUint32 { 225 | c.messagePermits = math.MaxUint32 226 | } else { 227 | c.messagePermits = uint32(cap(c.incomingMessages)) 228 | } 229 | 230 | if !config.Durable { 231 | if config.StartMessageID != nil { 232 | id := &pb.MessageIdData{} 233 | if err := proto.Unmarshal(config.StartMessageID, id); err != nil { 234 | return nil, err 235 | } 236 | c.startMessageID = id 237 | } else { 238 | if config.InitialPosition == EarliestPosition { 239 | c.startMessageID = earliestMessageID 240 | } else { 241 | c.startMessageID = latestMessageID 242 | } 243 | } 244 | } 245 | 246 | if config.Name == "" { 247 | c.name = proto.String(randomConsumerName()) 248 | } else { 249 | c.name = proto.String(config.Name) 250 | } 251 | if config.Subscription == "" { 252 | c.subscription = proto.String(randomSubscriptionName()) 253 | } else { 254 | c.subscription = proto.String(config.Subscription) 255 | } 256 | 257 | return c, nil 258 | } 259 | 260 | func (c *consumer) topicLookupFinished(cmd *command) error { 261 | if cmd.err != nil { 262 | return cmd.err 263 | } 264 | 265 | id := c.req.newID() 266 | 267 | if c.startMessageIDInclusive && c.startMessageID == latestMessageID && !c.startMessageIDSeekDone { 268 | c.req.addCallback(id, c.getLastMessageID) 269 | } else { 270 | c.req.addCallback(id, c.subscribedSendFlowCommand) 271 | } 272 | 273 | c.stateMu.Lock() 274 | c.state = consumerReady 275 | c.stateMu.Unlock() 276 | 277 | return c.sendSubscribeCommand(cmd.custom, id) 278 | } 279 | 280 | func (c *consumer) getLastMessageID(base *command) error { 281 | if base.err != nil { 282 | c.connected <- base.err 
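// the send on c.connected above unblocks callers that are waiting for the
// subscription attempt to finish, e.g. SeekMessage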
283 | return base.err 284 | } 285 | 286 | reqID := c.req.newID() 287 | cmd := newGetLastMessageIDCommand(c.consumerID, reqID) 288 | c.req.addCallback(reqID, c.seekToLastMessageID) 289 | return c.conn.WriteCommand(cmd, nil) 290 | } 291 | 292 | func (c *consumer) seekToLastMessageID(cmd *command) error { 293 | c.startMessageIDSeekDone = true 294 | 295 | if cmd.err != nil { 296 | return cmd.err 297 | } 298 | if *cmd.GetLastMessageIdResponse.LastMessageId.EntryId == math.MaxUint64 { 299 | return c.subscribedSendFlowCommand(cmd) // empty topic 300 | } 301 | 302 | reqID := c.req.newID() 303 | return c.sendSeek(reqID, cmd.GetLastMessageIdResponse.LastMessageId) 304 | } 305 | 306 | func (c *consumer) Close() error { 307 | return c.closer.CloseConsumer(c.consumerID) 308 | } 309 | 310 | func (c *consumer) reset(newConsumerID uint64) { 311 | c.consumerID = newConsumerID 312 | } 313 | 314 | func (c *consumer) ReadMessage(ctx context.Context) (*Message, error) { 315 | select { 316 | case <-ctx.Done(): 317 | return nil, ctx.Err() 318 | case <-c.ctx.Done(): 319 | return nil, c.ctx.Err() 320 | 321 | case m, ok := <-c.incomingMessages: 322 | if !ok { 323 | return nil, io.EOF 324 | } 325 | 326 | return m, nil 327 | } 328 | } 329 | 330 | func (c *consumer) useMessagePermits(permits uint64) error { 331 | hasPermits := false 332 | c.permitsMu.Lock() 333 | if permits <= math.MaxUint32 && uint64(c.usedPermits)+permits <= math.MaxUint32 { 334 | c.usedPermits += uint32(permits) 335 | hasPermits = c.usedPermits < c.messagePermits 336 | } 337 | c.permitsMu.Unlock() 338 | 339 | if hasPermits { 340 | return nil 341 | } 342 | return c.sendFlowCommand() 343 | } 344 | 345 | func (c *consumer) HasNext() bool { 346 | return len(c.incomingMessages) > 0 347 | } 348 | 349 | func (c *consumer) SeekMessage(msg *Message) error { 350 | id := &pb.MessageIdData{ 351 | LedgerId: msg.ID.LedgerId, 352 | EntryId: msg.ID.EntryId, 353 | } 354 | 355 | reqID := c.req.newID() 356 | 357 | ch := make(chan error, 1) 358 | c.req.addCallback(reqID, func(cmd *command) error { 359 | ch <- cmd.err 360 | return cmd.err 361 | }) 362 | 363 | c.startMessageID = id 364 | if err := c.sendSeek(reqID, id); err != nil { 365 | return err 366 | } 367 | 368 | err := <-ch 369 | 370 | // the server issues a close consumer command, wait for the 371 | // automatic reconnection 372 | e := <-c.connected 373 | if err == nil { 374 | err = e 375 | } 376 | 377 | return err 378 | } 379 | 380 | func (c *consumer) sendSeek(reqID uint64, id *pb.MessageIdData) error { 381 | base := &pb.BaseCommand{ 382 | Type: pb.BaseCommand_SEEK.Enum(), 383 | Seek: &pb.CommandSeek{ 384 | ConsumerId: proto.Uint64(c.consumerID), 385 | RequestId: proto.Uint64(reqID), 386 | MessageId: id, 387 | }, 388 | } 389 | return c.conn.WriteCommand(base, nil) 390 | } 391 | 392 | func (c *consumer) AckMessage(msg *Message) error { 393 | return c.sendAck(msg.ID) 394 | } 395 | 396 | func (c *consumer) sendAck(id *MessageID) error { 397 | base := &pb.BaseCommand{ 398 | Type: pb.BaseCommand_ACK.Enum(), 399 | Ack: &pb.CommandAck{ 400 | ConsumerId: proto.Uint64(c.consumerID), 401 | AckType: pb.CommandAck_Individual.Enum(), 402 | MessageId: []*pb.MessageIdData{(*pb.MessageIdData)(id)}, 403 | }, 404 | } 405 | return c.conn.WriteCommand(base, nil) 406 | } 407 | 408 | func (c *consumer) sendSubscribeCommand(topic string, reqID uint64) error { 409 | base := &pb.BaseCommand{ 410 | Type: pb.BaseCommand_SUBSCRIBE.Enum(), 411 | Subscribe: &pb.CommandSubscribe{ 412 | Topic: &topic, 413 | Subscription: c.subscription, 414 
| SubType: &c.subType, 415 | ConsumerId: proto.Uint64(c.consumerID), 416 | RequestId: proto.Uint64(reqID), 417 | ConsumerName: c.name, 418 | Durable: c.durable, 419 | StartMessageId: c.startMessageID, 420 | ReadCompacted: proto.Bool(false), 421 | InitialPosition: &c.initialPosition, 422 | ForceTopicCreation: c.forceTopicCreation, 423 | }, 424 | } 425 | return c.conn.WriteCommand(base, nil) 426 | } 427 | 428 | func (c *consumer) subscribedSendFlowCommand(cmd *command) error { 429 | defer func() { 430 | c.connected <- cmd.err 431 | }() 432 | if cmd.err != nil { 433 | return cmd.err 434 | } 435 | c.stateMu.Lock() 436 | c.state = consumerSubscribed 437 | c.stateMu.Unlock() 438 | return c.sendFlowCommand() 439 | } 440 | 441 | func (c *consumer) sendFlowCommand() error { 442 | c.permitsMu.Lock() 443 | c.usedPermits = 0 444 | c.permitsMu.Unlock() 445 | 446 | base := &pb.BaseCommand{ 447 | Type: pb.BaseCommand_FLOW.Enum(), 448 | Flow: &pb.CommandFlow{ 449 | ConsumerId: proto.Uint64(c.consumerID), 450 | MessagePermits: proto.Uint32(c.messagePermits), 451 | }, 452 | } 453 | return c.conn.WriteCommand(base, nil) 454 | } 455 | 456 | func (c *consumer) clearReceiverQueue() { 457 | for { 458 | select { 459 | case _, ok := <-c.incomingMessages: 460 | if !ok { 461 | return 462 | } 463 | default: 464 | return 465 | } 466 | } 467 | } 468 | 469 | func (c *consumer) LastMessageID() (*MessageID, error) { 470 | reqID := c.req.newID() 471 | cmd := newGetLastMessageIDCommand(c.consumerID, reqID) 472 | 473 | var msgID *MessageID 474 | respHandler := func(resp *command) error { 475 | if resp.err == nil && resp.GetLastMessageIdResponse != nil { 476 | msgID = (*MessageID)(resp.GetLastMessageIdResponse.LastMessageId) 477 | } 478 | return resp.err 479 | } 480 | 481 | err := c.conn.SendCallbackCommand(c.req, reqID, cmd, respHandler) 482 | if err != nil { 483 | return nil, err 484 | } 485 | 486 | return msgID, nil 487 | } 488 | -------------------------------------------------------------------------------- /consumer_multitopic.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "io" 7 | "regexp" 8 | "sync/atomic" 9 | ) 10 | 11 | // ErrConsumerOfMessageNotFound is returned when the consumer for a received 12 | // message is not found. 
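// Callers can test for it with errors.Is (sketch):
//
//	if err := consumer.AckMessage(msg); errors.Is(err, ErrConsumerOfMessageNotFound) {
//		// the owning consumer is gone, e.g. it was closed concurrently
//	}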
13 | var ErrConsumerOfMessageNotFound = errors.New("consumer of message not found") 14 | 15 | type multiTopicConsumer struct { 16 | ctx context.Context 17 | closing atomic.Bool 18 | 19 | topicPattern *regexp.Regexp 20 | incomingMessages chan *Message 21 | consumers *consumerRegistry 22 | closer consumerCloser 23 | } 24 | 25 | func newMultiTopicConsumer(closer consumerCloser, conn brokerConnection, config ConsumerConfig) (*multiTopicConsumer, error) { 26 | c := &multiTopicConsumer{ 27 | ctx: conn.ctx, 28 | consumers: newConsumerRegistry(), 29 | closer: closer, 30 | } 31 | 32 | if config.MessageChannel == nil { 33 | c.incomingMessages = make(chan *Message, 1000) 34 | } else { 35 | c.incomingMessages = config.MessageChannel 36 | } 37 | 38 | var err error 39 | c.topicPattern, err = regexp.Compile(config.TopicPattern) 40 | if err != nil { 41 | return nil, err 42 | } 43 | 44 | return c, nil 45 | } 46 | 47 | func (c *multiTopicConsumer) addConsumer(id uint64, consumer *consumer) { 48 | c.consumers.add(id, consumer) 49 | } 50 | 51 | func (c *multiTopicConsumer) changeConsumerID(consumer *consumer, oldID, newID uint64) { 52 | c.consumers.add(newID, consumer) 53 | c.consumers.delete(oldID) 54 | } 55 | 56 | func (c *multiTopicConsumer) Close() error { 57 | if !c.closing.CompareAndSwap(false, true) { 58 | return nil 59 | } 60 | 61 | c.consumers.mu.RLock() 62 | defer c.consumers.mu.RUnlock() 63 | 64 | for _, con := range c.consumers.consumers { 65 | _ = c.closer.CloseConsumer(con.consumerID) 66 | } 67 | 68 | return nil 69 | } 70 | 71 | func (c *multiTopicConsumer) ReadMessage(ctx context.Context) (*Message, error) { 72 | select { 73 | case <-ctx.Done(): 74 | return nil, ctx.Err() 75 | case <-c.ctx.Done(): 76 | return nil, c.ctx.Err() 77 | 78 | case m, ok := <-c.incomingMessages: 79 | if !ok { 80 | return nil, io.EOF 81 | } 82 | 83 | return m, nil 84 | } 85 | } 86 | 87 | func (c *multiTopicConsumer) SeekMessage(_ *Message) error { 88 | return errors.New("seek not supported in multi topic consumer") 89 | } 90 | 91 | func (c *multiTopicConsumer) AckMessage(msg *Message) error { 92 | consumer, ok := c.consumers.get(msg.consumerID) 93 | if !ok { 94 | if c.closing.Load() { 95 | return nil 96 | } 97 | return ErrConsumerOfMessageNotFound 98 | } 99 | 100 | return consumer.AckMessage(msg) 101 | } 102 | 103 | func (c *multiTopicConsumer) HasNext() bool { 104 | return len(c.incomingMessages) > 0 105 | } 106 | 107 | func (c *multiTopicConsumer) LastMessageID() (*MessageID, error) { 108 | return nil, errors.New("last message id not supported in multi topic consumer") 109 | } 110 | -------------------------------------------------------------------------------- /consumer_multitopic_test.go: -------------------------------------------------------------------------------- 1 | //go:build integration 2 | 3 | package pulsar 4 | 5 | import ( 6 | "context" 7 | "sort" 8 | "strings" 9 | "testing" 10 | "time" 11 | 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | ) 15 | 16 | func TestConsumerTopicPattern(t *testing.T) { 17 | client := setup(t) 18 | defer func() { 19 | assert.NoError(t, client.Close()) 20 | }() 21 | 22 | topic := randomTopicName() 23 | 24 | producer1, _ := newTestProducer(t, client, topic+"-1") 25 | msg1 := sendMessage(t, producer1, "hello world 1") 26 | 27 | producer2, _ := newTestProducer(t, client, topic+"-2") 28 | msg2 := sendMessage(t, producer2, "hello world 2") 29 | 30 | consConf := ConsumerConfig{ 31 | TopicPattern: topic + "-.*", 32 | InitialPosition: 
EarliestPosition, 33 | } 34 | 35 | ctx := context.Background() 36 | consumer, err := client.NewConsumer(ctx, consConf) 37 | require.NoError(t, err) 38 | 39 | m1, err := consumer.ReadMessage(ctx) 40 | require.NoError(t, err) 41 | require.NotNil(t, m1) 42 | m2, err := consumer.ReadMessage(ctx) 43 | require.NoError(t, err) 44 | require.NotNil(t, m2) 45 | 46 | assert.NoError(t, consumer.AckMessage(m1)) 47 | assert.NoError(t, consumer.AckMessage(m2)) 48 | 49 | messages := []string{string(m1.Body), string(m2.Body)} 50 | m := map[string]*Message{ 51 | string(m1.Body): m1, 52 | string(m2.Body): m2, 53 | } 54 | sort.Strings(messages) 55 | assert.Equal(t, []string{string(msg1.Body), string(msg2.Body)}, messages) 56 | 57 | t1, err := NewTopic(m[messages[0]].Topic) 58 | require.NoError(t, err) 59 | assert.Equal(t, topic+"-1", t1.LocalName) 60 | 61 | t2, err := NewTopic(m[messages[1]].Topic) 62 | require.NoError(t, err) 63 | assert.NotNil(t, t2) 64 | assert.Equal(t, topic+"-2", t2.LocalName) 65 | } 66 | 67 | func TestConsumerTopicPatternDiscovery(t *testing.T) { 68 | client := setup(t) 69 | defer func() { 70 | assert.NoError(t, client.Close()) 71 | }() 72 | 73 | topic := randomTopicName() 74 | 75 | consConf := ConsumerConfig{ 76 | TopicPattern: topic + "-.*", 77 | TopicPatternDiscoveryInterval: 500, 78 | InitialPosition: EarliestPosition, 79 | } 80 | 81 | ctx := context.Background() 82 | consumer, err := client.NewConsumer(ctx, consConf) 83 | require.NoError(t, err) 84 | 85 | time.Sleep(time.Second) 86 | 87 | producer, _ := newTestProducer(t, client, topic+"-1") 88 | msg := sendMessage(t, producer, "hello world") 89 | 90 | m, err := consumer.ReadMessage(ctx) 91 | require.NoError(t, err) 92 | require.NotNil(t, m) 93 | 94 | require.NoError(t, consumer.AckMessage(m)) 95 | assert.Equal(t, msg.Body, m.Body) 96 | } 97 | 98 | func TestConsumerTopicPatternInitialPosition(t *testing.T) { 99 | client := setup(t) 100 | defer func() { 101 | assert.NoError(t, client.Close()) 102 | }() 103 | 104 | topic := randomTopicName() 105 | 106 | cb := func(topic string) (position InitialPosition, StartMessageID []byte, err error) { 107 | if strings.HasSuffix(topic, "1") { 108 | return EarliestPosition, nil, nil 109 | } 110 | return LatestPosition, nil, nil 111 | } 112 | 113 | consConf := ConsumerConfig{ 114 | TopicPattern: topic + "-.*", 115 | TopicPatternDiscoveryInterval: 500, 116 | InitialPositionCallback: cb, 117 | StartMessageIDInclusive: true, 118 | } 119 | 120 | ctx := context.Background() 121 | consumer, err := client.NewConsumer(ctx, consConf) 122 | require.NoError(t, err) 123 | 124 | time.Sleep(time.Second) 125 | 126 | producer1, _ := newTestProducer(t, client, topic+"-1") 127 | sendMessage(t, producer1, "hello world 1a") 128 | sendMessage(t, producer1, "hello world 1b") 129 | producer2, _ := newTestProducer(t, client, topic+"-2") 130 | sendMessage(t, producer2, "hello world 2a") 131 | sendMessage(t, producer2, "hello world 2b") 132 | 133 | m1, err := consumer.ReadMessage(ctx) 134 | require.NoError(t, err) 135 | require.NotNil(t, m1) 136 | require.NoError(t, consumer.AckMessage(m1)) 137 | 138 | m2, err := consumer.ReadMessage(ctx) 139 | require.NoError(t, err) 140 | require.NotNil(t, m2) 141 | require.NoError(t, consumer.AckMessage(m2)) 142 | 143 | m3, err := consumer.ReadMessage(ctx) 144 | require.NoError(t, err) 145 | require.NotNil(t, m3) 146 | require.NoError(t, consumer.AckMessage(m3)) 147 | 148 | messages := []string{string(m1.Body), string(m2.Body), string(m3.Body)} 149 | sort.Strings(messages) 150 | 
assert.Equal(t, []string{"hello world 1a", "hello world 1b", "hello world 2b"}, messages) 151 | } 152 | -------------------------------------------------------------------------------- /consumer_registry.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | ) 7 | 8 | type consumerRegistry struct { 9 | consumerIDs uint64 10 | mu sync.RWMutex 11 | consumers map[uint64]*consumer 12 | } 13 | 14 | func newConsumerRegistry() *consumerRegistry { 15 | return &consumerRegistry{ 16 | mu: sync.RWMutex{}, 17 | consumers: map[uint64]*consumer{}, 18 | } 19 | } 20 | 21 | func (r *consumerRegistry) newID() uint64 { 22 | id := atomic.AddUint64(&r.consumerIDs, 1) 23 | return id 24 | } 25 | 26 | func (r *consumerRegistry) add(id uint64, consumer *consumer) { 27 | r.mu.Lock() 28 | r.consumers[id] = consumer 29 | r.mu.Unlock() 30 | } 31 | 32 | func (r *consumerRegistry) delete(id uint64) { 33 | r.mu.Lock() 34 | delete(r.consumers, id) 35 | r.mu.Unlock() 36 | } 37 | 38 | func (r *consumerRegistry) get(id uint64) (consumer *consumer, ok bool) { 39 | r.mu.RLock() 40 | consumer, ok = r.consumers[id] 41 | r.mu.RUnlock() 42 | return consumer, ok 43 | } 44 | 45 | func (r *consumerRegistry) getAndDelete(id uint64) (consumer *consumer, ok bool) { 46 | r.mu.Lock() 47 | consumer, ok = r.consumers[id] 48 | if ok { 49 | delete(r.consumers, id) 50 | } 51 | r.mu.Unlock() 52 | return consumer, ok 53 | } 54 | 55 | func (r *consumerRegistry) all() []*consumer { 56 | r.mu.RLock() 57 | consumers := make([]*consumer, 0, len(r.consumers)) 58 | for _, cons := range r.consumers { 59 | consumers = append(consumers, cons) 60 | } 61 | r.mu.RUnlock() 62 | return consumers 63 | } 64 | -------------------------------------------------------------------------------- /consumer_test.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestConsumerConfigValidate(t *testing.T) { 10 | conf := &ConsumerConfig{ 11 | Topic: randomTopicName(), 12 | } 13 | 14 | err := conf.Validate() 15 | require.NoError(t, err) 16 | } 17 | -------------------------------------------------------------------------------- /dialer.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "context" 5 | "net" 6 | ) 7 | 8 | type dialer func(ctx context.Context, log Logger, host string) (*conn, error) 9 | 10 | func defaultDialer(ctx context.Context, log Logger, host string) (*conn, error) { 11 | dial := net.Dialer{} 12 | netConn, err := dial.DialContext(ctx, "tcp", host) 13 | if err != nil { 14 | return nil, err 15 | } 16 | 17 | c := newConn(log, netConn) 18 | return c, nil 19 | } 20 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/cornelk/pulsar-go 2 | 3 | go 1.22 4 | 5 | require ( 6 | github.com/stretchr/testify v1.10.0 7 | google.golang.org/protobuf v1.36.5 8 | ) 9 | 10 | require ( 11 | github.com/davecgh/go-spew v1.1.1 // indirect 12 | github.com/kr/pretty v0.1.0 // indirect 13 | github.com/pmezard/go-difflib v1.0.0 // indirect 14 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect 15 | gopkg.in/yaml.v3 v3.0.1 // indirect 16 | ) 17 | 
-------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= 4 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 5 | github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 6 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 7 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 8 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 9 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 10 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 11 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 12 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 13 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 14 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= 15 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 16 | google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= 17 | google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 18 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 19 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= 20 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 21 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 22 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 23 | -------------------------------------------------------------------------------- /helper.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "math" 5 | "math/rand" 6 | "strings" 7 | "time" 8 | 9 | pb "github.com/cornelk/pulsar-go/proto" 10 | "google.golang.org/protobuf/proto" 11 | ) 12 | 13 | var earliestMessageID = &pb.MessageIdData{ 14 | LedgerId: proto.Uint64(math.MaxUint64), 15 | EntryId: proto.Uint64(math.MaxUint64), 16 | Partition: proto.Int32(-1), 17 | } 18 | 19 | var latestMessageID = &pb.MessageIdData{ 20 | LedgerId: proto.Uint64(math.MaxInt64), 21 | EntryId: proto.Uint64(math.MaxInt64), 22 | Partition: proto.Int32(-1), 23 | } 24 | 25 | // randomString returns a random string of n characters. 
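// A usage sketch, matching the helpers below:
//
//	src := rand.NewSource(time.Now().UnixNano())
//	s := randomString(src, 8) // 8 random letters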
26 | func randomString(src rand.Source, n int) string { 27 | const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" 28 | const ( 29 | letterIdxBits = 6 // 6 bits to represent a letter index 30 | letterIdxMask = 1<<letterIdxBits - 1 // all 1-bits, as many as letterIdxBits 31 | letterIdxMax = 63 / letterIdxBits // number of letter indices fitting in 63 bits 32 | ) 33 | 34 | sb := strings.Builder{} 35 | sb.Grow(n) 36 | for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { 37 | if remain == 0 { 38 | cache, remain = src.Int63(), letterIdxMax 39 | } 40 | if idx := int(cache & letterIdxMask); idx < len(letterBytes) { 41 | sb.WriteByte(letterBytes[idx]) 42 | i-- 43 | } 44 | cache >>= letterIdxBits 45 | remain-- 46 | } 47 | 48 | return sb.String() 49 | } 50 | 51 | func randomConsumerName() string { 52 | rnd := rand.NewSource(time.Now().UnixNano()) 53 | s := "consumer-" + randomString(rnd, 8) 54 | return s 55 | } 56 | 57 | func randomSubscriptionName() string { 58 | rnd := rand.NewSource(time.Now().UnixNano()) 59 | s := "sub-" + randomString(rnd, 8) 60 | return s 61 | } 62 | 63 | func randomTopicName() string { 64 | rnd := rand.NewSource(time.Now().UnixNano()) 65 | s := "topic-" + randomString(rnd, 8) 66 | return s 67 | } 68 | -------------------------------------------------------------------------------- /logger.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "io" 5 | "log" 6 | ) 7 | 8 | // Logger defines the logging interface used by the client. 9 | type Logger interface { 10 | Debugf(format string, args ...any) 11 | Errorf(format string, args ...any) 12 | } 13 | 14 | type logger struct { 15 | logger *log.Logger 16 | } 17 | 18 | func newLogger() logger { 19 | return logger{ 20 | logger: log.New(io.Discard, "[Pulsar] ", log.LstdFlags), 21 | } 22 | } 23 | 24 | func (l logger) Debugf(format string, args ...any) { 25 | l.logger.Printf(format, args...) 26 | } 27 | 28 | func (l logger) Errorf(format string, args ...any) { 29 | l.logger.Printf(format, args...) 30 | } 31 | -------------------------------------------------------------------------------- /logger_test.go: -------------------------------------------------------------------------------- 1 | //go:build integration 2 | 3 | package pulsar 4 | 5 | import ( 6 | "io" 7 | "log" 8 | "testing" 9 | ) 10 | 11 | type testLogger struct { 12 | logger *log.Logger 13 | testing.TB 14 | } 15 | 16 | // newTestLogger returns a new logger that logs to the provided testing.TB. 17 | func newTestLogger(tb testing.TB) testLogger { 18 | tb.Helper() 19 | return testLogger{ 20 | logger: log.New(io.Discard, "[Pulsar] ", log.LstdFlags), 21 | TB: tb, 22 | } 23 | } 24 | 25 | func (l testLogger) Debugf(format string, args ...interface{}) { 26 | l.TB.Logf(format, args...) 27 | } 28 | 29 | func (l testLogger) Errorf(format string, args ...interface{}) { 30 | l.TB.Errorf(format, args...) 31 | } 32 | -------------------------------------------------------------------------------- /message.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "encoding/binary" 5 | "fmt" 6 | "hash/crc32" 7 | 8 | pb "github.com/cornelk/pulsar-go/proto" 9 | "google.golang.org/protobuf/proto" 10 | ) 11 | 12 | // Message is a data structure representing Pulsar messages. 13 | type Message struct { 14 | consumerID uint64 // used to identify the consumer on Ack 15 | Body []byte 16 | Topic string 17 | ID *MessageID 18 | } 19 | 20 | // MessageID contains the ID of a message. 21 | type MessageID pb.MessageIdData 22 | 23 | // Marshal the ID. 24 | func (id *MessageID) Marshal() ([]byte, error) { 25 | return proto.Marshal((*pb.MessageIdData)(id)) 26 | } 27 | 28 | // Unmarshal the ID. 
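// Marshal and Unmarshal round-trip a message position, e.g. to persist it and
// later resume reading via ConsumerConfig.StartMessageID (sketch):
//
//	b, err := msg.ID.Marshal()
//	// ... store b, then later: ConsumerConfig{Topic: topic, StartMessageID: b}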
29 | func (id *MessageID) Unmarshal(b []byte) error { 30 | return proto.Unmarshal(b, (*pb.MessageIdData)(id)) 31 | } 32 | 33 | func getMessageMetaData(crcTable *crc32.Table, meta *pb.MessageMetadata, payload []byte) ([]byte, error) { 34 | serializedMeta, err := proto.Marshal(meta) 35 | if err != nil { 36 | return nil, fmt.Errorf("marshalling failed: %w", err) 37 | } 38 | 39 | size := uint32(len(serializedMeta)) 40 | headerContentSize := 2 + 4 + 4 + int(size) 41 | b := make([]byte, headerContentSize+len(payload)) 42 | 43 | // 2-byte magic number (0x0e01) identifying the current format 44 | binary.BigEndian.PutUint16(b, magicCrc32c) 45 | 46 | // size of the protobuf-serialized metadata 47 | binary.BigEndian.PutUint32(b[2+4:], size) 48 | 49 | // serialized metadata 50 | copy(b[2+4+4:], serializedMeta) 51 | 52 | // message payload 53 | copy(b[headerContentSize:], payload) 54 | 55 | checksum := crc32.Checksum(b[2+4:], crcTable) 56 | // CRC32-C checksum of the metadata size, metadata and payload 57 | binary.BigEndian.PutUint32(b[2:], checksum) 58 | 59 | return b, nil 60 | } 61 | 62 | func getBatchedMessagePayload(meta *pb.SingleMessageMetadata, msg *Message) ([]byte, error) { 63 | serialized, err := proto.Marshal(meta) 64 | if err != nil { 65 | return nil, fmt.Errorf("marshalling failed: %w", err) 66 | } 67 | 68 | b := make([]byte, 4+len(serialized)+len(msg.Body)) 69 | binary.BigEndian.PutUint32(b, uint32(len(serialized))) 70 | copy(b[4:], serialized) 71 | copy(b[4+len(serialized):], msg.Body) 72 | 73 | return b, nil 74 | } 75 | -------------------------------------------------------------------------------- /message_test.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "github.com/stretchr/testify/require" 8 | "google.golang.org/protobuf/proto" 9 | ) 10 | 11 | func TestMessageIDMarshal(t *testing.T) { 12 | m1 := MessageID{ 13 | LedgerId: proto.Uint64(1), 14 | EntryId: proto.Uint64(2), 15 | Partition: proto.Int32(3), 16 | } 17 | 18 | b, err := m1.Marshal() 19 | require.NoError(t, err) 20 | 21 | var m2 MessageID 22 | err = m2.Unmarshal(b) 23 | require.NoError(t, err) 24 | 25 | assert.Equal(t, *m1.LedgerId, *m2.LedgerId) 26 | assert.Equal(t, *m1.EntryId, *m2.EntryId) 27 | assert.Equal(t, *m1.Partition, *m2.Partition) 28 | } 29 | -------------------------------------------------------------------------------- /messaging_test.go: -------------------------------------------------------------------------------- 1 | //go:build integration 2 | 3 | package pulsar 4 | 5 | import ( 6 | "context" 7 | "math" 8 | "net/http" 9 | "strings" 10 | "testing" 11 | "time" 12 | 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/require" 15 | ) 16 | 17 | func setup(t *testing.T) *Client { 18 | t.Helper() 19 | client, err := NewClient("pulsar://localhost:6650", WithLogger(newTestLogger(t))) 20 | require.NoError(t, err) 21 | 22 | // make sure that the namespace exists to avoid the error from pulsar: 23 | // "Policies not found for public/default namespace". 
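// The HTTP status of the PUT below is not checked: a response for an
// already existing namespace is also fine for the tests.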
24 | ctx := context.Background() 25 | req, err := http.NewRequestWithContext(ctx, http.MethodPut, 26 | "http://localhost:8080/admin/v2/namespaces/public/default", nil) 27 | require.NoError(t, err) 28 | h := &http.Client{} 29 | resp, err := h.Do(req) 30 | require.NoError(t, err) 31 | err = resp.Body.Close() 32 | require.NoError(t, err) 33 | 34 | err = client.Dial(ctx) 35 | require.NoError(t, err) 36 | return client 37 | } 38 | 39 | func readMessageAndCompare(t *testing.T, consumer Consumer, expected *Message) *Message { 40 | t.Helper() 41 | ctx := context.Background() 42 | m, err := consumer.ReadMessage(ctx) 43 | require.NoError(t, err) 44 | require.NotNil(t, m) 45 | assert.Equal(t, expected.Body, m.Body) 46 | 47 | if expected.ID != nil { 48 | assert.Equal(t, expected.ID.LedgerId, m.ID.LedgerId) 49 | assert.Equal(t, expected.ID.EntryId, m.ID.EntryId) 50 | assert.Equal(t, expected.ID.Partition, m.ID.Partition) 51 | assert.Equal(t, expected.ID.BatchIndex, m.ID.BatchIndex) 52 | } 53 | return m 54 | } 55 | 56 | func TestSendReceiveEarliestPosition(t *testing.T) { 57 | client := setup(t) 58 | defer func() { 59 | assert.NoError(t, client.Close()) 60 | }() 61 | 62 | producer, topic := newTestProducer(t, client, "") 63 | 64 | msg1 := sendMessage(t, producer, "hello world 1") 65 | msg2 := sendMessage(t, producer, "hello world 2") 66 | 67 | consConf := ConsumerConfig{ 68 | Topic: topic, 69 | Subscription: "test-sub", 70 | InitialPosition: EarliestPosition, 71 | Durable: true, 72 | } 73 | 74 | ctx := context.Background() 75 | consumer, err := client.NewConsumer(ctx, consConf) 76 | require.NoError(t, err) 77 | 78 | m := readMessageAndCompare(t, consumer, msg1) 79 | topicDetail, err := NewTopic(m.Topic) 80 | require.NoError(t, err) 81 | assert.Equal(t, topic, topicDetail.LocalName) 82 | 83 | err = consumer.AckMessage(m) 84 | require.NoError(t, err) 85 | 86 | // restart consumer 87 | err = consumer.Close() 88 | require.NoError(t, err) 89 | consumer, err = client.NewConsumer(ctx, consConf) 90 | require.NoError(t, err) 91 | 92 | readMessageAndCompare(t, consumer, msg2) 93 | } 94 | 95 | func TestSendReceiveLatestPositionExclusive(t *testing.T) { 96 | client := setup(t) 97 | defer func() { 98 | assert.NoError(t, client.Close()) 99 | }() 100 | 101 | producer, topic := newTestProducer(t, client, "") 102 | 103 | sendMessage(t, producer, "hello world 1") 104 | 105 | consConf := ConsumerConfig{ 106 | Topic: topic, 107 | Subscription: "test-sub", 108 | InitialPosition: LatestPosition, 109 | Durable: true, 110 | } 111 | 112 | ctx := context.Background() 113 | consumer, err := client.NewConsumer(ctx, consConf) 114 | require.NoError(t, err) 115 | 116 | msg2 := sendMessage(t, producer, "hello world 2") 117 | 118 | // wait for message to be available 119 | timeout := time.After(1 * time.Second) 120 | <-timeout 121 | assert.True(t, consumer.HasNext()) 122 | 123 | readMessageAndCompare(t, consumer, msg2) 124 | } 125 | 126 | func TestSendReceiveLatestPositionInclusive(t *testing.T) { 127 | client := setup(t) 128 | defer func() { 129 | assert.NoError(t, client.Close()) 130 | }() 131 | 132 | producer, topic := newTestProducer(t, client, "") 133 | msg := sendMessage(t, producer, "hello world 1") 134 | 135 | consConf := ConsumerConfig{ 136 | Topic: topic, 137 | Subscription: "test-sub", 138 | InitialPosition: LatestPosition, 139 | StartMessageIDInclusive: true, 140 | } 141 | 142 | ctx := context.Background() 143 | consumer, err := client.NewConsumer(ctx, consConf) 144 | require.NoError(t, err) 145 | 146 | // wait for message 
to be available 147 | timeout := time.After(1 * time.Second) 148 | <-timeout 149 | require.True(t, consumer.HasNext()) 150 | 151 | readMessageAndCompare(t, consumer, msg) 152 | } 153 | 154 | func TestConsumerEmptyTopicLatestPositionInclusive(t *testing.T) { 155 | client := setup(t) 156 | defer func() { 157 | assert.NoError(t, client.Close()) 158 | }() 159 | 160 | consConf := ConsumerConfig{ 161 | Topic: randomTopicName(), 162 | Subscription: "test-sub", 163 | InitialPosition: LatestPosition, 164 | StartMessageIDInclusive: true, 165 | ForceTopicCreation: true, 166 | } 167 | 168 | ctx := context.Background() 169 | consumer, err := client.NewConsumer(ctx, consConf) 170 | require.NoError(t, err) 171 | 172 | // wait for message to be available 173 | timeout := time.After(1 * time.Second) 174 | <-timeout 175 | assert.False(t, consumer.HasNext()) 176 | } 177 | 178 | func TestConsumerNonExistingTopic(t *testing.T) { 179 | t.SkipNow() // TODO fix, use error streaming 180 | 181 | client := setup(t) 182 | defer func() { 183 | assert.NoError(t, client.Close()) 184 | }() 185 | 186 | consConf := ConsumerConfig{ 187 | Topic: randomTopicName(), 188 | } 189 | 190 | ctx := context.Background() 191 | _, err := client.NewConsumer(ctx, consConf) 192 | assert.True(t, strings.HasPrefix(err.Error(), "TopicNotFound:")) 193 | } 194 | 195 | func TestConsumerNothingToReceive(t *testing.T) { 196 | client := setup(t) 197 | defer func() { 198 | assert.NoError(t, client.Close()) 199 | }() 200 | 201 | topic := randomTopicName() 202 | consConf := ConsumerConfig{ 203 | Topic: topic, 204 | Subscription: "test-sub", 205 | InitialPosition: LatestPosition, 206 | Durable: true, 207 | ForceTopicCreation: true, 208 | } 209 | 210 | ctx := context.Background() 211 | consumer, err := client.NewConsumer(ctx, consConf) 212 | require.NoError(t, err) 213 | 214 | timeout := time.After(1 * time.Second) 215 | done := make(chan error) 216 | go func() { 217 | _, err := consumer.ReadMessage(ctx) 218 | done <- err 219 | }() 220 | 221 | select { 222 | case <-timeout: 223 | case err := <-done: 224 | require.Error(t, err) 225 | t.Fail() 226 | } 227 | assert.False(t, consumer.HasNext()) 228 | } 229 | 230 | func TestConsumerSeek(t *testing.T) { 231 | client := setup(t) 232 | defer func() { 233 | assert.NoError(t, client.Close()) 234 | }() 235 | 236 | producer, topic := newTestProducer(t, client, "") 237 | 238 | msg1 := sendMessage(t, producer, "hello world 1") 239 | msg2 := sendMessage(t, producer, "hello world 2") 240 | sendMessage(t, producer, "hello world 3") 241 | 242 | consConf := ConsumerConfig{ 243 | Topic: topic, 244 | Subscription: "test-sub", 245 | Type: SharedSubscription, 246 | InitialPosition: EarliestPosition, 247 | Durable: false, 248 | } 249 | 250 | ctx := context.Background() 251 | consumer, err := client.NewConsumer(ctx, consConf) 252 | require.NoError(t, err) 253 | 254 | readMessageAndCompare(t, consumer, msg1) 255 | 256 | readMessageAndCompare(t, consumer, msg2) 257 | 258 | err = consumer.SeekMessage(msg1) 259 | require.NoError(t, err) 260 | 261 | readMessageAndCompare(t, consumer, msg1) 262 | } 263 | 264 | func TestGetLastMessageID(t *testing.T) { 265 | client := setup(t) 266 | defer func() { 267 | assert.NoError(t, client.Close()) 268 | }() 269 | 270 | producer, topic := newTestProducer(t, client, "") 271 | 272 | consConf := ConsumerConfig{ 273 | Topic: topic, 274 | Subscription: "test-sub", 275 | Type: ExclusiveSubscription, 276 | InitialPosition: EarliestPosition, 277 | Durable: true, 278 | } 279 | 280 | ctx := 
context.Background() 281 | consumer, err := client.NewConsumer(ctx, consConf) 282 | require.NoError(t, err) 283 | 284 | messageID, err := consumer.LastMessageID() 285 | require.NoError(t, err) 286 | require.NotNil(t, messageID) 287 | assert.Equal(t, uint64(math.MaxUint64), *messageID.EntryId) 288 | 289 | sendMessage(t, producer, "hello world") 290 | messageID, err = consumer.LastMessageID() 291 | require.NoError(t, err) 292 | require.NotNil(t, messageID) 293 | assert.EqualValues(t, 0, *messageID.EntryId) 294 | } 295 | 296 | func TestConsumer_ReceiverQueueSize(t *testing.T) { 297 | client := setup(t) 298 | defer func() { 299 | assert.NoError(t, client.Close()) 300 | }() 301 | 302 | producer, topic := newTestProducer(t, client, "") 303 | 304 | receiverQueueSize := 1000 305 | messageCount := receiverQueueSize * 3 306 | messages := make([]*Message, messageCount) 307 | 308 | // Publish three times as many messages as the configured queue size. 309 | for i := 0; i < messageCount; i++ { 310 | messages[i] = sendMessage(t, producer, "hello world") 311 | } 312 | 313 | // Create an exclusive consumer for the topic. 314 | consConf := ConsumerConfig{ 315 | Topic: topic, 316 | Subscription: "test-sub", 317 | InitialPosition: EarliestPosition, 318 | Durable: true, 319 | MessageChannel: make(chan *Message, receiverQueueSize), 320 | } 321 | 322 | ctx := context.Background() 323 | consumer, err := client.NewConsumer(ctx, consConf) 324 | require.NoError(t, err) 325 | 326 | // Read and ack the available messages for this consumer. 327 | for i := 0; i < messageCount; i++ { 328 | m := readMessageAndCompare(t, consumer, messages[i]) 329 | require.NoError(t, consumer.AckMessage(m)) 330 | } 331 | } 332 | -------------------------------------------------------------------------------- /producer.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "errors" 7 | "fmt" 8 | "hash/crc32" 9 | "math" 10 | "sync" 11 | "time" 12 | 13 | pb "github.com/cornelk/pulsar-go/proto" 14 | "google.golang.org/protobuf/proto" 15 | ) 16 | 17 | // ProducerConfig is a configuration object used to create new instances 18 | // of Producer. 19 | type ProducerConfig struct { 20 | // The topic to write messages to. 21 | Topic string 22 | 23 | // The name of the producer. 24 | Name string 25 | 26 | // Limit on how many messages will be buffered before being sent as a batch. 27 | // 28 | // The default is a batch size of 100 messages. 29 | BatchSize int 30 | 31 | // Time limit on how often a batch that is not full yet will be flushed and 32 | // sent to Pulsar. 33 | // 34 | // The default is to flush every second. 35 | BatchTimeout time.Duration 36 | 37 | // Capacity of the internal producer message queue. 38 | // 39 | // The default is to use a queue capacity of 1000 messages. 40 | QueueCapacity int 41 | } 42 | 43 | const ( 44 | defaultBatchSize = 100 45 | defaultBatchTimeout = time.Second 46 | defaultQueueCapacity = 1000 47 | ) 48 | 49 | type producerCloser interface { 50 | CloseProducer(producerID uint64) error 51 | } 52 | 53 | // Producer provides a high-level API for sending messages to Pulsar. 
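// A typical flow (sketch; assumes a dialed Client, error handling omitted):
//
//	producer, _ := client.NewProducer(ctx, ProducerConfig{Topic: "my-topic"})
//	id, _ := producer.WriteMessage(ctx, []byte("payload"))
//	_ = producer.Close()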
54 | type Producer struct { 55 | ctx context.Context 56 | log Logger 57 | conn clientConn 58 | req *requests 59 | 60 | topic *Topic 61 | name *string 62 | batchSize int 63 | batchTimeout time.Duration 64 | 65 | producerID uint64 66 | pendingMessages chan *syncMessage 67 | sequenceID uint64 68 | sendResults map[uint64][]*sendResult 69 | sendResultsLock sync.Mutex 70 | 71 | crcTable *crc32.Table 72 | connected chan struct{} 73 | closer producerCloser 74 | } 75 | 76 | type syncMessage struct { 77 | msg *Message 78 | res *sendResult 79 | } 80 | 81 | type sendResult struct { 82 | ch chan struct{} // closing the channel marks this struct as filled 83 | err error 84 | id *pb.MessageIdData 85 | } 86 | 87 | // Validate method validates the config properties. 88 | func (config *ProducerConfig) Validate() error { 89 | if config.Topic == "" { 90 | return errors.New("topic is not set") 91 | } 92 | if _, err := NewTopic(config.Topic); err != nil { 93 | return fmt.Errorf("checking topic name: %w", err) 94 | } 95 | if config.BatchSize < 0 || config.BatchSize > math.MaxInt32 { 96 | return errors.New("invalid batch size") 97 | } 98 | if config.QueueCapacity < 0 { 99 | return errors.New("invalid queue capacity") 100 | } 101 | 102 | return nil 103 | } 104 | 105 | // newProducer creates and returns a new Producer configured with config. 106 | func newProducer(closer producerCloser, conn brokerConnection, config ProducerConfig, producerID uint64) (*Producer, error) { 107 | if err := config.Validate(); err != nil { 108 | return nil, fmt.Errorf("validating config: %w", err) 109 | } 110 | topic, err := NewTopic(config.Topic) 111 | if err != nil { 112 | return nil, err 113 | } 114 | 115 | p := &Producer{ 116 | ctx: conn.ctx, 117 | log: conn.log, 118 | conn: conn.conn, 119 | req: conn.req, 120 | 121 | topic: topic, 122 | name: proto.String(config.Name), 123 | batchSize: config.BatchSize, 124 | batchTimeout: config.BatchTimeout, 125 | 126 | producerID: producerID, 127 | sendResults: map[uint64][]*sendResult{}, 128 | 129 | crcTable: crc32.MakeTable(crc32.Castagnoli), 130 | connected: make(chan struct{}, 1), 131 | closer: closer, 132 | } 133 | 134 | queueCapacity := config.QueueCapacity 135 | if queueCapacity == 0 { 136 | queueCapacity = defaultQueueCapacity 137 | } 138 | p.pendingMessages = make(chan *syncMessage, queueCapacity) 139 | 140 | if p.batchSize == 0 { 141 | p.batchSize = defaultBatchSize 142 | } 143 | if p.batchTimeout.Nanoseconds() == 0 { 144 | p.batchTimeout = defaultBatchTimeout 145 | } 146 | 147 | return p, nil 148 | } 149 | 150 | func (p *Producer) topicReady(cmd *command) error { 151 | if cmd.err != nil { 152 | return cmd.err 153 | } 154 | 155 | reqID := p.req.newID() 156 | p.req.addCallback(reqID, p.handleProducerSuccess) 157 | return p.sendProduceCommand(reqID) 158 | } 159 | 160 | // Close stops writing messages and unregisters from the Client. 161 | func (p *Producer) Close() error { 162 | return p.closer.CloseProducer(p.producerID) 163 | } 164 | 165 | // WriteMessage puts the message into the message queue, blocks until the 166 | // message has been sent and an acknowledgement message is received from 167 | // Pulsar. 
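// Sketch (the returned MessageID identifies the stored message):
//
//	id, err := producer.WriteMessage(ctx, []byte("hello"))
//	if err == nil {
//		fmt.Println(*id.LedgerId, *id.EntryId)
//	}
//
// For fire-and-forget publishing that does not wait for the broker
// acknowledgement, see WriteMessageAsync below.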
168 | func (p *Producer) WriteMessage(ctx context.Context, msg []byte) (*MessageID, error) { 169 | res := &sendResult{ 170 | ch: make(chan struct{}), 171 | } 172 | m := &syncMessage{ 173 | msg: &Message{ 174 | Body: msg, 175 | }, 176 | res: res, 177 | } 178 | 179 | select { 180 | case <-ctx.Done(): 181 | return nil, ctx.Err() 182 | case <-p.ctx.Done(): 183 | return nil, p.ctx.Err() 184 | case p.pendingMessages <- m: 185 | <-res.ch 186 | return (*MessageID)(res.id), res.err 187 | } 188 | } 189 | 190 | // WriteMessageAsync puts the message into the message queue. 191 | // If the message queue is full, this function will block until it can write 192 | // to the queue. The queue size can be specified in the Producer options. 193 | func (p *Producer) WriteMessageAsync(ctx context.Context, msg []byte) error { 194 | m := &syncMessage{ 195 | msg: &Message{ 196 | Body: msg, 197 | }, 198 | } 199 | 200 | select { 201 | case <-ctx.Done(): 202 | return ctx.Err() 203 | case <-p.ctx.Done(): 204 | return p.ctx.Err() 205 | case p.pendingMessages <- m: 206 | return nil 207 | } 208 | } 209 | 210 | func (p *Producer) sendMessageCommand(batch []*syncMessage) error { 211 | seq := proto.Uint64(p.sequenceID) 212 | base := &pb.BaseCommand{ 213 | Type: pb.BaseCommand_SEND.Enum(), 214 | Send: &pb.CommandSend{ 215 | ProducerId: proto.Uint64(p.producerID), 216 | SequenceId: seq, 217 | NumMessages: proto.Int32(int32(len(batch))), 218 | }, 219 | } 220 | p.sequenceID++ 221 | 222 | p.addBatchToSendResults(batch, *seq) 223 | 224 | now := proto.Uint64(uint64(time.Now().UnixNano() / int64(time.Millisecond))) 225 | 226 | var messagePayload bytes.Buffer 227 | for _, msg := range batch { 228 | meta := &pb.SingleMessageMetadata{ 229 | PayloadSize: proto.Int32(int32(len(msg.msg.Body))), 230 | EventTime: now, 231 | } 232 | 233 | b, err := getBatchedMessagePayload(meta, msg.msg) 234 | if err != nil { 235 | return err 236 | } 237 | if _, err = messagePayload.Write(b); err != nil { 238 | return err 239 | } 240 | } 241 | 242 | msgMeta := &pb.MessageMetadata{ 243 | ProducerName: p.name, 244 | SequenceId: seq, 245 | PublishTime: now, 246 | Compression: pb.CompressionType_NONE.Enum(), 247 | UncompressedSize: proto.Uint32(uint32(messagePayload.Len())), 248 | NumMessagesInBatch: proto.Int32(int32(len(batch))), 249 | } 250 | 251 | payload, err := getMessageMetaData(p.crcTable, msgMeta, messagePayload.Bytes()) 252 | if err != nil { 253 | return err 254 | } 255 | 256 | return p.conn.WriteCommand(base, payload) 257 | } 258 | 259 | // addBatchToSendResults adds the batch to the send results callback if any 260 | // message of the batch wants to know results in a callback channel. 
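// The results are registered under the batch sequence ID; processSendResult
// later matches the broker response for that sequence ID against them and
// closes each result channel.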
261 | func (p *Producer) addBatchToSendResults(batch []*syncMessage, sequenceID uint64) { 262 | sendResults := make([]*sendResult, len(batch)) 263 | var sendResultCallbacks int 264 | 265 | for i, msg := range batch { 266 | if msg.res != nil { 267 | sendResults[i] = msg.res 268 | sendResultCallbacks++ 269 | } 270 | } 271 | 272 | if sendResultCallbacks == 0 { 273 | return 274 | } 275 | 276 | p.sendResultsLock.Lock() 277 | p.sendResults[sequenceID] = sendResults 278 | p.sendResultsLock.Unlock() 279 | } 280 | 281 | func (p *Producer) sendProduceCommand(reqID uint64) error { 282 | cmd := &pb.CommandProducer{ 283 | Topic: &p.topic.CompleteName, 284 | ProducerId: proto.Uint64(p.producerID), 285 | RequestId: proto.Uint64(reqID), 286 | } 287 | if *p.name == "" { 288 | cmd.UserProvidedProducerName = proto.Bool(false) 289 | } else { 290 | cmd.ProducerName = p.name 291 | cmd.UserProvidedProducerName = proto.Bool(true) 292 | } 293 | 294 | base := &pb.BaseCommand{ 295 | Type: pb.BaseCommand_PRODUCER.Enum(), 296 | Producer: cmd, 297 | } 298 | return p.conn.WriteCommand(base, nil) 299 | } 300 | 301 | func (p *Producer) handleProducerSuccess(cmd *command) error { 302 | defer close(p.connected) 303 | if cmd.err != nil { 304 | return cmd.err 305 | } 306 | 307 | p.name = proto.String(cmd.ProducerSuccess.GetProducerName()) 308 | p.sequenceID = uint64(cmd.ProducerSuccess.GetLastSequenceId() + 1) 309 | 310 | go p.messageProducerWorker() 311 | return nil 312 | } 313 | 314 | func (p *Producer) messageProducerWorker() { 315 | batch := make([]*syncMessage, 0, p.batchSize) 316 | 317 | timer := time.NewTimer(p.batchTimeout) 318 | defer timer.Stop() 319 | timerRunning := true 320 | 321 | for { 322 | mustFlush := false 323 | 324 | // reset the timer to tick in the given time interval after last send 325 | if timerRunning && !timer.Stop() { 326 | <-timer.C 327 | } 328 | timer.Reset(p.batchTimeout) 329 | timerRunning = true 330 | 331 | select { 332 | case <-p.ctx.Done(): 333 | return 334 | 335 | case m, ok := <-p.pendingMessages: 336 | if !ok { 337 | return 338 | } 339 | 340 | batch = append(batch, m) 341 | mustFlush = len(batch) >= p.batchSize 342 | 343 | case <-timer.C: 344 | timerRunning = false 345 | 346 | if len(batch) > 0 { 347 | mustFlush = true 348 | } 349 | } 350 | 351 | if !mustFlush { 352 | continue 353 | } 354 | 355 | if err := p.sendMessageCommand(batch); err != nil { 356 | p.log.Errorf("Sending message command failed: %s", err.Error()) 357 | } 358 | batch = batch[:0] 359 | } 360 | } 361 | 362 | func (p *Producer) processSendResult(sequenceID uint64, id *pb.MessageIdData, err error) error { 363 | p.sendResultsLock.Lock() 364 | results, ok := p.sendResults[sequenceID] 365 | delete(p.sendResults, sequenceID) 366 | p.sendResultsLock.Unlock() 367 | if !ok { 368 | return nil 369 | } 370 | 371 | if id != nil && id.Partition == nil { 372 | // set partition to -1 on not partitioned topics for the id having the 373 | // same content as when returned from a subscription. 
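// (earliestMessageID and latestMessageID in helper.go use the same
// Partition: -1 convention.)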
374 | id.Partition = proto.Int32(-1) 375 | } 376 | 377 | for _, res := range results { 378 | if res == nil { 379 | continue 380 | } 381 | 382 | res.id = id 383 | res.err = err 384 | 385 | close(res.ch) 386 | } 387 | 388 | return nil 389 | } 390 | -------------------------------------------------------------------------------- /producer_registry.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | ) 7 | 8 | type producerRegistry struct { 9 | producerIDs uint64 10 | producersMtx sync.RWMutex 11 | producers map[uint64]*Producer 12 | } 13 | 14 | func newProducerRegistry() *producerRegistry { 15 | return &producerRegistry{ 16 | producersMtx: sync.RWMutex{}, 17 | producers: map[uint64]*Producer{}, 18 | } 19 | } 20 | 21 | func (r *producerRegistry) newID() uint64 { 22 | id := atomic.AddUint64(&r.producerIDs, 1) 23 | return id 24 | } 25 | 26 | func (r *producerRegistry) add(id uint64, producer *Producer) { 27 | r.producersMtx.Lock() 28 | r.producers[id] = producer 29 | r.producersMtx.Unlock() 30 | } 31 | 32 | func (r *producerRegistry) get(id uint64) (producer *Producer, ok bool) { 33 | r.producersMtx.RLock() 34 | producer, ok = r.producers[id] 35 | r.producersMtx.RUnlock() 36 | return producer, ok 37 | } 38 | 39 | func (r *producerRegistry) getAndDelete(id uint64) (producer *Producer, ok bool) { 40 | r.producersMtx.Lock() 41 | producer, ok = r.producers[id] 42 | if ok { 43 | delete(r.producers, id) 44 | } 45 | r.producersMtx.Unlock() 46 | return producer, ok 47 | } 48 | 49 | func (r *producerRegistry) all() []*Producer { 50 | r.producersMtx.RLock() 51 | producers := make([]*Producer, 0, len(r.producers)) 52 | for _, prod := range r.producers { 53 | producers = append(producers, prod) 54 | } 55 | r.producersMtx.RUnlock() 56 | return producers 57 | } 58 | -------------------------------------------------------------------------------- /producer_test.go: -------------------------------------------------------------------------------- 1 | //go:build integration 2 | 3 | package pulsar 4 | 5 | import ( 6 | "context" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/assert" 11 | "github.com/stretchr/testify/require" 12 | "google.golang.org/protobuf/proto" 13 | ) 14 | 15 | func newTestProducer(t *testing.T, client *Client, topic string) (*Producer, string) { 16 | t.Helper() 17 | if topic == "" { 18 | topic = randomTopicName() 19 | } 20 | prodConf := ProducerConfig{ 21 | Topic: topic, 22 | Name: "test-producer", 23 | BatchSize: 1, 24 | } 25 | 26 | ctx := context.Background() 27 | var err error 28 | producer, err := client.NewProducer(ctx, prodConf) 29 | require.NoError(t, err) 30 | return producer, topic 31 | } 32 | 33 | func sendMessage(t *testing.T, producer *Producer, s string) *Message { 34 | t.Helper() 35 | m := &Message{ 36 | Body: []byte(s), 37 | } 38 | var err error 39 | ctx := context.Background() 40 | id, err := producer.WriteMessage(ctx, m.Body) 41 | require.NoError(t, err) 42 | require.NotNil(t, id) 43 | if id.BatchIndex == nil { 44 | id.BatchIndex = proto.Int32(0) 45 | } 46 | m.ID = id 47 | return m 48 | } 49 | 50 | func sendMessageAsync(t *testing.T, producer *Producer, s string) *Message { 51 | t.Helper() 52 | m := &Message{ 53 | Body: []byte(s), 54 | } 55 | ctx := context.Background() 56 | require.NoError(t, producer.WriteMessageAsync(ctx, m.Body)) 57 | return m 58 | } 59 | 60 | func TestProducerConfigValidate(t *testing.T) { 61 | conf := &ProducerConfig{ 62 | Topic: 
"test-topic-pulsar-go", 63 | } 64 | 65 | err := conf.Validate() 66 | assert.NoError(t, err) 67 | } 68 | 69 | func TestProducerRestartSequence(t *testing.T) { 70 | client := setup(t) 71 | defer func() { 72 | assert.NoError(t, client.Close()) 73 | }() 74 | 75 | prod, topic := newTestProducer(t, client, "") 76 | 77 | m1 := sendMessage(t, prod, "hello world 1") 78 | assert.EqualValues(t, 0, *m1.ID.EntryId) 79 | 80 | m2 := sendMessage(t, prod, "hello world 2") 81 | assert.EqualValues(t, 1, *m2.ID.EntryId) 82 | 83 | // restart producer 84 | err := prod.Close() 85 | require.NoError(t, err) 86 | prod, _ = newTestProducer(t, client, topic) 87 | assert.EqualValues(t, 0, prod.sequenceID) 88 | 89 | m3 := sendMessage(t, prod, "hello world 3") 90 | assert.EqualValues(t, 2, *m3.ID.EntryId) 91 | assert.Equal(t, *m1.ID.LedgerId, *m3.ID.LedgerId) 92 | } 93 | 94 | func TestProducerBrokerGeneratedName(t *testing.T) { 95 | client := setup(t) 96 | defer func() { 97 | assert.NoError(t, client.Close()) 98 | }() 99 | 100 | prodConf := ProducerConfig{ 101 | Topic: randomTopicName(), 102 | } 103 | 104 | ctx := context.Background() 105 | prod, err := client.NewProducer(ctx, prodConf) 106 | require.NoError(t, err) 107 | assert.NotEmpty(t, prod.name) 108 | } 109 | 110 | func TestProducerBatchSize(t *testing.T) { 111 | client := setup(t) 112 | defer func() { 113 | assert.NoError(t, client.Close()) 114 | }() 115 | 116 | topic := randomTopicName() 117 | prodConf := ProducerConfig{ 118 | Topic: topic, 119 | Name: "test-producer", 120 | BatchSize: 2, 121 | BatchTimeout: time.Minute, 122 | } 123 | 124 | ctx := context.Background() 125 | prod, err := client.NewProducer(ctx, prodConf) 126 | require.NoError(t, err) 127 | 128 | consConf := ConsumerConfig{ 129 | Topic: topic, 130 | InitialPosition: EarliestPosition, 131 | } 132 | 133 | consumer, err := client.NewConsumer(ctx, consConf) 134 | require.NoError(t, err) 135 | 136 | msg1 := sendMessageAsync(t, prod, "hello world 1") 137 | msg2 := sendMessageAsync(t, prod, "hello world 2") 138 | 139 | // wait for message to be available 140 | timeout := time.After(1 * time.Second) 141 | <-timeout 142 | assert.True(t, consumer.HasNext()) 143 | 144 | readMessageAndCompare(t, consumer, msg1) 145 | readMessageAndCompare(t, consumer, msg2) 146 | } 147 | 148 | func TestProducerBatchTimeout(t *testing.T) { 149 | client := setup(t) 150 | defer func() { 151 | assert.NoError(t, client.Close()) 152 | }() 153 | 154 | topic := randomTopicName() 155 | prodConf := ProducerConfig{ 156 | Topic: topic, 157 | Name: "test-producer", 158 | BatchSize: 100, 159 | BatchTimeout: 500 * time.Millisecond, 160 | } 161 | 162 | ctx := context.Background() 163 | 164 | consConf := ConsumerConfig{ 165 | Topic: topic, 166 | InitialPosition: EarliestPosition, 167 | ForceTopicCreation: true, 168 | } 169 | 170 | // start consumer before producer to make for better test timing 171 | consumer, err := client.NewConsumer(ctx, consConf) 172 | require.NoError(t, err) 173 | 174 | prod, err := client.NewProducer(ctx, prodConf) 175 | require.NoError(t, err) 176 | 177 | msg1 := sendMessageAsync(t, prod, "hello world 1") 178 | msg2 := sendMessageAsync(t, prod, "hello world 2") 179 | 180 | // wait for message to be available 181 | timeout := time.After(1100 * time.Millisecond) 182 | <-timeout 183 | assert.True(t, consumer.HasNext()) 184 | 185 | readMessageAndCompare(t, consumer, msg1) 186 | readMessageAndCompare(t, consumer, msg2) 187 | } 188 | -------------------------------------------------------------------------------- 
/proto/PulsarApi.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, 13 | * software distributed under the License is distributed on an 14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | * KIND, either express or implied. See the License for the 16 | * specific language governing permissions and limitations 17 | * under the License. 18 | */ 19 | syntax = "proto2"; 20 | 21 | package pulsar.proto; 22 | option java_package = "org.apache.pulsar.common.api.proto"; 23 | option go_package = "github.com/cornelk/pulsar-go/proto;pulsar_proto"; 24 | option optimize_for = LITE_RUNTIME; 25 | 26 | message Schema { 27 | enum Type { 28 | None = 0; 29 | String = 1; 30 | Json = 2; 31 | Protobuf = 3; 32 | Avro = 4; 33 | Bool = 5; 34 | Int8 = 6; 35 | Int16 = 7; 36 | Int32 = 8; 37 | Int64 = 9; 38 | Float = 10; 39 | Double = 11; 40 | Date = 12; 41 | Time = 13; 42 | Timestamp = 14; 43 | KeyValue = 15; 44 | Instant = 16; 45 | LocalDate = 17; 46 | LocalTime = 18; 47 | LocalDateTime = 19; 48 | ProtobufNative = 20; 49 | } 50 | 51 | required string name = 1; 52 | required bytes schema_data = 3; 53 | required Type type = 4; 54 | repeated KeyValue properties = 5; 55 | 56 | } 57 | 58 | message MessageIdData { 59 | required uint64 ledgerId = 1; 60 | required uint64 entryId = 2; 61 | optional int32 partition = 3 [default = -1]; 62 | optional int32 batch_index = 4 [default = -1]; 63 | repeated int64 ack_set = 5; 64 | optional int32 batch_size = 6; 65 | 66 | // For the chunk message id, we need to specify the first chunk message id. 67 | optional MessageIdData first_chunk_message_id = 7; 68 | } 69 | 70 | message KeyValue { 71 | required string key = 1; 72 | required string value = 2; 73 | } 74 | 75 | message KeyLongValue { 76 | required string key = 1; 77 | required uint64 value = 2; 78 | } 79 | 80 | message IntRange { 81 | required int32 start = 1; 82 | required int32 end = 2; 83 | } 84 | 85 | message EncryptionKeys { 86 | required string key = 1; 87 | required bytes value = 2; 88 | repeated KeyValue metadata = 3; 89 | } 90 | 91 | enum CompressionType { 92 | NONE = 0; 93 | LZ4 = 1; 94 | ZLIB = 2; 95 | ZSTD = 3; 96 | SNAPPY = 4; 97 | } 98 | 99 | enum ProducerAccessMode { 100 | Shared = 0; // By default multiple producers can publish on a topic 101 | Exclusive = 1; // Require exclusive access for producer. Fail immediately if there's already a producer connected. 102 | WaitForExclusive = 2; // Producer creation is pending until it can acquire exclusive access 103 | ExclusiveWithFencing = 3; // Require exclusive access for producer. Fence out old producer. 
104 | } 105 | 106 | message MessageMetadata { 107 | required string producer_name = 1; 108 | required uint64 sequence_id = 2; 109 | required uint64 publish_time = 3; 110 | repeated KeyValue properties = 4; 111 | 112 | // Property set on replicated message, 113 | // includes the source cluster name 114 | optional string replicated_from = 5; 115 | // key to decide partition for the msg 116 | optional string partition_key = 6; 117 | // Override namespace's replication 118 | repeated string replicate_to = 7; 119 | optional CompressionType compression = 8 [default = NONE]; 120 | optional uint32 uncompressed_size = 9 [default = 0]; 121 | // Removed below checksum field from Metadata as 122 | // it should be part of send-command which keeps checksum of header + payload 123 | //optional sfixed64 checksum = 10; 124 | // differentiate single and batch message metadata 125 | optional int32 num_messages_in_batch = 11 [default = 1]; 126 | 127 | // the timestamp at which this event occurred. it is typically set by applications. 128 | // if this field is omitted, `publish_time` can be used for the purpose of `event_time`. 129 | optional uint64 event_time = 12 [default = 0]; 130 | // Contains encryption key name, encrypted key and metadata to describe the key 131 | repeated EncryptionKeys encryption_keys = 13; 132 | // Algorithm used to encrypt data key 133 | optional string encryption_algo = 14; 134 | // Additional parameters required by encryption 135 | optional bytes encryption_param = 15; 136 | optional bytes schema_version = 16; 137 | 138 | optional bool partition_key_b64_encoded = 17 [ default = false ]; 139 | // Specify a key to overwrite the message key, which is used for ordering dispatch in Key_Shared mode. 140 | optional bytes ordering_key = 18; 141 | 142 | // Mark the message to be delivered at or after the specified timestamp 143 | optional int64 deliver_at_time = 19; 144 | 145 | // Identify whether a message is a "marker" message used for 146 | // internal metadata instead of application published data. 147 | // Markers will generally not be propagated back to clients 148 | optional int32 marker_type = 20; 149 | 150 | // transaction related message info 151 | optional uint64 txnid_least_bits = 22; 152 | optional uint64 txnid_most_bits = 23; 153 | 154 | /// Add highest sequence id to support batch message with external sequence id 155 | optional uint64 highest_sequence_id = 24 [default = 0]; 156 | 157 | // Indicate if the message payload value is set 158 | optional bool null_value = 25 [default = false]; 159 | optional string uuid = 26; 160 | optional int32 num_chunks_from_msg = 27; 161 | optional int32 total_chunk_msg_size = 28; 162 | optional int32 chunk_id = 29; 163 | 164 | // Indicate if the message partition key is set 165 | optional bool null_partition_key = 30 [default = false]; 166 | } 167 | 168 | message SingleMessageMetadata { 169 | repeated KeyValue properties = 1; 170 | optional string partition_key = 2; 171 | required int32 payload_size = 3; 172 | optional bool compacted_out = 4 [default = false]; 173 | 174 | // the timestamp at which this event occurred. it is typically set by applications. 175 | // if this field is omitted, `publish_time` can be used for the purpose of `event_time`. 176 | optional uint64 event_time = 5 [default = 0]; 177 | optional bool partition_key_b64_encoded = 6 [ default = false ]; 178 | // Specify a key to overwrite the message key, which is used for ordering dispatch in Key_Shared mode. 
179 | optional bytes ordering_key = 7; 180 | // Allows the consumer to retrieve the sequence id that the producer set. 181 | optional uint64 sequence_id = 8; 182 | // Indicate if the message payload value is set 183 | optional bool null_value = 9 [ default = false ]; 184 | // Indicate if the message partition key is set 185 | optional bool null_partition_key = 10 [ default = false]; 186 | } 187 | 188 | // metadata added for entry from broker 189 | message BrokerEntryMetadata { 190 | optional uint64 broker_timestamp = 1; 191 | optional uint64 index = 2; 192 | } 193 | 194 | enum ServerError { 195 | UnknownError = 0; 196 | MetadataError = 1; // Error with ZK/metadata 197 | PersistenceError = 2; // Error writing/reading from BK 198 | AuthenticationError = 3; // Invalid authentication 199 | AuthorizationError = 4; // Not authorized to use resource 200 | 201 | ConsumerBusy = 5; // Unable to subscribe/unsubscribe because 202 | // other consumers are connected 203 | ServiceNotReady = 6; // Any error that requires client retry operation with a fresh lookup 204 | ProducerBlockedQuotaExceededError = 7; // Unable to create producer because backlog quota exceeded 205 | ProducerBlockedQuotaExceededException = 8; // Exception while creating producer because quota exceeded 206 | ChecksumError = 9; // Error while verifying message checksum 207 | UnsupportedVersionError = 10; // Error when an older client/version doesn't support a required feature 208 | TopicNotFound = 11; // Topic not found 209 | SubscriptionNotFound = 12; // Subscription not found 210 | ConsumerNotFound = 13; // Consumer not found 211 | TooManyRequests = 14; // Error with too many simultaneous requests 212 | TopicTerminatedError = 15; // The topic has been terminated 213 | 214 | ProducerBusy = 16; // Producer with same name is already connected 215 | InvalidTopicName = 17; // The topic name is not valid 216 | 217 | IncompatibleSchema = 18; // Specified schema was incompatible with topic schema 218 | ConsumerAssignError = 19; // Dispatcher assign consumer error 219 | 220 | TransactionCoordinatorNotFound = 20; // Transaction coordinator not found error 221 | InvalidTxnStatus = 21; // Invalid txn status error 222 | NotAllowedError = 22; // Not allowed error 223 | 224 | TransactionConflict = 23; // Ack with transaction conflict 225 | TransactionNotFound = 24; // Transaction not found 226 | 227 | ProducerFenced = 25; // When a producer asks for and fails to get exclusive producer access, 228 | // or loses the exclusive status after a reconnection, the broker will 229 | // use this error to indicate that this producer is now permanently 230 | // fenced. 
Applications are now supposed to close it and create a 231 | // new producer 232 | } 233 | 234 | enum AuthMethod { 235 | AuthMethodNone = 0; 236 | AuthMethodYcaV1 = 1; 237 | AuthMethodAthens = 2; 238 | } 239 | 240 | // Each protocol version identifies new features that are 241 | // incrementally added to the protocol 242 | enum ProtocolVersion { 243 | v0 = 0; // Initial versioning 244 | v1 = 1; // Added application keep-alive 245 | v2 = 2; // Added RedeliverUnacknowledgedMessages Command 246 | v3 = 3; // Added compression with LZ4 and ZLib 247 | v4 = 4; // Added batch message support 248 | v5 = 5; // Added disconnect client w/o closing connection 249 | v6 = 6; // Added checksum computation for metadata + payload 250 | v7 = 7; // Added CommandLookupTopic - Binary Lookup 251 | v8 = 8; // Added CommandConsumerStats - Client fetches broker side consumer stats 252 | v9 = 9; // Added end of topic notification 253 | v10 = 10;// Added proxy to broker 254 | v11 = 11;// C++ consumers before this version are not correctly handling the checksum field 255 | v12 = 12;// Added get topic's last messageId from broker 256 | // Added CommandActiveConsumerChange 257 | // Added CommandGetTopicsOfNamespace 258 | v13 = 13; // Schema-registry : added avro schema format for json 259 | v14 = 14; // Add CommandAuthChallenge and CommandAuthResponse for mutual auth 260 | // Added Key_Shared subscription 261 | v15 = 15; // Add CommandGetOrCreateSchema and CommandGetOrCreateSchemaResponse 262 | v16 = 16; // Add support for broker entry metadata 263 | v17 = 17; // Added support for ack receipt 264 | v18 = 18; // Add client support for broker entry metadata 265 | v19 = 19; // Add CommandTcClientConnectRequest and CommandTcClientConnectResponse 266 | v20 = 20; // Add client support for topic migration redirection CommandTopicMigrated 267 | } 268 | 269 | message CommandConnect { 270 | required string client_version = 1; 271 | optional AuthMethod auth_method = 2; // Deprecated. Use "auth_method_name" instead. 272 | optional string auth_method_name = 5; 273 | optional bytes auth_data = 3; 274 | optional int32 protocol_version = 4 [default = 0]; 275 | 276 | // Client can ask to be proxied to a specific broker 277 | // This is only honored by a Pulsar proxy 278 | optional string proxy_to_broker_url = 6; 279 | 280 | // Original principal that was verified by 281 | // a Pulsar proxy. In this case the auth info above 282 | // will be the auth of the proxy itself 283 | optional string original_principal = 7; 284 | 285 | // Original auth role and auth Method that was passed 286 | // to the proxy. 
In this case the auth info above 287 | // will be the auth of the proxy itself 288 | optional string original_auth_data = 8; 289 | optional string original_auth_method = 9; 290 | 291 | // Feature flags 292 | optional FeatureFlags feature_flags = 10; 293 | } 294 | 295 | message FeatureFlags { 296 | optional bool supports_auth_refresh = 1 [default = false]; 297 | optional bool supports_broker_entry_metadata = 2 [default = false]; 298 | optional bool supports_partial_producer = 3 [default = false]; 299 | optional bool supports_topic_watchers = 4 [default = false]; 300 | } 301 | 302 | message CommandConnected { 303 | required string server_version = 1; 304 | optional int32 protocol_version = 2 [default = 0]; 305 | optional int32 max_message_size = 3; 306 | optional FeatureFlags feature_flags = 4; 307 | } 308 | 309 | message CommandAuthResponse { 310 | optional string client_version = 1; 311 | optional AuthData response = 2; 312 | optional int32 protocol_version = 3 [default = 0]; 313 | } 314 | 315 | message CommandAuthChallenge { 316 | optional string server_version = 1; 317 | optional AuthData challenge = 2; 318 | optional int32 protocol_version = 3 [default = 0]; 319 | } 320 | 321 | // To support mutual authentication types, such as SASL, this command is reused for mutual auth. 322 | message AuthData { 323 | optional string auth_method_name = 1; 324 | optional bytes auth_data = 2; 325 | } 326 | 327 | enum KeySharedMode { 328 | AUTO_SPLIT = 0; 329 | STICKY = 1; 330 | } 331 | 332 | message KeySharedMeta { 333 | required KeySharedMode keySharedMode = 1; 334 | repeated IntRange hashRanges = 3; 335 | optional bool allowOutOfOrderDelivery = 4 [default = false]; 336 | } 337 | 338 | message CommandSubscribe { 339 | enum SubType { 340 | Exclusive = 0; 341 | Shared = 1; 342 | Failover = 2; 343 | Key_Shared = 3; 344 | } 345 | required string topic = 1; 346 | required string subscription = 2; 347 | required SubType subType = 3; 348 | 349 | required uint64 consumer_id = 4; 350 | required uint64 request_id = 5; 351 | optional string consumer_name = 6; 352 | optional int32 priority_level = 7; 353 | 354 | // Signal whether the subscription should be backed by a 355 | // durable cursor or not 356 | optional bool durable = 8 [default = true]; 357 | 358 | // If specified, the subscription will position the cursor's 359 | // mark-delete position on the particular message id and 360 | // will send messages from that point 361 | optional MessageIdData start_message_id = 9; 362 | 363 | /// Add optional metadata key=value to this consumer 364 | repeated KeyValue metadata = 10; 365 | 366 | optional bool read_compacted = 11; 367 | 368 | optional Schema schema = 12; 369 | enum InitialPosition { 370 | Latest = 0; 371 | Earliest = 1; 372 | } 373 | // Signal whether the subscription will initialize on the latest 374 | // or the earliest message 375 | optional InitialPosition initialPosition = 13 [default = Latest]; 376 | 377 | // Mark the subscription as "replicated". Pulsar will make sure 378 | // to periodically sync the state of replicated subscriptions 379 | // across different clusters (when using geo-replication). 380 | optional bool replicate_subscription_state = 14; 381 | 382 | // If true, the subscribe operation will cause a topic to be 383 | // created if it does not exist already (and if topic auto-creation 384 | // is allowed by the broker). 385 | // If false, the subscribe operation will fail if the topic 386 | // does not exist. 
387 | optional bool force_topic_creation = 15 [default = true]; 388 | 389 | // If specified, the subscription will reset cursor's position back 390 | // to specified seconds and will send messages from that point 391 | optional uint64 start_message_rollback_duration_sec = 16 [default = 0]; 392 | 393 | optional KeySharedMeta keySharedMeta = 17; 394 | 395 | repeated KeyValue subscription_properties = 18; 396 | 397 | // The consumer epoch, when exclusive and failover consumer redeliver unack message will increase the epoch 398 | optional uint64 consumer_epoch = 19; 399 | } 400 | 401 | message CommandPartitionedTopicMetadata { 402 | required string topic = 1; 403 | required uint64 request_id = 2; 404 | // TODO - Remove original_principal, original_auth_data, original_auth_method 405 | // Original principal that was verified by 406 | // a Pulsar proxy. 407 | optional string original_principal = 3; 408 | 409 | // Original auth role and auth Method that was passed 410 | // to the proxy. 411 | optional string original_auth_data = 4; 412 | optional string original_auth_method = 5; 413 | } 414 | 415 | message CommandPartitionedTopicMetadataResponse { 416 | enum LookupType { 417 | Success = 0; 418 | Failed = 1; 419 | } 420 | optional uint32 partitions = 1; // Optional in case of error 421 | required uint64 request_id = 2; 422 | optional LookupType response = 3; 423 | optional ServerError error = 4; 424 | optional string message = 5; 425 | } 426 | 427 | message CommandLookupTopic { 428 | required string topic = 1; 429 | required uint64 request_id = 2; 430 | optional bool authoritative = 3 [default = false]; 431 | 432 | // TODO - Remove original_principal, original_auth_data, original_auth_method 433 | // Original principal that was verified by 434 | // a Pulsar proxy. 435 | optional string original_principal = 4; 436 | 437 | // Original auth role and auth Method that was passed 438 | // to the proxy. 439 | optional string original_auth_data = 5; 440 | optional string original_auth_method = 6; 441 | // 442 | optional string advertised_listener_name = 7; 443 | } 444 | 445 | message CommandLookupTopicResponse { 446 | enum LookupType { 447 | Redirect = 0; 448 | Connect = 1; 449 | Failed = 2; 450 | } 451 | 452 | optional string brokerServiceUrl = 1; // Optional in case of error 453 | optional string brokerServiceUrlTls = 2; 454 | optional LookupType response = 3; 455 | required uint64 request_id = 4; 456 | optional bool authoritative = 5 [default = false]; 457 | optional ServerError error = 6; 458 | optional string message = 7; 459 | 460 | // If it's true, indicates to the client that it must 461 | // always connect through the service url after the 462 | // lookup has been completed. 
463 | optional bool proxy_through_service_url = 8 [default = false]; 464 | } 465 | 466 | /// Create a new Producer on a topic, assigning the given producer_id, 467 | /// all messages sent with this producer_id will be persisted on the topic 468 | message CommandProducer { 469 | required string topic = 1; 470 | required uint64 producer_id = 2; 471 | required uint64 request_id = 3; 472 | 473 | /// If a producer name is specified, the name will be used, 474 | /// otherwise the broker will generate a unique name 475 | optional string producer_name = 4; 476 | 477 | optional bool encrypted = 5 [default = false]; 478 | 479 | /// Add optional metadata key=value to this producer 480 | repeated KeyValue metadata = 6; 481 | 482 | optional Schema schema = 7; 483 | 484 | // If the producer reconnects to the broker, the epoch of this producer will increase by 1 485 | optional uint64 epoch = 8 [default = 0]; 486 | 487 | // Indicate whether the name of the producer is generated or user provided 488 | // The default of true is used in order to be forward compatible with the client 489 | optional bool user_provided_producer_name = 9 [default = true]; 490 | 491 | // Require that this producer will be the only producer allowed on the topic 492 | optional ProducerAccessMode producer_access_mode = 10 [default = Shared]; 493 | 494 | // Topic epoch is used to fence off producers that reconnect after a new 495 | // exclusive producer has already taken over. This id is assigned by the 496 | // broker on the CommandProducerSuccess. The first time, the client will 497 | // leave it empty and then it will always carry the same epoch number on 498 | // the subsequent reconnections. 499 | optional uint64 topic_epoch = 11; 500 | 501 | optional bool txn_enabled = 12 [default = false]; 502 | 503 | // Name of the initial subscription of the topic. 504 | // If this field is not set, the initial subscription will not be created. 505 | // If this field is set but the broker's `allowAutoSubscriptionCreation` 506 | // is disabled, the producer will fail to be created. 507 | optional string initial_subscription_name = 13; 508 | } 509 | 510 | message CommandSend { 511 | required uint64 producer_id = 1; 512 | required uint64 sequence_id = 2; 513 | optional int32 num_messages = 3 [default = 1]; 514 | optional uint64 txnid_least_bits = 4 [default = 0]; 515 | optional uint64 txnid_most_bits = 5 [default = 0]; 516 | 517 | /// Add highest sequence id to support batch message with external sequence id 518 | optional uint64 highest_sequence_id = 6 [default = 0]; 519 | optional bool is_chunk = 7 [default = false]; 520 | 521 | // Specify if the message being published is a Pulsar marker or not 522 | optional bool marker = 8 [default = false]; 523 | 524 | // Message id of this message, currently used in the replicator for shadow topics. 
525 | optional MessageIdData message_id = 9; 526 | } 527 | 528 | message CommandSendReceipt { 529 | required uint64 producer_id = 1; 530 | required uint64 sequence_id = 2; 531 | optional MessageIdData message_id = 3; 532 | optional uint64 highest_sequence_id = 4 [default = 0]; 533 | } 534 | 535 | message CommandSendError { 536 | required uint64 producer_id = 1; 537 | required uint64 sequence_id = 2; 538 | required ServerError error = 3; 539 | required string message = 4; 540 | } 541 | 542 | message CommandMessage { 543 | required uint64 consumer_id = 1; 544 | required MessageIdData message_id = 2; 545 | optional uint32 redelivery_count = 3 [default = 0]; 546 | repeated int64 ack_set = 4; 547 | optional uint64 consumer_epoch = 5; 548 | } 549 | 550 | message CommandAck { 551 | enum AckType { 552 | Individual = 0; 553 | Cumulative = 1; 554 | } 555 | 556 | required uint64 consumer_id = 1; 557 | required AckType ack_type = 2; 558 | 559 | // In case of individual acks, the client can pass a list of message ids 560 | repeated MessageIdData message_id = 3; 561 | 562 | // Acks can contain a flag to indicate the consumer 563 | // received an invalid message that got discarded 564 | // before being passed on to the application. 565 | enum ValidationError { 566 | UncompressedSizeCorruption = 0; 567 | DecompressionError = 1; 568 | ChecksumMismatch = 2; 569 | BatchDeSerializeError = 3; 570 | DecryptionError = 4; 571 | } 572 | 573 | optional ValidationError validation_error = 4; 574 | repeated KeyLongValue properties = 5; 575 | 576 | optional uint64 txnid_least_bits = 6 [default = 0]; 577 | optional uint64 txnid_most_bits = 7 [default = 0]; 578 | optional uint64 request_id = 8; 579 | } 580 | 581 | message CommandAckResponse { 582 | required uint64 consumer_id = 1; 583 | optional uint64 txnid_least_bits = 2 [default = 0]; 584 | optional uint64 txnid_most_bits = 3 [default = 0]; 585 | optional ServerError error = 4; 586 | optional string message = 5; 587 | optional uint64 request_id = 6; 588 | } 589 | 590 | // changes on active consumer 591 | message CommandActiveConsumerChange { 592 | required uint64 consumer_id = 1; 593 | optional bool is_active = 2 [default = false]; 594 | } 595 | 596 | message CommandFlow { 597 | required uint64 consumer_id = 1; 598 | 599 | // Max number of messages to prefetch, in addition 600 | // of any number previously specified 601 | required uint32 messagePermits = 2; 602 | } 603 | 604 | message CommandUnsubscribe { 605 | required uint64 consumer_id = 1; 606 | required uint64 request_id = 2; 607 | } 608 | 609 | // Reset an existing consumer to a particular message id 610 | message CommandSeek { 611 | required uint64 consumer_id = 1; 612 | required uint64 request_id = 2; 613 | 614 | optional MessageIdData message_id = 3; 615 | optional uint64 message_publish_time = 4; 616 | } 617 | 618 | // Message sent by broker to client when a topic 619 | // has been forcefully terminated and there are no more 620 | // messages left to consume 621 | message CommandReachedEndOfTopic { 622 | required uint64 consumer_id = 1; 623 | } 624 | 625 | message CommandTopicMigrated { 626 | enum ResourceType { 627 | Producer = 0; 628 | Consumer = 1; 629 | } 630 | required uint64 resource_id = 1; 631 | required ResourceType resource_type = 2; 632 | optional string brokerServiceUrl = 3; 633 | optional string brokerServiceUrlTls = 4; 634 | 635 | } 636 | 637 | 638 | message CommandCloseProducer { 639 | required uint64 producer_id = 1; 640 | required uint64 request_id = 2; 641 | } 642 | 643 | message 
CommandCloseConsumer { 644 | required uint64 consumer_id = 1; 645 | required uint64 request_id = 2; 646 | } 647 | 648 | message CommandRedeliverUnacknowledgedMessages { 649 | required uint64 consumer_id = 1; 650 | repeated MessageIdData message_ids = 2; 651 | optional uint64 consumer_epoch = 3; 652 | } 653 | 654 | message CommandSuccess { 655 | required uint64 request_id = 1; 656 | optional Schema schema = 2; 657 | } 658 | 659 | /// Response from CommandProducer 660 | message CommandProducerSuccess { 661 | required uint64 request_id = 1; 662 | required string producer_name = 2; 663 | 664 | // The last sequence id that was stored by this producer in the previous session 665 | // This will only be meaningful if deduplication has been enabled. 666 | optional int64 last_sequence_id = 3 [default = -1]; 667 | optional bytes schema_version = 4; 668 | 669 | // The topic epoch assigned by the broker. This field will only be set if we 670 | // were requiring exclusive access when creating the producer. 671 | optional uint64 topic_epoch = 5; 672 | 673 | // If producer is not "ready", the client will avoid to timeout the request 674 | // for creating the producer. Instead it will wait indefinitely until it gets 675 | // a subsequent `CommandProducerSuccess` with `producer_ready==true`. 676 | optional bool producer_ready = 6 [default = true]; 677 | } 678 | 679 | message CommandError { 680 | required uint64 request_id = 1; 681 | required ServerError error = 2; 682 | required string message = 3; 683 | } 684 | 685 | // Commands to probe the state of connection. 686 | // When either client or broker doesn't receive commands for certain 687 | // amount of time, they will send a Ping probe. 688 | message CommandPing { 689 | } 690 | message CommandPong { 691 | } 692 | 693 | message CommandConsumerStats { 694 | required uint64 request_id = 1; 695 | // required string topic_name = 2; 696 | // required string subscription_name = 3; 697 | required uint64 consumer_id = 4; 698 | } 699 | 700 | message CommandConsumerStatsResponse { 701 | required uint64 request_id = 1; 702 | optional ServerError error_code = 2; 703 | optional string error_message = 3; 704 | 705 | /// Total rate of messages delivered to the consumer. msg/s 706 | optional double msgRateOut = 4; 707 | 708 | /// Total throughput delivered to the consumer. bytes/s 709 | optional double msgThroughputOut = 5; 710 | 711 | /// Total rate of messages redelivered by this consumer. msg/s 712 | optional double msgRateRedeliver = 6; 713 | 714 | /// Name of the consumer 715 | optional string consumerName = 7; 716 | 717 | /// Number of available message permits for the consumer 718 | optional uint64 availablePermits = 8; 719 | 720 | /// Number of unacknowledged messages for the consumer 721 | optional uint64 unackedMessages = 9; 722 | 723 | /// Flag to verify if consumer is blocked due to reaching threshold of unacked messages 724 | optional bool blockedConsumerOnUnackedMsgs = 10; 725 | 726 | /// Address of this consumer 727 | optional string address = 11; 728 | 729 | /// Timestamp of connection 730 | optional string connectedSince = 12; 731 | 732 | /// Whether this subscription is Exclusive or Shared or Failover 733 | optional string type = 13; 734 | 735 | /// Total rate of messages expired on this subscription. msg/s 736 | optional double msgRateExpired = 14; 737 | 738 | /// Number of messages in the subscription backlog 739 | optional uint64 msgBacklog = 15; 740 | 741 | /// Total rate of messages ack. 
msg/s 742 | optional double messageAckRate = 16; 743 | } 744 | 745 | message CommandGetLastMessageId { 746 | required uint64 consumer_id = 1; 747 | required uint64 request_id = 2; 748 | } 749 | 750 | message CommandGetLastMessageIdResponse { 751 | required MessageIdData last_message_id = 1; 752 | required uint64 request_id = 2; 753 | optional MessageIdData consumer_mark_delete_position = 3; 754 | } 755 | 756 | message CommandGetTopicsOfNamespace { 757 | enum Mode { 758 | PERSISTENT = 0; 759 | NON_PERSISTENT = 1; 760 | ALL = 2; 761 | } 762 | required uint64 request_id = 1; 763 | required string namespace = 2; 764 | optional Mode mode = 3 [default = PERSISTENT]; 765 | optional string topics_pattern = 4; 766 | optional string topics_hash = 5; 767 | } 768 | 769 | message CommandGetTopicsOfNamespaceResponse { 770 | required uint64 request_id = 1; 771 | repeated string topics = 2; 772 | // true iff the topic list was filtered by the pattern supplied by the client 773 | optional bool filtered = 3 [default = false]; 774 | // hash computed from the names of matching topics 775 | optional string topics_hash = 4; 776 | // if false, topics is empty and the list of matching topics has not changed 777 | optional bool changed = 5 [default = true]; 778 | } 779 | 780 | message CommandWatchTopicList { 781 | required uint64 request_id = 1; 782 | required uint64 watcher_id = 2; 783 | required string namespace = 3; 784 | required string topics_pattern = 4; 785 | // Only present when the client reconnects: 786 | optional string topics_hash = 5; 787 | } 788 | 789 | message CommandWatchTopicListSuccess { 790 | required uint64 request_id = 1; 791 | required uint64 watcher_id = 2; 792 | repeated string topic = 3; 793 | required string topics_hash = 4; 794 | } 795 | 796 | message CommandWatchTopicUpdate { 797 | required uint64 watcher_id = 1; 798 | repeated string new_topics = 2; 799 | repeated string deleted_topics = 3; 800 | required string topics_hash = 4; 801 | } 802 | 803 | message CommandWatchTopicListClose { 804 | required uint64 request_id = 1; 805 | required uint64 watcher_id = 2; 806 | } 807 | 808 | message CommandGetSchema { 809 | required uint64 request_id = 1; 810 | required string topic = 2; 811 | 812 | optional bytes schema_version = 3; 813 | } 814 | 815 | message CommandGetSchemaResponse { 816 | required uint64 request_id = 1; 817 | optional ServerError error_code = 2; 818 | optional string error_message = 3; 819 | 820 | optional Schema schema = 4; 821 | optional bytes schema_version = 5; 822 | } 823 | 824 | message CommandGetOrCreateSchema { 825 | required uint64 request_id = 1; 826 | required string topic = 2; 827 | required Schema schema = 3; 828 | } 829 | 830 | message CommandGetOrCreateSchemaResponse { 831 | required uint64 request_id = 1; 832 | optional ServerError error_code = 2; 833 | optional string error_message = 3; 834 | 835 | optional bytes schema_version = 4; 836 | } 837 | 838 | /// --- transaction related --- 839 | 840 | enum TxnAction { 841 | COMMIT = 0; 842 | ABORT = 1; 843 | } 844 | 845 | message CommandTcClientConnectRequest { 846 | required uint64 request_id = 1; 847 | required uint64 tc_id = 2 [default = 0]; 848 | } 849 | 850 | message CommandTcClientConnectResponse { 851 | required uint64 request_id = 1; 852 | optional ServerError error = 2; 853 | optional string message = 3; 854 | } 855 | 856 | message CommandNewTxn { 857 | required uint64 request_id = 1; 858 | optional uint64 txn_ttl_seconds = 2 [default = 0]; 859 | optional uint64 tc_id = 3 [default = 0]; 860 | } 861 | 862 | 
message CommandNewTxnResponse { 863 | required uint64 request_id = 1; 864 | optional uint64 txnid_least_bits = 2 [default = 0]; 865 | optional uint64 txnid_most_bits = 3 [default = 0]; 866 | optional ServerError error = 4; 867 | optional string message = 5; 868 | } 869 | 870 | message CommandAddPartitionToTxn { 871 | required uint64 request_id = 1; 872 | optional uint64 txnid_least_bits = 2 [default = 0]; 873 | optional uint64 txnid_most_bits = 3 [default = 0]; 874 | repeated string partitions = 4; 875 | } 876 | 877 | message CommandAddPartitionToTxnResponse { 878 | required uint64 request_id = 1; 879 | optional uint64 txnid_least_bits = 2 [default = 0]; 880 | optional uint64 txnid_most_bits = 3 [default = 0]; 881 | optional ServerError error = 4; 882 | optional string message = 5; 883 | } 884 | 885 | message Subscription { 886 | required string topic = 1; 887 | required string subscription = 2; 888 | } 889 | message CommandAddSubscriptionToTxn { 890 | required uint64 request_id = 1; 891 | optional uint64 txnid_least_bits = 2 [default = 0]; 892 | optional uint64 txnid_most_bits = 3 [default = 0]; 893 | repeated Subscription subscription = 4; 894 | } 895 | 896 | message CommandAddSubscriptionToTxnResponse { 897 | required uint64 request_id = 1; 898 | optional uint64 txnid_least_bits = 2 [default = 0]; 899 | optional uint64 txnid_most_bits = 3 [default = 0]; 900 | optional ServerError error = 4; 901 | optional string message = 5; 902 | } 903 | 904 | message CommandEndTxn { 905 | required uint64 request_id = 1; 906 | optional uint64 txnid_least_bits = 2 [default = 0]; 907 | optional uint64 txnid_most_bits = 3 [default = 0]; 908 | optional TxnAction txn_action = 4; 909 | } 910 | 911 | message CommandEndTxnResponse { 912 | required uint64 request_id = 1; 913 | optional uint64 txnid_least_bits = 2 [default = 0]; 914 | optional uint64 txnid_most_bits = 3 [default = 0]; 915 | optional ServerError error = 4; 916 | optional string message = 5; 917 | } 918 | 919 | message CommandEndTxnOnPartition { 920 | required uint64 request_id = 1; 921 | optional uint64 txnid_least_bits = 2 [default = 0]; 922 | optional uint64 txnid_most_bits = 3 [default = 0]; 923 | optional string topic = 4; 924 | optional TxnAction txn_action = 5; 925 | optional uint64 txnid_least_bits_of_low_watermark = 6; 926 | } 927 | 928 | message CommandEndTxnOnPartitionResponse { 929 | required uint64 request_id = 1; 930 | optional uint64 txnid_least_bits = 2 [default = 0]; 931 | optional uint64 txnid_most_bits = 3 [default = 0]; 932 | optional ServerError error = 4; 933 | optional string message = 5; 934 | } 935 | 936 | message CommandEndTxnOnSubscription { 937 | required uint64 request_id = 1; 938 | optional uint64 txnid_least_bits = 2 [default = 0]; 939 | optional uint64 txnid_most_bits = 3 [default = 0]; 940 | optional Subscription subscription= 4; 941 | optional TxnAction txn_action = 5; 942 | optional uint64 txnid_least_bits_of_low_watermark = 6; 943 | } 944 | 945 | message CommandEndTxnOnSubscriptionResponse { 946 | required uint64 request_id = 1; 947 | optional uint64 txnid_least_bits = 2 [default = 0]; 948 | optional uint64 txnid_most_bits = 3 [default = 0]; 949 | optional ServerError error = 4; 950 | optional string message = 5; 951 | } 952 | 953 | message BaseCommand { 954 | enum Type { 955 | CONNECT = 2; 956 | CONNECTED = 3; 957 | SUBSCRIBE = 4; 958 | 959 | PRODUCER = 5; 960 | 961 | SEND = 6; 962 | SEND_RECEIPT= 7; 963 | SEND_ERROR = 8; 964 | 965 | MESSAGE = 9; 966 | ACK = 10; 967 | FLOW = 11; 968 | 969 | UNSUBSCRIBE = 12; 
970 | 971 | SUCCESS = 13; 972 | ERROR = 14; 973 | 974 | CLOSE_PRODUCER = 15; 975 | CLOSE_CONSUMER = 16; 976 | 977 | PRODUCER_SUCCESS = 17; 978 | 979 | PING = 18; 980 | PONG = 19; 981 | 982 | REDELIVER_UNACKNOWLEDGED_MESSAGES = 20; 983 | 984 | PARTITIONED_METADATA = 21; 985 | PARTITIONED_METADATA_RESPONSE = 22; 986 | 987 | LOOKUP = 23; 988 | LOOKUP_RESPONSE = 24; 989 | 990 | CONSUMER_STATS = 25; 991 | CONSUMER_STATS_RESPONSE = 26; 992 | 993 | REACHED_END_OF_TOPIC = 27; 994 | 995 | SEEK = 28; 996 | 997 | GET_LAST_MESSAGE_ID = 29; 998 | GET_LAST_MESSAGE_ID_RESPONSE = 30; 999 | 1000 | ACTIVE_CONSUMER_CHANGE = 31; 1001 | 1002 | 1003 | GET_TOPICS_OF_NAMESPACE = 32; 1004 | GET_TOPICS_OF_NAMESPACE_RESPONSE = 33; 1005 | 1006 | GET_SCHEMA = 34; 1007 | GET_SCHEMA_RESPONSE = 35; 1008 | 1009 | AUTH_CHALLENGE = 36; 1010 | AUTH_RESPONSE = 37; 1011 | 1012 | ACK_RESPONSE = 38; 1013 | 1014 | GET_OR_CREATE_SCHEMA = 39; 1015 | GET_OR_CREATE_SCHEMA_RESPONSE = 40; 1016 | 1017 | // transaction related 1018 | NEW_TXN = 50; 1019 | NEW_TXN_RESPONSE = 51; 1020 | 1021 | ADD_PARTITION_TO_TXN = 52; 1022 | ADD_PARTITION_TO_TXN_RESPONSE = 53; 1023 | 1024 | ADD_SUBSCRIPTION_TO_TXN = 54; 1025 | ADD_SUBSCRIPTION_TO_TXN_RESPONSE = 55; 1026 | 1027 | END_TXN = 56; 1028 | END_TXN_RESPONSE = 57; 1029 | 1030 | END_TXN_ON_PARTITION = 58; 1031 | END_TXN_ON_PARTITION_RESPONSE = 59; 1032 | 1033 | END_TXN_ON_SUBSCRIPTION = 60; 1034 | END_TXN_ON_SUBSCRIPTION_RESPONSE = 61; 1035 | TC_CLIENT_CONNECT_REQUEST = 62; 1036 | TC_CLIENT_CONNECT_RESPONSE = 63; 1037 | 1038 | WATCH_TOPIC_LIST = 64; 1039 | WATCH_TOPIC_LIST_SUCCESS = 65; 1040 | WATCH_TOPIC_UPDATE = 66; 1041 | WATCH_TOPIC_LIST_CLOSE = 67; 1042 | 1043 | TOPIC_MIGRATED = 68; 1044 | } 1045 | 1046 | 1047 | required Type type = 1; 1048 | 1049 | optional CommandConnect connect = 2; 1050 | optional CommandConnected connected = 3; 1051 | 1052 | optional CommandSubscribe subscribe = 4; 1053 | optional CommandProducer producer = 5; 1054 | optional CommandSend send = 6; 1055 | optional CommandSendReceipt send_receipt = 7; 1056 | optional CommandSendError send_error = 8; 1057 | optional CommandMessage message = 9; 1058 | optional CommandAck ack = 10; 1059 | optional CommandFlow flow = 11; 1060 | optional CommandUnsubscribe unsubscribe = 12; 1061 | 1062 | optional CommandSuccess success = 13; 1063 | optional CommandError error = 14; 1064 | 1065 | optional CommandCloseProducer close_producer = 15; 1066 | optional CommandCloseConsumer close_consumer = 16; 1067 | 1068 | optional CommandProducerSuccess producer_success = 17; 1069 | optional CommandPing ping = 18; 1070 | optional CommandPong pong = 19; 1071 | optional CommandRedeliverUnacknowledgedMessages redeliverUnacknowledgedMessages = 20; 1072 | 1073 | optional CommandPartitionedTopicMetadata partitionMetadata = 21; 1074 | optional CommandPartitionedTopicMetadataResponse partitionMetadataResponse = 22; 1075 | 1076 | optional CommandLookupTopic lookupTopic = 23; 1077 | optional CommandLookupTopicResponse lookupTopicResponse = 24; 1078 | 1079 | optional CommandConsumerStats consumerStats = 25; 1080 | optional CommandConsumerStatsResponse consumerStatsResponse = 26; 1081 | 1082 | optional CommandReachedEndOfTopic reachedEndOfTopic = 27; 1083 | 1084 | optional CommandSeek seek = 28; 1085 | 1086 | optional CommandGetLastMessageId getLastMessageId = 29; 1087 | optional CommandGetLastMessageIdResponse getLastMessageIdResponse = 30; 1088 | 1089 | optional CommandActiveConsumerChange active_consumer_change = 31; 1090 | 1091 | optional 
CommandGetTopicsOfNamespace getTopicsOfNamespace = 32; 1092 | optional CommandGetTopicsOfNamespaceResponse getTopicsOfNamespaceResponse = 33; 1093 | 1094 | optional CommandGetSchema getSchema = 34; 1095 | optional CommandGetSchemaResponse getSchemaResponse = 35; 1096 | 1097 | optional CommandAuthChallenge authChallenge = 36; 1098 | optional CommandAuthResponse authResponse = 37; 1099 | 1100 | optional CommandAckResponse ackResponse = 38; 1101 | 1102 | optional CommandGetOrCreateSchema getOrCreateSchema = 39; 1103 | optional CommandGetOrCreateSchemaResponse getOrCreateSchemaResponse = 40; 1104 | 1105 | // transaction related 1106 | optional CommandNewTxn newTxn = 50; 1107 | optional CommandNewTxnResponse newTxnResponse = 51; 1108 | optional CommandAddPartitionToTxn addPartitionToTxn= 52; 1109 | optional CommandAddPartitionToTxnResponse addPartitionToTxnResponse = 53; 1110 | optional CommandAddSubscriptionToTxn addSubscriptionToTxn = 54; 1111 | optional CommandAddSubscriptionToTxnResponse addSubscriptionToTxnResponse = 55; 1112 | optional CommandEndTxn endTxn = 56; 1113 | optional CommandEndTxnResponse endTxnResponse = 57; 1114 | optional CommandEndTxnOnPartition endTxnOnPartition = 58; 1115 | optional CommandEndTxnOnPartitionResponse endTxnOnPartitionResponse = 59; 1116 | optional CommandEndTxnOnSubscription endTxnOnSubscription = 60; 1117 | optional CommandEndTxnOnSubscriptionResponse endTxnOnSubscriptionResponse = 61; 1118 | optional CommandTcClientConnectRequest tcClientConnectRequest = 62; 1119 | optional CommandTcClientConnectResponse tcClientConnectResponse = 63; 1120 | 1121 | optional CommandWatchTopicList watchTopicList = 64; 1122 | optional CommandWatchTopicListSuccess watchTopicListSuccess = 65; 1123 | optional CommandWatchTopicUpdate watchTopicUpdate = 66; 1124 | optional CommandWatchTopicListClose watchTopicListClose = 67; 1125 | 1126 | optional CommandTopicMigrated topicMigrated = 68; 1127 | } 1128 | -------------------------------------------------------------------------------- /proto/generate.go: -------------------------------------------------------------------------------- 1 | // Package pulsar_proto contains the Apache Pulsar Proto definitions. 2 | package pulsar_proto 3 | 4 | //go:generate protoc --go_out=../../../../ PulsarApi.proto 5 | -------------------------------------------------------------------------------- /requests.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | type requestCallback func(*command) error 8 | 9 | type requests struct { 10 | mutex sync.RWMutex 11 | callbacks map[uint64]requestCallback 12 | custom map[uint64]string 13 | sequencer *sequencer 14 | } 15 | 16 | func newRequests() *requests { 17 | return &requests{ 18 | callbacks: map[uint64]requestCallback{}, 19 | custom: map[uint64]string{}, 20 | sequencer: &sequencer{}, 21 | } 22 | } 23 | 24 | // remove deletes the request id and returns its assigned callback, if one exists. 
25 | func (r *requests) remove(reqID uint64) (requestCallback, string) { 26 | r.mutex.Lock() 27 | f, ok := r.callbacks[reqID] 28 | if !ok { 29 | r.mutex.Unlock() 30 | return nil, "" 31 | } 32 | 33 | delete(r.callbacks, reqID) 34 | s := r.custom[reqID] 35 | delete(r.custom, reqID) 36 | r.mutex.Unlock() 37 | return f, s 38 | } 39 | 40 | func (r *requests) newID() uint64 { 41 | return r.sequencer.newID() 42 | } 43 | 44 | func (r *requests) addCallback(reqID uint64, f requestCallback) { 45 | r.mutex.Lock() 46 | r.callbacks[reqID] = f 47 | r.mutex.Unlock() 48 | } 49 | 50 | func (r *requests) addCallbackCustom(reqID uint64, f requestCallback, custom string) { 51 | r.mutex.Lock() 52 | r.callbacks[reqID] = f 53 | r.custom[reqID] = custom 54 | r.mutex.Unlock() 55 | } 56 | -------------------------------------------------------------------------------- /sequencer.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import "sync/atomic" 4 | 5 | type sequencer struct { 6 | id uint64 7 | } 8 | 9 | // newID atomically generates a new ID, as needed for use 10 | // in the proto messages. 11 | func (s *sequencer) newID() uint64 { 12 | return atomic.AddUint64(&s.id, 1) 13 | } 14 | -------------------------------------------------------------------------------- /topic.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "strings" 7 | ) 8 | 9 | // Topic name components used to build fully qualified topic names. 10 | const ( 11 | publicTenant = "public" 12 | DefaultNamespace = "default" 13 | // TODO support partitioning partitionedTopicSuffix = "-partition-". 14 | persistentDomain = "persistent" 15 | nonPersistentDomain = "non-persistent" 16 | domainSeparator = "://" 17 | ) 18 | 19 | // Topic represents a Pulsar Topic. 20 | type Topic struct { 21 | Domain string 22 | Tenant string 23 | Namespace string 24 | LocalName string 25 | CompleteName string 26 | } 27 | 28 | // NewTopic creates a new topic struct from the given topic name. 29 | // The topic name can be in short form or a fully qualified topic name. 
30 | func NewTopic(name string) (*Topic, error) { 31 | if !strings.Contains(name, domainSeparator) { 32 | // The short topic name can be: 33 | // - <topic> 34 | // - <tenant>/<namespace>/<topic> 35 | parts := strings.Split(name, "/") 36 | switch len(parts) { 37 | case 3: 38 | name = persistentDomain + domainSeparator + 39 | name 40 | case 1: 41 | name = persistentDomain + domainSeparator + 42 | publicTenant + "/" + DefaultNamespace + "/" + parts[0] 43 | default: 44 | return nil, errors.New("invalid topic short name format") 45 | } 46 | } 47 | 48 | parts := strings.Split(name, domainSeparator) 49 | if len(parts) != 2 { 50 | return nil, errors.New("invalid topic domain format") 51 | } 52 | 53 | domain := parts[0] 54 | if domain != persistentDomain && domain != nonPersistentDomain { 55 | return nil, errors.New("invalid topic domain") 56 | } 57 | 58 | parts = strings.Split(parts[1], "/") 59 | if len(parts) != 3 { 60 | return nil, errors.New("invalid topic name format") 61 | } 62 | 63 | t := &Topic{ 64 | Domain: domain, 65 | Tenant: parts[0], 66 | Namespace: parts[1], 67 | LocalName: parts[2], 68 | CompleteName: "", 69 | } 70 | t.CompleteName = fmt.Sprintf("%s://%s/%s/%s", t.Domain, t.Tenant, 71 | t.Namespace, t.LocalName) 72 | return t, nil 73 | } 74 | -------------------------------------------------------------------------------- /topic_test.go: -------------------------------------------------------------------------------- 1 | package pulsar 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestNewTopic(t *testing.T) { 10 | tests := []struct { 11 | name string 12 | error string 13 | completeName string 14 | }{ 15 | { 16 | name: "topic-1", 17 | completeName: "persistent://public/default/topic-1", 18 | }, 19 | { 20 | name: "property/namespace/topic-1", 21 | completeName: "persistent://property/namespace/topic-1", 22 | }, 23 | { 24 | name: "namespace/topic-1", 25 | error: "invalid topic short name format", 26 | }, 27 | { 28 | name: "://tenant://namespace", 29 | error: "invalid topic domain format", 30 | }, 31 | { 32 | name: "://tenant/namespace", 33 | error: "invalid topic domain", 34 | }, 35 | { 36 | name: "persistent://property/namespace/topic/1", 37 | error: "invalid topic name format", 38 | }, 39 | } 40 | 41 | for _, test := range tests { 42 | topic, err := NewTopic(test.name) 43 | if err == nil { 44 | if test.error != "" { 45 | t.Errorf("%v: expected error '%v' but got none", test.name, test.error) 46 | } 47 | } else { 48 | if test.error != err.Error() { 49 | t.Errorf("%v: expected error '%v' but got '%v'", test.name, test.error, err) 50 | } 51 | } 52 | 53 | if topic == nil { 54 | continue 55 | } 56 | 57 | assert.Equal(t, test.completeName, topic.CompleteName) 58 | } 59 | } 60 | --------------------------------------------------------------------------------
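For reference, a short sketch of how NewTopic expands the accepted name forms, mirroring the cases covered by TestNewTopic above. Only the module import path and the use from an external package are assumptions here; the NewTopic function and the Topic fields are taken directly from topic.go.

package main

import (
	"fmt"
	"log"

	pulsar "github.com/cornelk/pulsar-go"
)

func main() {
	// Short form: expanded with the public tenant and default namespace.
	t1, err := pulsar.NewTopic("topic-1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t1.CompleteName) // persistent://public/default/topic-1

	// tenant/namespace/topic form: only the persistent domain is prepended.
	t2, err := pulsar.NewTopic("property/namespace/topic-1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t2.CompleteName) // persistent://property/namespace/topic-1

	// Fully qualified names must use the persistent or non-persistent domain.
	if _, err := pulsar.NewTopic("http://tenant/namespace/topic"); err != nil {
		fmt.Println(err) // invalid topic domain
	}
}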