├── .github ├── CODEOWNERS ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── documentbot.yml │ └── rust.yml ├── .gitignore ├── COPYRIGHT ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── PulsarApi.proto ├── README.md ├── build.rs ├── docs └── release.md ├── examples ├── batching.rs ├── consumer.rs ├── producer.rs ├── reader.rs └── round_trip.rs └── src ├── authentication.rs ├── client.rs ├── connection.rs ├── connection_manager.rs ├── consumer.rs ├── error.rs ├── executor.rs ├── lib.rs ├── message.rs ├── producer.rs ├── reader.rs └── service_discovery.rs /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @freeznet @RobertIndie @fantapsody @streamnative/pulsar-rust-client 2 | *.md @streamnative/technical-writers -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | *(If this PR fixes a github issue, please add `Fixes #`.)* 17 | 18 | Fixes # 19 | 20 | *(or if this PR is one task of a github issue, please add `Master Issue: #` to link to the master issue.)* 21 | 22 | Master Issue: # 23 | 24 | ### Motivation 25 | 26 | *Explain here the context, and why you're making that change. What is the problem you're trying to solve.* 27 | 28 | ### Modifications 29 | 30 | *Describe the modifications you've done.* 31 | 32 | ### Verifying this change 33 | 34 | - [ ] Make sure that the change passes the CI checks. 35 | 36 | *(Please pick either of the following options)* 37 | 38 | This change is a trivial rework / code cleanup without any test coverage. 39 | 40 | *(or)* 41 | 42 | This change is already covered by existing tests, such as *(please describe tests)*. 
43 | 44 | *(or)* 45 | 46 | This change added tests and can be verified as follows: 47 | 48 | *(example:)* 49 | - *Added integration tests for end-to-end deployment with large payloads (10MB)* 50 | - *Extended integration test for recovery after broker failure* 51 | 52 | ### Documentation 53 | 54 | Check the box below. 55 | 56 | Need to update docs? 57 | 58 | - [ ] `doc-required` 59 | 60 | (If you need help on updating docs, create a doc issue) 61 | 62 | - [ ] `no-need-doc` 63 | 64 | (Please explain why) 65 | 66 | - [ ] `doc` 67 | 68 | (If this PR contains doc changes) 69 | 70 | -------------------------------------------------------------------------------- /.github/workflows/documentbot.yml: -------------------------------------------------------------------------------- 1 | 2 | # 3 | # Licensed to the Apache Software Foundation (ASF) under one 4 | # or more contributor license agreements. See the NOTICE file 5 | # distributed with this work for additional information 6 | # regarding copyright ownership. The ASF licenses this file 7 | # to you under the Apache License, Version 2.0 (the 8 | # "License"); you may not use this file except in compliance 9 | # with the License. You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, 14 | # software distributed under the License is distributed on an 15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 16 | # KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations 18 | # under the License. 
19 | # 20 | 21 | name: Auto Labeling 22 | 23 | on: 24 | pull_request_target : 25 | types: 26 | - opened 27 | - edited 28 | - labeled 29 | 30 | 31 | 32 | # A GitHub token created for a PR coming from a fork doesn't have 33 | # 'admin' or 'write' permission (which is required to add labels) 34 | # To avoid this issue, you can use the `scheduled` event and run 35 | # this action on a certain interval. And check the label about the 36 | # document. 37 | 38 | jobs: 39 | labeling: 40 | if: ${{ github.repository == 'streamnative/pulsar-rs' }} 41 | permissions: 42 | pull-requests: write 43 | runs-on: ubuntu-latest 44 | steps: 45 | - uses: actions/checkout@v2 46 | 47 | - uses: streamnative/github-workflow-libraries/doc-label-check@master 48 | with: 49 | github-token: ${{ secrets.GITHUB_TOKEN }} 50 | label-pattern: '- \[(.*?)\] ?`(.+?)`' # matches '- [x] `label`' 51 | 52 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | build: 11 | 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - name: Start Pulsar Standalone Container 16 | run: docker run --name pulsar -p 6650:6650 -p 8080:8080 -d -e GITHUB_ACTIONS=true -e CI=true streamnative/pulsar:2.9.2.5 /pulsar/bin/pulsar standalone 17 | 18 | - uses: actions/checkout@v2 19 | - name: Build 20 | run: cargo build --verbose 21 | 22 | - name: Cache cargo registry 23 | uses: actions/cache@v1 24 | with: 25 | path: ~/.cargo/registry 26 | key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} 27 | - name: Cache cargo index 28 | uses: actions/cache@v1 29 | with: 30 | path: ~/.cargo/git 31 | key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} 32 | - name: Cache cargo build 33 | uses: actions/cache@v1 34 | with: 35 | path: target 36 | key: ${{
runner.os }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }} 37 | 38 | - name: Run tests 39 | run: cargo test -- --nocapture 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | .idea 3 | **/*.rs.bk 4 | Cargo.lock 5 | -------------------------------------------------------------------------------- /COPYRIGHT: -------------------------------------------------------------------------------- 1 | Short version for non-lawyers: 2 | 3 | The pulsar crate is dual-licensed under Apache 2.0 and MIT terms. 4 | 5 | 6 | Longer version: 7 | 8 | Copyrights in this library are retained by their contributors. No copyright 9 | assignment is required to contribute to the library. 10 | 11 | Except as otherwise noted (below and/or in individual files), the library is 12 | licensed under the Apache License, Version 2.0 or 13 | or the MIT license 14 | or , at your option. 15 | 16 | This library may include packages written by third parties which carry their 17 | own copyright notices and license terms. 
18 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sn-pulsar" 3 | version = "4.1.2" 4 | edition = "2018" 5 | authors = [ 6 | "Colin Stearns ", 7 | "Kevin Stenerson ", 8 | "Geoffroy Couprie ", 9 | ] 10 | 11 | license = "MIT/Apache-2.0" 12 | readme = "./README.md" 13 | repository = "https://github.com/streamnative/pulsar-rs" 14 | documentation = "https://docs.rs/sn-pulsar" 15 | description = "Rust client for Apache Pulsar" 16 | keywords = ["pulsar", "api", "client"] 17 | 18 | [lib] 19 | name = "pulsar" 20 | 21 | [dependencies] 22 | bytes = "1.0.0" 23 | crc = "2.0.0" 24 | futures = "0.3" 25 | nom = { version="7.0.0", default-features=false, features=["alloc"] } 26 | prost = "0.9.0" 27 | prost-derive = "0.9.0" 28 | rand = "0.8" 29 | chrono = "0.4" 30 | futures-timer = "3.0" 31 | log = "0.4.6" 32 | url = "2.1" 33 | regex = "1.1.7" 34 | bit-vec = "0.6" 35 | futures-io = "0.3" 36 | native-tls = "0.2" 37 | pem = "0.8" 38 | tokio = { version = "1.0", features = ["rt", "net", "time"], optional = true } 39 | tokio-util = { version = "0.6", features = ["codec"], optional = true } 40 | tokio-native-tls = { version = "0.3", optional = true } 41 | async-std = {version = "1.9", features = [ "attributes", "unstable" ], optional = true } 42 | asynchronous-codec = { version = "0.6", optional = true } 43 | async-native-tls = { version = "0.3", optional = true } 44 | lz4 = { version = "1.23", optional = true } 45 | flate2 = { version = "1.0", optional = true } 46 | zstd = { version = "0.9", optional = true } 47 | snap = { version = "1.0", optional = true } 48 | openidconnect = { version = "2.1.1", optional = true } 49 | oauth2 = { version = "4.1", optional = true } 50 | serde = { version = "1.0", features = ["derive"], optional = true } 51 | serde_json = { version = "1.0", optional = true } 52 | async-trait = "0.1.51" 53 | data-url = 
{ version = "0.1.1", optional = true } 54 | 55 | [dev-dependencies] 56 | serde = { version = "1.0", features = ["derive"] } 57 | serde_json = "1.0" 58 | env_logger = "0.9" 59 | tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } 60 | 61 | [build-dependencies] 62 | prost-build = "0.9.0" 63 | 64 | [features] 65 | default = [ "compression", "tokio-runtime", "async-std-runtime", "auth-oauth2" ] 66 | compression = [ "lz4", "flate2", "zstd", "snap" ] 67 | tokio-runtime = [ "tokio", "tokio-util", "tokio-native-tls" ] 68 | async-std-runtime = [ "async-std", "asynchronous-codec", "async-native-tls" ] 69 | auth-oauth2 = [ "openidconnect", "oauth2", "serde", "serde_json", "data-url" ] 70 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /PulsarApi.proto: -------------------------------------------------------------------------------- 1 | /** 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, 13 | * software distributed under the License is distributed on an 14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | * KIND, either express or implied. See the License for the 16 | * specific language governing permissions and limitations 17 | * under the License. 
18 | */ 19 | syntax = "proto2"; 20 | 21 | package pulsar.proto; 22 | option java_package = "org.apache.pulsar.common.api.proto"; 23 | option optimize_for = LITE_RUNTIME; 24 | 25 | message Schema { 26 | enum Type { 27 | None = 0; 28 | String = 1; 29 | Json = 2; 30 | Protobuf = 3; 31 | Avro = 4; 32 | Bool = 5; 33 | Int8 = 6; 34 | Int16 = 7; 35 | Int32 = 8; 36 | Int64 = 9; 37 | Float = 10; 38 | Double = 11; 39 | Date = 12; 40 | Time = 13; 41 | Timestamp = 14; 42 | KeyValue = 15; 43 | Instant = 16; 44 | LocalDate = 17; 45 | LocalTime = 18; 46 | LocalDateTime = 19; 47 | ProtobufNative = 20; 48 | } 49 | 50 | required string name = 1; 51 | required bytes schema_data = 3; 52 | required Type type = 4; 53 | repeated KeyValue properties = 5; 54 | 55 | } 56 | 57 | message MessageIdData { 58 | required uint64 ledgerId = 1; 59 | required uint64 entryId = 2; 60 | optional int32 partition = 3 [default = -1]; 61 | optional int32 batch_index = 4 [default = -1]; 62 | repeated int64 ack_set = 5; 63 | optional int32 batch_size = 6; 64 | } 65 | 66 | message KeyValue { 67 | required string key = 1; 68 | required string value = 2; 69 | } 70 | 71 | message KeyLongValue { 72 | required string key = 1; 73 | required uint64 value = 2; 74 | } 75 | 76 | message IntRange { 77 | required int32 start = 1; 78 | required int32 end = 2; 79 | } 80 | 81 | message EncryptionKeys { 82 | required string key = 1; 83 | required bytes value = 2; 84 | repeated KeyValue metadata = 3; 85 | } 86 | 87 | enum CompressionType { 88 | NONE = 0; 89 | LZ4 = 1; 90 | ZLIB = 2; 91 | ZSTD = 3; 92 | SNAPPY = 4; 93 | } 94 | 95 | message MessageMetadata { 96 | required string producer_name = 1; 97 | required uint64 sequence_id = 2; 98 | required uint64 publish_time = 3; 99 | repeated KeyValue properties = 4; 100 | 101 | // Property set on replicated message, 102 | // includes the source cluster name 103 | optional string replicated_from = 5; 104 | //key to decide partition for the msg 105 | optional string partition_key = 6; 
106 | // Override namespace's replication 107 | repeated string replicate_to = 7; 108 | optional CompressionType compression = 8 [default = NONE]; 109 | optional uint32 uncompressed_size = 9 [default = 0]; 110 | // Removed below checksum field from Metadata as 111 | // it should be part of send-command which keeps checksum of header + payload 112 | //optional sfixed64 checksum = 10; 113 | // differentiate single and batch message metadata 114 | optional int32 num_messages_in_batch = 11 [default = 1]; 115 | 116 | // the timestamp that this event occurs. it is typically set by applications. 117 | // if this field is omitted, `publish_time` can be used for the purpose of `event_time`. 118 | optional uint64 event_time = 12 [default = 0]; 119 | // Contains encryption key name, encrypted key and metadata to describe the key 120 | repeated EncryptionKeys encryption_keys = 13; 121 | // Algorithm used to encrypt data key 122 | optional string encryption_algo = 14; 123 | // Additional parameters required by encryption 124 | optional bytes encryption_param = 15; 125 | optional bytes schema_version = 16; 126 | 127 | optional bool partition_key_b64_encoded = 17 [ default = false ]; 128 | // Specific a key to overwrite the message key which used for ordering dispatch in Key_Shared mode. 129 | optional bytes ordering_key = 18; 130 | 131 | // Mark the message to be delivered at or after the specified timestamp 132 | optional int64 deliver_at_time = 19; 133 | 134 | // Identify whether a message is a "marker" message used for 135 | // internal metadata instead of application published data. 
136 | // Markers will generally not be propagated back to clients 137 | optional int32 marker_type = 20; 138 | 139 | // transaction related message info 140 | optional uint64 txnid_least_bits = 22; 141 | optional uint64 txnid_most_bits = 23; 142 | 143 | /// Add highest sequence id to support batch message with external sequence id 144 | optional uint64 highest_sequence_id = 24 [default = 0]; 145 | 146 | // Indicate if the message payload value is set 147 | optional bool null_value = 25 [ default = false ]; 148 | optional string uuid = 26; 149 | optional int32 num_chunks_from_msg = 27; 150 | optional int32 total_chunk_msg_size = 28; 151 | optional int32 chunk_id = 29; 152 | 153 | // Indicate if the message partition key is set 154 | optional bool null_partition_key = 30 [default = false]; 155 | } 156 | 157 | message SingleMessageMetadata { 158 | repeated KeyValue properties = 1; 159 | optional string partition_key = 2; 160 | required int32 payload_size = 3; 161 | optional bool compacted_out = 4 [default = false]; 162 | 163 | // the timestamp that this event occurs. it is typically set by applications. 164 | // if this field is omitted, `publish_time` can be used for the purpose of `event_time`. 165 | optional uint64 event_time = 5 [default = 0]; 166 | optional bool partition_key_b64_encoded = 6 [ default = false ]; 167 | // Specific a key to overwrite the message key which used for ordering dispatch in Key_Shared mode. 168 | optional bytes ordering_key = 7; 169 | // Allows consumer retrieve the sequence id that the producer set. 
170 | optional uint64 sequence_id = 8; 171 | // Indicate if the message payload value is set 172 | optional bool null_value = 9 [ default = false ]; 173 | // Indicate if the message partition key is set 174 | optional bool null_partition_key = 10 [ default = false]; 175 | } 176 | 177 | enum ServerError { 178 | UnknownError = 0; 179 | MetadataError = 1; // Error with ZK/metadata 180 | PersistenceError = 2; // Error writing reading from BK 181 | AuthenticationError = 3; // Non valid authentication 182 | AuthorizationError = 4; // Not authorized to use resource 183 | 184 | ConsumerBusy = 5; // Unable to subscribe/unsubscribe because 185 | // other consumers are connected 186 | ServiceNotReady = 6; // Any error that requires client retry operation with a fresh lookup 187 | ProducerBlockedQuotaExceededError = 7; // Unable to create producer because backlog quota exceeded 188 | ProducerBlockedQuotaExceededException = 8; // Exception while creating producer because quota exceeded 189 | ChecksumError = 9; // Error while verifying message checksum 190 | UnsupportedVersionError = 10; // Error when an older client/version doesn't support a required feature 191 | TopicNotFound = 11; // Topic not found 192 | SubscriptionNotFound = 12; // Subscription not found 193 | ConsumerNotFound = 13; // Consumer not found 194 | TooManyRequests = 14; // Error with too many simultaneously request 195 | TopicTerminatedError = 15; // The topic has been terminated 196 | 197 | ProducerBusy = 16; // Producer with same name is already connected 198 | InvalidTopicName = 17; // The topic name is not valid 199 | 200 | IncompatibleSchema = 18; // Specified schema was incompatible with topic schema 201 | ConsumerAssignError = 19; // Dispatcher assign consumer error 202 | 203 | TransactionCoordinatorNotFound = 20; // Transaction coordinator not found error 204 | InvalidTxnStatus = 21; // Invalid txn status error 205 | NotAllowedError = 22; // Not allowed error 206 | 207 | TransactionConflict = 23; // 
Ack with transaction conflict 208 | } 209 | 210 | enum AuthMethod { 211 | AuthMethodNone = 0; 212 | AuthMethodYcaV1 = 1; 213 | AuthMethodAthens = 2; 214 | } 215 | 216 | // Each protocol version identify new features that are 217 | // incrementally added to the protocol 218 | enum ProtocolVersion { 219 | v0 = 0; // Initial versioning 220 | v1 = 1; // Added application keep-alive 221 | v2 = 2; // Added RedeliverUnacknowledgedMessages Command 222 | v3 = 3; // Added compression with LZ4 and ZLib 223 | v4 = 4; // Added batch message support 224 | v5 = 5; // Added disconnect client w/o closing connection 225 | v6 = 6; // Added checksum computation for metadata + payload 226 | v7 = 7; // Added CommandLookupTopic - Binary Lookup 227 | v8 = 8; // Added CommandConsumerStats - Client fetches broker side consumer stats 228 | v9 = 9; // Added end of topic notification 229 | v10 = 10;// Added proxy to broker 230 | v11 = 11;// C++ consumers before this version are not correctly handling the checksum field 231 | v12 = 12;// Added get topic's last messageId from broker 232 | // Added CommandActiveConsumerChange 233 | // Added CommandGetTopicsOfNamespace 234 | v13 = 13; // Schema-registry : added avro schema format for json 235 | v14 = 14; // Add CommandAuthChallenge and CommandAuthResponse for mutual auth 236 | // Added Key_Shared subscription 237 | v15 = 15; // Add CommandGetOrCreateSchema and CommandGetOrCreateSchemaResponse 238 | } 239 | 240 | message CommandConnect { 241 | required string client_version = 1; 242 | optional AuthMethod auth_method = 2; // Deprecated. Use "auth_method_name" instead. 243 | optional string auth_method_name = 5; 244 | optional bytes auth_data = 3; 245 | optional int32 protocol_version = 4 [default = 0]; 246 | 247 | // Client can ask to be proxyied to a specific broker 248 | // This is only honored by a Pulsar proxy 249 | optional string proxy_to_broker_url = 6; 250 | 251 | // Original principal that was verified by 252 | // a Pulsar proxy. 
In this case the auth info above 253 | // will be the auth of the proxy itself 254 | optional string original_principal = 7; 255 | 256 | // Original auth role and auth Method that was passed 257 | // to the proxy. In this case the auth info above 258 | // will be the auth of the proxy itself 259 | optional string original_auth_data = 8; 260 | optional string original_auth_method = 9; 261 | 262 | // Feature flags 263 | optional FeatureFlags feature_flags = 10; 264 | } 265 | 266 | message FeatureFlags { 267 | optional bool supports_auth_refresh = 1 [default = false]; 268 | } 269 | 270 | message CommandConnected { 271 | required string server_version = 1; 272 | optional int32 protocol_version = 2 [default = 0]; 273 | optional int32 max_message_size = 3; 274 | } 275 | 276 | message CommandAuthResponse { 277 | optional string client_version = 1; 278 | optional AuthData response = 2; 279 | optional int32 protocol_version = 3 [default = 0]; 280 | } 281 | 282 | message CommandAuthChallenge { 283 | optional string server_version = 1; 284 | optional AuthData challenge = 2; 285 | optional int32 protocol_version = 3 [default = 0]; 286 | } 287 | 288 | // To support mutual authentication type, such as Sasl, reuse this command to mutual auth. 
// Generic container for authentication data exchanged during the
// connect / auth-challenge / auth-response handshake.
message AuthData {
    optional string auth_method_name = 1;
    optional bytes auth_data = 2;
}

// Hash-range assignment policy for Key_Shared subscriptions.
enum KeySharedMode {
    AUTO_SPLIT = 0;
    STICKY = 1;
}

message KeySharedMeta {
    required KeySharedMode keySharedMode = 1;
    repeated IntRange hashRanges = 3;
    optional bool allowOutOfOrderDelivery = 4 [default = false];
}

// Create a subscription (consumer) on a topic.
message CommandSubscribe {
    enum SubType {
        Exclusive = 0;
        Shared = 1;
        Failover = 2;
        Key_Shared = 3;
    }
    required string topic = 1;
    required string subscription = 2;
    required SubType subType = 3;

    required uint64 consumer_id = 4;
    required uint64 request_id = 5;
    optional string consumer_name = 6;
    optional int32 priority_level = 7;

    // Signal whether the subscription should be backed by a
    // durable cursor or not
    optional bool durable = 8 [default = true];

    // If specified, the subscription will position the cursor's
    // mark-delete position on the particular message id and
    // will send messages from that point
    optional MessageIdData start_message_id = 9;

    /// Add optional metadata key=value to this consumer
    repeated KeyValue metadata = 10;

    optional bool read_compacted = 11;

    optional Schema schema = 12;
    enum InitialPosition {
        Latest = 0;
        Earliest = 1;
    }
    // Signal whether the subscription will initialize on latest
    // or not -- earliest
    optional InitialPosition initialPosition = 13 [default = Latest];

    // Mark the subscription as "replicated". Pulsar will make sure
    // to periodically sync the state of replicated subscriptions
    // across different clusters (when using geo-replication).
347 | optional bool replicate_subscription_state = 14; 348 | 349 | // If true, the subscribe operation will cause a topic to be 350 | // created if it does not exist already (and if topic auto-creation 351 | // is allowed by broker. 352 | // If false, the subscribe operation will fail if the topic 353 | // does not exist. 354 | optional bool force_topic_creation = 15 [default = true]; 355 | 356 | // If specified, the subscription will reset cursor's position back 357 | // to specified seconds and will send messages from that point 358 | optional uint64 start_message_rollback_duration_sec = 16 [default = 0]; 359 | 360 | optional KeySharedMeta keySharedMeta = 17; 361 | } 362 | 363 | message CommandPartitionedTopicMetadata { 364 | required string topic = 1; 365 | required uint64 request_id = 2; 366 | // TODO - Remove original_principal, original_auth_data, original_auth_method 367 | // Original principal that was verified by 368 | // a Pulsar proxy. 369 | optional string original_principal = 3; 370 | 371 | // Original auth role and auth Method that was passed 372 | // to the proxy. 373 | optional string original_auth_data = 4; 374 | optional string original_auth_method = 5; 375 | } 376 | 377 | message CommandPartitionedTopicMetadataResponse { 378 | enum LookupType { 379 | Success = 0; 380 | Failed = 1; 381 | } 382 | optional uint32 partitions = 1; // Optional in case of error 383 | required uint64 request_id = 2; 384 | optional LookupType response = 3; 385 | optional ServerError error = 4; 386 | optional string message = 5; 387 | } 388 | 389 | message CommandLookupTopic { 390 | required string topic = 1; 391 | required uint64 request_id = 2; 392 | optional bool authoritative = 3 [default = false]; 393 | 394 | // TODO - Remove original_principal, original_auth_data, original_auth_method 395 | // Original principal that was verified by 396 | // a Pulsar proxy. 
    optional string original_principal = 4;

    // Original auth role and auth Method that was passed
    // to the proxy.
    optional string original_auth_data = 5;
    optional string original_auth_method = 6;
    //
    optional string advertised_listener_name = 7;
}

message CommandLookupTopicResponse {
    enum LookupType {
        Redirect = 0;
        Connect = 1;
        Failed = 2;
    }

    optional string brokerServiceUrl = 1; // Optional in case of error
    optional string brokerServiceUrlTls = 2;
    optional LookupType response = 3;
    required uint64 request_id = 4;
    optional bool authoritative = 5 [default = false];
    optional ServerError error = 6;
    optional string message = 7;

    // If it's true, indicates to the client that it must
    // always connect through the service url after the
    // lookup has been completed.
    optional bool proxy_through_service_url = 8 [default = false];
}

/// Create a new Producer on a topic, assigning the given producer_id,
/// all messages sent with this producer_id will be persisted on the topic
message CommandProducer {
    required string topic = 1;
    required uint64 producer_id = 2;
    required uint64 request_id = 3;

    /// If a producer name is specified, the name will be used,
    /// otherwise the broker will generate a unique name
    optional string producer_name = 4;

    optional bool encrypted = 5 [default = false];

    /// Add optional metadata key=value to this producer
    repeated KeyValue metadata = 6;

    optional Schema schema = 7;

    // If the producer reconnects to the broker, the epoch of this producer is incremented by 1
    optional uint64 epoch = 8 [default = 0];

    // Indicate whether the name of the producer is generated or user provided
    // Use default true here in order to be forward compatible with the client
    optional bool user_provided_producer_name = 9 [default = true];
}

// Publish a message (or a batch of messages) on a producer.
message CommandSend {
    required uint64 producer_id = 1;
    required uint64 sequence_id = 2;
    optional int32 num_messages = 3 [default = 1];
    optional uint64 txnid_least_bits = 4 [default = 0];
    optional uint64 txnid_most_bits = 5 [default = 0];

    /// Add highest sequence id to support batch message with external sequence id
    optional uint64 highest_sequence_id = 6 [default = 0];
    optional bool is_chunk =7 [default = false];
}

// Broker acknowledgement that a message was persisted.
message CommandSendReceipt {
    required uint64 producer_id = 1;
    required uint64 sequence_id = 2;
    optional MessageIdData message_id = 3;
    optional uint64 highest_sequence_id = 4 [default = 0];
}

// Broker notification that publishing a message failed.
message CommandSendError {
    required uint64 producer_id = 1;
    required uint64 sequence_id = 2;
    required ServerError error = 3;
    required string message = 4;
}

// A message delivered from broker to consumer.
message CommandMessage {
    required uint64 consumer_id = 1;
    required MessageIdData message_id = 2;
    optional uint32 redelivery_count = 3 [default = 0];
    repeated int64 ack_set = 4;
}

// Acknowledge one or more messages on a consumer.
message CommandAck {
    enum AckType {
        Individual = 0;
        Cumulative = 1;
    }

    required uint64 consumer_id = 1;
    required AckType ack_type = 2;

    // In case of individual acks, the client can pass a list of message ids
    repeated MessageIdData message_id = 3;

    // Acks can contain a flag to indicate the consumer
    // received an invalid message that got discarded
    // before being passed on to the application.
502 | enum ValidationError { 503 | UncompressedSizeCorruption = 0; 504 | DecompressionError = 1; 505 | ChecksumMismatch = 2; 506 | BatchDeSerializeError = 3; 507 | DecryptionError = 4; 508 | } 509 | 510 | optional ValidationError validation_error = 4; 511 | repeated KeyLongValue properties = 5; 512 | 513 | optional uint64 txnid_least_bits = 6 [default = 0]; 514 | optional uint64 txnid_most_bits = 7 [default = 0]; 515 | optional uint64 request_id = 8; 516 | } 517 | 518 | message CommandAckResponse { 519 | required uint64 consumer_id = 1; 520 | optional uint64 txnid_least_bits = 2 [default = 0]; 521 | optional uint64 txnid_most_bits = 3 [default = 0]; 522 | optional ServerError error = 4; 523 | optional string message = 5; 524 | optional uint64 request_id = 6; 525 | } 526 | 527 | // changes on active consumer 528 | message CommandActiveConsumerChange { 529 | required uint64 consumer_id = 1; 530 | optional bool is_active = 2 [default = false]; 531 | } 532 | 533 | message CommandFlow { 534 | required uint64 consumer_id = 1; 535 | 536 | // Max number of messages to prefetch, in addition 537 | // of any number previously specified 538 | required uint32 messagePermits = 2; 539 | } 540 | 541 | message CommandUnsubscribe { 542 | required uint64 consumer_id = 1; 543 | required uint64 request_id = 2; 544 | } 545 | 546 | // Reset an existing consumer to a particular message id 547 | message CommandSeek { 548 | required uint64 consumer_id = 1; 549 | required uint64 request_id = 2; 550 | 551 | optional MessageIdData message_id = 3; 552 | optional uint64 message_publish_time = 4; 553 | } 554 | 555 | // Message sent by broker to client when a topic 556 | // has been forcefully terminated and there are no more 557 | // messages left to consume 558 | message CommandReachedEndOfTopic { 559 | required uint64 consumer_id = 1; 560 | } 561 | 562 | message CommandCloseProducer { 563 | required uint64 producer_id = 1; 564 | required uint64 request_id = 2; 565 | } 566 | 567 | message 
CommandCloseConsumer { 568 | required uint64 consumer_id = 1; 569 | required uint64 request_id = 2; 570 | } 571 | 572 | message CommandRedeliverUnacknowledgedMessages { 573 | required uint64 consumer_id = 1; 574 | repeated MessageIdData message_ids = 2; 575 | } 576 | 577 | message CommandSuccess { 578 | required uint64 request_id = 1; 579 | optional Schema schema = 2; 580 | } 581 | 582 | /// Response from CommandProducer 583 | message CommandProducerSuccess { 584 | required uint64 request_id = 1; 585 | required string producer_name = 2; 586 | 587 | // The last sequence id that was stored by this producer in the previous session 588 | // This will only be meaningful if deduplication has been enabled. 589 | optional int64 last_sequence_id = 3 [default = -1]; 590 | optional bytes schema_version = 4; 591 | } 592 | 593 | message CommandError { 594 | required uint64 request_id = 1; 595 | required ServerError error = 2; 596 | required string message = 3; 597 | } 598 | 599 | // Commands to probe the state of connection. 600 | // When either client or broker doesn't receive commands for certain 601 | // amount of time, they will send a Ping probe. 602 | message CommandPing { 603 | } 604 | message CommandPong { 605 | } 606 | 607 | message CommandConsumerStats { 608 | required uint64 request_id = 1; 609 | // required string topic_name = 2; 610 | // required string subscription_name = 3; 611 | required uint64 consumer_id = 4; 612 | } 613 | 614 | message CommandConsumerStatsResponse { 615 | required uint64 request_id = 1; 616 | optional ServerError error_code = 2; 617 | optional string error_message = 3; 618 | 619 | /// Total rate of messages delivered to the consumer. msg/s 620 | optional double msgRateOut = 4; 621 | 622 | /// Total throughput delivered to the consumer. bytes/s 623 | optional double msgThroughputOut = 5; 624 | 625 | /// Total rate of messages redelivered by this consumer. 
msg/s 626 | optional double msgRateRedeliver = 6; 627 | 628 | /// Name of the consumer 629 | optional string consumerName = 7; 630 | 631 | /// Number of available message permits for the consumer 632 | optional uint64 availablePermits = 8; 633 | 634 | /// Number of unacknowledged messages for the consumer 635 | optional uint64 unackedMessages = 9; 636 | 637 | /// Flag to verify if consumer is blocked due to reaching threshold of unacked messages 638 | optional bool blockedConsumerOnUnackedMsgs = 10; 639 | 640 | /// Address of this consumer 641 | optional string address = 11; 642 | 643 | /// Timestamp of connection 644 | optional string connectedSince = 12; 645 | 646 | /// Whether this subscription is Exclusive or Shared or Failover 647 | optional string type = 13; 648 | 649 | /// Total rate of messages expired on this subscription. msg/s 650 | optional double msgRateExpired = 14; 651 | 652 | /// Number of messages in the subscription backlog 653 | optional uint64 msgBacklog = 15; 654 | } 655 | 656 | message CommandGetLastMessageId { 657 | required uint64 consumer_id = 1; 658 | required uint64 request_id = 2; 659 | } 660 | 661 | message CommandGetLastMessageIdResponse { 662 | required MessageIdData last_message_id = 1; 663 | required uint64 request_id = 2; 664 | } 665 | 666 | message CommandGetTopicsOfNamespace { 667 | enum Mode { 668 | PERSISTENT = 0; 669 | NON_PERSISTENT = 1; 670 | ALL = 2; 671 | } 672 | required uint64 request_id = 1; 673 | required string namespace = 2; 674 | optional Mode mode = 3 [default = PERSISTENT]; 675 | } 676 | 677 | message CommandGetTopicsOfNamespaceResponse { 678 | required uint64 request_id = 1; 679 | repeated string topics = 2; 680 | } 681 | 682 | message CommandGetSchema { 683 | required uint64 request_id = 1; 684 | required string topic = 2; 685 | 686 | optional bytes schema_version = 3; 687 | } 688 | 689 | message CommandGetSchemaResponse { 690 | required uint64 request_id = 1; 691 | optional ServerError error_code = 2; 692 | 
    optional string error_message = 3; // Unset on success

    optional Schema schema = 4;
    optional bytes schema_version = 5;
}

// Fetch the schema for a topic, registering the supplied schema if absent.
message CommandGetOrCreateSchema {
    required uint64 request_id = 1;
    required string topic = 2;
    required Schema schema = 3;
}

message CommandGetOrCreateSchemaResponse {
    required uint64 request_id = 1;
    optional ServerError error_code = 2; // Unset on success
    optional string error_message = 3;

    optional bytes schema_version = 4;
}

/// --- transaction related ---

// Terminal outcome requested when ending a transaction.
enum TxnAction {
    COMMIT = 0;
    ABORT = 1;
}

// Open a new transaction on a transaction coordinator.
message CommandNewTxn {
    required uint64 request_id = 1;
    optional uint64 txn_ttl_seconds = 2 [default = 0];
    optional uint64 tc_id = 3 [default = 0]; // presumably the transaction coordinator id — TODO confirm
}

message CommandNewTxnResponse {
    required uint64 request_id = 1;
    // The two halves of the 128-bit transaction id assigned to the new txn
    optional uint64 txnid_least_bits = 2 [default = 0];
    optional uint64 txnid_most_bits = 3 [default = 0];
    optional ServerError error = 4; // Unset on success
    optional string message = 5;
}

// Register topic partitions as participants of an open transaction.
message CommandAddPartitionToTxn {
    required uint64 request_id = 1;
    optional uint64 txnid_least_bits = 2 [default = 0];
    optional uint64 txnid_most_bits = 3 [default = 0];
    repeated string partitions = 4;
}

message CommandAddPartitionToTxnResponse {
    required uint64 request_id = 1;
    optional uint64 txnid_least_bits = 2 [default = 0];
    optional uint64 txnid_most_bits = 3 [default = 0];
    optional ServerError error = 4; // Unset on success
    optional string message = 5;
}

// A (topic, subscription) pair identifying a subscription.
message Subscription {
    required string topic = 1;
    required string subscription = 2;
}

// Register subscriptions as participants of an open transaction.
message CommandAddSubscriptionToTxn {
    required uint64 request_id = 1;
    optional uint64 txnid_least_bits = 2 [default = 0];
    optional uint64 txnid_most_bits = 3 [default = 0];
    repeated Subscription subscription = 4;
}
758 | 759 | message CommandAddSubscriptionToTxnResponse { 760 | required uint64 request_id = 1; 761 | optional uint64 txnid_least_bits = 2 [default = 0]; 762 | optional uint64 txnid_most_bits = 3 [default = 0]; 763 | optional ServerError error = 4; 764 | optional string message = 5; 765 | } 766 | 767 | message CommandEndTxn { 768 | required uint64 request_id = 1; 769 | optional uint64 txnid_least_bits = 2 [default = 0]; 770 | optional uint64 txnid_most_bits = 3 [default = 0]; 771 | optional TxnAction txn_action = 4; 772 | repeated MessageIdData message_id = 5; 773 | } 774 | 775 | message CommandEndTxnResponse { 776 | required uint64 request_id = 1; 777 | optional uint64 txnid_least_bits = 2 [default = 0]; 778 | optional uint64 txnid_most_bits = 3 [default = 0]; 779 | optional ServerError error = 4; 780 | optional string message = 5; 781 | } 782 | 783 | message CommandEndTxnOnPartition { 784 | required uint64 request_id = 1; 785 | optional uint64 txnid_least_bits = 2 [default = 0]; 786 | optional uint64 txnid_most_bits = 3 [default = 0]; 787 | optional string topic = 4; 788 | optional TxnAction txn_action = 5; 789 | repeated MessageIdData message_id = 6; 790 | } 791 | 792 | message CommandEndTxnOnPartitionResponse { 793 | required uint64 request_id = 1; 794 | optional uint64 txnid_least_bits = 2 [default = 0]; 795 | optional uint64 txnid_most_bits = 3 [default = 0]; 796 | optional ServerError error = 4; 797 | optional string message = 5; 798 | } 799 | 800 | message CommandEndTxnOnSubscription { 801 | required uint64 request_id = 1; 802 | optional uint64 txnid_least_bits = 2 [default = 0]; 803 | optional uint64 txnid_most_bits = 3 [default = 0]; 804 | optional Subscription subscription= 4; 805 | optional TxnAction txn_action = 5; 806 | } 807 | 808 | message CommandEndTxnOnSubscriptionResponse { 809 | required uint64 request_id = 1; 810 | optional uint64 txnid_least_bits = 2 [default = 0]; 811 | optional uint64 txnid_most_bits = 3 [default = 0]; 812 | optional 
ServerError error = 4; 813 | optional string message = 5; 814 | } 815 | 816 | message BaseCommand { 817 | enum Type { 818 | CONNECT = 2; 819 | CONNECTED = 3; 820 | SUBSCRIBE = 4; 821 | 822 | PRODUCER = 5; 823 | 824 | SEND = 6; 825 | SEND_RECEIPT= 7; 826 | SEND_ERROR = 8; 827 | 828 | MESSAGE = 9; 829 | ACK = 10; 830 | FLOW = 11; 831 | 832 | UNSUBSCRIBE = 12; 833 | 834 | SUCCESS = 13; 835 | ERROR = 14; 836 | 837 | CLOSE_PRODUCER = 15; 838 | CLOSE_CONSUMER = 16; 839 | 840 | PRODUCER_SUCCESS = 17; 841 | 842 | PING = 18; 843 | PONG = 19; 844 | 845 | REDELIVER_UNACKNOWLEDGED_MESSAGES = 20; 846 | 847 | PARTITIONED_METADATA = 21; 848 | PARTITIONED_METADATA_RESPONSE = 22; 849 | 850 | LOOKUP = 23; 851 | LOOKUP_RESPONSE = 24; 852 | 853 | CONSUMER_STATS = 25; 854 | CONSUMER_STATS_RESPONSE = 26; 855 | 856 | REACHED_END_OF_TOPIC = 27; 857 | 858 | SEEK = 28; 859 | 860 | GET_LAST_MESSAGE_ID = 29; 861 | GET_LAST_MESSAGE_ID_RESPONSE = 30; 862 | 863 | ACTIVE_CONSUMER_CHANGE = 31; 864 | 865 | 866 | GET_TOPICS_OF_NAMESPACE = 32; 867 | GET_TOPICS_OF_NAMESPACE_RESPONSE = 33; 868 | 869 | GET_SCHEMA = 34; 870 | GET_SCHEMA_RESPONSE = 35; 871 | 872 | AUTH_CHALLENGE = 36; 873 | AUTH_RESPONSE = 37; 874 | 875 | ACK_RESPONSE = 38; 876 | 877 | GET_OR_CREATE_SCHEMA = 39; 878 | GET_OR_CREATE_SCHEMA_RESPONSE = 40; 879 | 880 | // transaction related 881 | NEW_TXN = 50; 882 | NEW_TXN_RESPONSE = 51; 883 | 884 | ADD_PARTITION_TO_TXN = 52; 885 | ADD_PARTITION_TO_TXN_RESPONSE = 53; 886 | 887 | ADD_SUBSCRIPTION_TO_TXN = 54; 888 | ADD_SUBSCRIPTION_TO_TXN_RESPONSE = 55; 889 | 890 | END_TXN = 56; 891 | END_TXN_RESPONSE = 57; 892 | 893 | END_TXN_ON_PARTITION = 58; 894 | END_TXN_ON_PARTITION_RESPONSE = 59; 895 | 896 | END_TXN_ON_SUBSCRIPTION = 60; 897 | END_TXN_ON_SUBSCRIPTION_RESPONSE = 61; 898 | 899 | } 900 | 901 | 902 | required Type type = 1; 903 | 904 | optional CommandConnect connect = 2; 905 | optional CommandConnected connected = 3; 906 | 907 | optional CommandSubscribe subscribe = 4; 908 | optional 
CommandProducer producer = 5; 909 | optional CommandSend send = 6; 910 | optional CommandSendReceipt send_receipt = 7; 911 | optional CommandSendError send_error = 8; 912 | optional CommandMessage message = 9; 913 | optional CommandAck ack = 10; 914 | optional CommandFlow flow = 11; 915 | optional CommandUnsubscribe unsubscribe = 12; 916 | 917 | optional CommandSuccess success = 13; 918 | optional CommandError error = 14; 919 | 920 | optional CommandCloseProducer close_producer = 15; 921 | optional CommandCloseConsumer close_consumer = 16; 922 | 923 | optional CommandProducerSuccess producer_success = 17; 924 | optional CommandPing ping = 18; 925 | optional CommandPong pong = 19; 926 | optional CommandRedeliverUnacknowledgedMessages redeliverUnacknowledgedMessages = 20; 927 | 928 | optional CommandPartitionedTopicMetadata partitionMetadata = 21; 929 | optional CommandPartitionedTopicMetadataResponse partitionMetadataResponse = 22; 930 | 931 | optional CommandLookupTopic lookupTopic = 23; 932 | optional CommandLookupTopicResponse lookupTopicResponse = 24; 933 | 934 | optional CommandConsumerStats consumerStats = 25; 935 | optional CommandConsumerStatsResponse consumerStatsResponse = 26; 936 | 937 | optional CommandReachedEndOfTopic reachedEndOfTopic = 27; 938 | 939 | optional CommandSeek seek = 28; 940 | 941 | optional CommandGetLastMessageId getLastMessageId = 29; 942 | optional CommandGetLastMessageIdResponse getLastMessageIdResponse = 30; 943 | 944 | optional CommandActiveConsumerChange active_consumer_change = 31; 945 | 946 | optional CommandGetTopicsOfNamespace getTopicsOfNamespace = 32; 947 | optional CommandGetTopicsOfNamespaceResponse getTopicsOfNamespaceResponse = 33; 948 | 949 | optional CommandGetSchema getSchema = 34; 950 | optional CommandGetSchemaResponse getSchemaResponse = 35; 951 | 952 | optional CommandAuthChallenge authChallenge = 36; 953 | optional CommandAuthResponse authResponse = 37; 954 | 955 | optional CommandAckResponse ackResponse = 38; 
956 | 957 | optional CommandGetOrCreateSchema getOrCreateSchema = 39; 958 | optional CommandGetOrCreateSchemaResponse getOrCreateSchemaResponse = 40; 959 | 960 | // transaction related 961 | optional CommandNewTxn newTxn = 50; 962 | optional CommandNewTxnResponse newTxnResponse = 51; 963 | optional CommandAddPartitionToTxn addPartitionToTxn= 52; 964 | optional CommandAddPartitionToTxnResponse addPartitionToTxnResponse = 53; 965 | optional CommandAddSubscriptionToTxn addSubscriptionToTxn = 54; 966 | optional CommandAddSubscriptionToTxnResponse addSubscriptionToTxnResponse = 55; 967 | optional CommandEndTxn endTxn = 56; 968 | optional CommandEndTxnResponse endTxnResponse = 57; 969 | optional CommandEndTxnOnPartition endTxnOnPartition = 58; 970 | optional CommandEndTxnOnPartitionResponse endTxnOnPartitionResponse = 59; 971 | optional CommandEndTxnOnSubscription endTxnOnSubscription = 60; 972 | optional CommandEndTxnOnSubscriptionResponse endTxnOnSubscriptionResponse = 61; 973 | } 974 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | **NOTE: This fork is deprecated. The Rust Client is actively maintained in https://github.com/streamnative/pulsar-rs. See this [thread](https://github.com/streamnative-oss/sn-pulsar-rs/issues/20) for more details.** 2 | 3 | ## Pulsar 4 | #### Future-based Rust client for [Apache Pulsar](https://pulsar.apache.org/) 5 | ![](https://img.shields.io/crates/v/sn-pulsar?style=flat-square) 6 | 7 | [Documentation](https://docs.rs/sn-pulsar) 8 | 9 | This is a pure Rust client for Apache Pulsar that does not depend on the 10 | C++ Pulsar library. It provides an async/await based API, compatible with 11 | [Tokio](https://tokio.rs/) and [async-std](https://async.rs/). 
12 | 13 | Features: 14 | - URL based (`pulsar://` and `pulsar+ssl://`) connections with DNS lookup 15 | - multi topic consumers (based on a regex or list) 16 | - TLS connection 17 | - configurable executor (Tokio or async-std) 18 | - automatic reconnection with exponential back off 19 | - message batching 20 | - compression with LZ4, zlib, zstd or Snappy (can be deactivated with Cargo features) 21 | 22 | ### Getting Started 23 | Cargo.toml 24 | ```toml 25 | futures = "0.3" 26 | pulsar = { version = "4.1.3", package = "sn_pulsar" } 27 | tokio = "1.0" 28 | ``` 29 | 30 | #### Producing 31 | ```rust 32 | use serde::{Serialize, Deserialize}; 33 | use pulsar::{ 34 | message::proto, producer, Error as PulsarError, Pulsar, SerializeMessage, TokioExecutor, 35 | }; 36 | 37 | #[derive(Serialize, Deserialize)] 38 | struct TestData { 39 | data: String, 40 | } 41 | 42 | impl<'a> SerializeMessage for &'a TestData { 43 | fn serialize_message(input: Self) -> Result { 44 | let payload = serde_json::to_vec(input).map_err(|e| PulsarError::Custom(e.to_string()))?; 45 | Ok(producer::Message { 46 | payload, 47 | ..Default::default() 48 | }) 49 | } 50 | } 51 | 52 | #[tokio::main] 53 | async fn main() -> Result<(), pulsar::Error> { 54 | env_logger::init(); 55 | 56 | let addr = "pulsar://127.0.0.1:6650"; 57 | let pulsar: Pulsar<_> = Pulsar::builder(addr, TokioExecutor).build().await?; 58 | let mut producer = pulsar 59 | .producer() 60 | .with_topic("non-persistent://public/default/test") 61 | .with_name("my producer") 62 | .with_options(producer::ProducerOptions { 63 | schema: Some(proto::Schema { 64 | type_: proto::schema::Type::String as i32, 65 | ..Default::default() 66 | }), 67 | ..Default::default() 68 | }) 69 | .build() 70 | .await?; 71 | 72 | let mut counter = 0usize; 73 | loop { 74 | producer 75 | .send(TestData { 76 | data: "data".to_string(), 77 | }) 78 | .await?; 79 | 80 | counter += 1; 81 | println!("{} messages", counter); 82 | 
tokio::time::sleep(std::time::Duration::from_millis(2000)).await; 83 | } 84 | } 85 | ``` 86 | 87 | #### Consuming 88 | ```rust 89 | #[macro_use] 90 | extern crate serde; 91 | use futures::TryStreamExt; 92 | use pulsar::{ 93 | message::proto::command_subscribe::SubType, message::Payload, Consumer, DeserializeMessage, 94 | Pulsar, TokioExecutor, 95 | }; 96 | 97 | #[derive(Serialize, Deserialize)] 98 | struct TestData { 99 | data: String, 100 | } 101 | 102 | impl DeserializeMessage for TestData { 103 | type Output = Result; 104 | 105 | fn deserialize_message(payload: &Payload) -> Self::Output { 106 | serde_json::from_slice(&payload.data) 107 | } 108 | } 109 | 110 | #[tokio::main] 111 | async fn main() -> Result<(), pulsar::Error> { 112 | env_logger::init(); 113 | 114 | let addr = "pulsar://127.0.0.1:6650"; 115 | let pulsar: Pulsar<_> = Pulsar::builder(addr, TokioExecutor).build().await?; 116 | 117 | let mut consumer: Consumer = pulsar 118 | .consumer() 119 | .with_topic("test") 120 | .with_consumer_name("test_consumer") 121 | .with_subscription_type(SubType::Exclusive) 122 | .with_subscription("test_subscription") 123 | .build() 124 | .await?; 125 | 126 | let mut counter = 0usize; 127 | while let Some(msg) = consumer.try_next().await? { 128 | consumer.ack(&msg).await?; 129 | let data = match msg.deserialize() { 130 | Ok(data) => data, 131 | Err(e) => { 132 | log::error!("could not deserialize message: {:?}", e); 133 | break; 134 | } 135 | }; 136 | 137 | if data.data.as_str() != "data" { 138 | log::error!("Unexpected payload: {}", &data.data); 139 | break; 140 | } 141 | counter += 1; 142 | log::info!("got {} messages", counter); 143 | } 144 | 145 | Ok(()) 146 | } 147 | ``` 148 | 149 | ### License 150 | This library is licensed under the terms of both the MIT license and the Apache License (Version 2.0), and may include packages written by third parties which carry their own copyright notices and license terms. 
151 | 152 | See [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT), and 153 | [COPYRIGHT](COPYRIGHT) for details. 154 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | extern crate prost_build; 2 | 3 | fn main() { 4 | prost_build::compile_protos(&["./PulsarApi.proto"], &["./"]).unwrap(); 5 | } 6 | -------------------------------------------------------------------------------- /docs/release.md: -------------------------------------------------------------------------------- 1 | # How to release 2 | 3 | Make sure that `branch-x.y` exist. 4 | 5 | - Add streamnative bot app to your slack 6 | - Run command `/release pulsar-rs --branch=x.y --version=x.y.z` 7 | 8 | -------------------------------------------------------------------------------- /examples/batching.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate serde; 3 | use futures::{future::join_all, TryStreamExt}; 4 | use pulsar::{ 5 | message::proto, message::proto::command_subscribe::SubType, message::Payload, producer, 6 | Consumer, DeserializeMessage, Error as PulsarError, Pulsar, SerializeMessage, TokioExecutor, 7 | }; 8 | 9 | #[derive(Debug, Serialize, Deserialize)] 10 | struct TestData { 11 | data: String, 12 | } 13 | 14 | impl SerializeMessage for TestData { 15 | fn serialize_message(input: Self) -> Result { 16 | let payload = serde_json::to_vec(&input).map_err(|e| PulsarError::Custom(e.to_string()))?; 17 | Ok(producer::Message { 18 | payload, 19 | ..Default::default() 20 | }) 21 | } 22 | } 23 | 24 | impl DeserializeMessage for TestData { 25 | type Output = Result; 26 | 27 | fn deserialize_message(payload: &Payload) -> Self::Output { 28 | serde_json::from_slice(&payload.data) 29 | } 30 | } 31 | 32 | #[tokio::main] 33 | async fn main() -> Result<(), pulsar::Error> { 34 | env_logger::init(); 35 | 36 | let 
addr = "pulsar://127.0.0.1:6650"; 37 | let pulsar: Pulsar<_> = Pulsar::builder(addr, TokioExecutor).build().await?; 38 | let mut producer = pulsar 39 | .producer() 40 | .with_topic("test-batch-compression-snappy") 41 | .with_name("my-producer2".to_string()) 42 | .with_options(producer::ProducerOptions { 43 | batch_size: Some(4), 44 | //compression: Some(proto::CompressionType::Lz4), 45 | //compression: Some(proto::CompressionType::Zlib), 46 | //compression: Some(proto::CompressionType::Zstd), 47 | compression: Some(proto::CompressionType::Snappy), 48 | ..Default::default() 49 | }) 50 | .build() 51 | .await?; 52 | 53 | producer 54 | .check_connection() 55 | .await 56 | .map(|_| println!("connection ok"))?; 57 | 58 | tokio::task::spawn(async move { 59 | let mut counter = 0usize; 60 | let mut v = Vec::new(); 61 | loop { 62 | println!("will send"); 63 | let receipt_rx = producer 64 | .send(TestData { 65 | data: "data".to_string(), 66 | }) 67 | .await 68 | .unwrap(); 69 | v.push(receipt_rx); 70 | println!("sent"); 71 | counter += 1; 72 | if counter % 4 == 0 { 73 | //producer.send_batch().await.unwrap(); 74 | println!("sent {} messages", counter); 75 | break; 76 | } 77 | } 78 | 79 | println!("receipts: {:?}", join_all(v).await); 80 | }); 81 | 82 | let mut consumer: Consumer = pulsar 83 | .consumer() 84 | .with_topic("test-batch-compression-snappy") 85 | .with_consumer_name("test_consumer") 86 | .with_subscription_type(SubType::Exclusive) 87 | .with_subscription("test_subscription") 88 | .build() 89 | .await?; 90 | 91 | let mut counter = 0usize; 92 | while let Some(msg) = consumer.try_next().await? 
{ 93 | consumer.ack(&msg).await?; 94 | let data = msg.deserialize().unwrap(); 95 | if data.data.as_str() != "data" { 96 | panic!("Unexpected payload: {}", &data.data); 97 | } 98 | println!("got message: {:?}", data); 99 | counter += 1; 100 | if counter % 4 == 0 { 101 | println!("sent {} messages", counter); 102 | break; 103 | } 104 | } 105 | 106 | Ok(()) 107 | } 108 | -------------------------------------------------------------------------------- /examples/consumer.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate serde; 3 | use futures::TryStreamExt; 4 | use pulsar::{ 5 | Authentication, Consumer, DeserializeMessage, Payload, Pulsar, SubType, TokioExecutor, 6 | }; 7 | use std::env; 8 | use pulsar::authentication::oauth2::{OAuth2Authentication}; 9 | 10 | #[derive(Serialize, Deserialize)] 11 | struct TestData { 12 | data: String, 13 | } 14 | 15 | impl DeserializeMessage for TestData { 16 | type Output = Result; 17 | 18 | fn deserialize_message(payload: &Payload) -> Self::Output { 19 | serde_json::from_slice(&payload.data) 20 | } 21 | } 22 | 23 | #[tokio::main] 24 | async fn main() -> Result<(), pulsar::Error> { 25 | env_logger::init(); 26 | 27 | let addr = env::var("PULSAR_ADDRESS") 28 | .ok() 29 | .unwrap_or_else(|| "pulsar://127.0.0.1:6650".to_string()); 30 | let topic = env::var("PULSAR_TOPIC") 31 | .ok() 32 | .unwrap_or_else(|| "non-persistent://public/default/test".to_string()); 33 | 34 | let mut builder = Pulsar::builder(addr, TokioExecutor); 35 | 36 | if let Ok(token) = env::var("PULSAR_TOKEN") { 37 | let authentication = Authentication { 38 | name: "token".to_string(), 39 | data: token.into_bytes(), 40 | }; 41 | 42 | builder = builder.with_auth(authentication); 43 | } else if let Ok(oauth2_cfg) = env::var("PULSAR_OAUTH2") { 44 | builder = builder.with_auth_provider(OAuth2Authentication::client_credentials( 45 | serde_json::from_str(oauth2_cfg.as_str()) 46 | .expect(format!("invalid oauth2 
config [{}]", oauth2_cfg.as_str()).as_str()))); 47 | } 48 | 49 | let pulsar: Pulsar<_> = builder.build().await?; 50 | 51 | let mut consumer: Consumer = pulsar 52 | .consumer() 53 | .with_topic(topic) 54 | .with_consumer_name("test_consumer") 55 | .with_subscription_type(SubType::Exclusive) 56 | .with_subscription("test_subscription") 57 | .build() 58 | .await?; 59 | 60 | let mut counter = 0usize; 61 | while let Some(msg) = consumer.try_next().await? { 62 | consumer.ack(&msg).await?; 63 | log::info!("metadata: {:?}", msg.metadata()); 64 | log::info!("id: {:?}", msg.message_id()); 65 | let data = match msg.deserialize() { 66 | Ok(data) => data, 67 | Err(e) => { 68 | log::error!("could not deserialize message: {:?}", e); 69 | break; 70 | } 71 | }; 72 | 73 | if data.data.as_str() != "data" { 74 | log::error!("Unexpected payload: {}", &data.data); 75 | break; 76 | } 77 | counter += 1; 78 | log::info!("got {} messages", counter); 79 | } 80 | 81 | Ok(()) 82 | } 83 | -------------------------------------------------------------------------------- /examples/producer.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate serde; 3 | use pulsar::{ 4 | message::proto, producer, Authentication, Error as PulsarError, Pulsar, SerializeMessage, 5 | TokioExecutor, 6 | }; 7 | use std::env; 8 | use pulsar::authentication::oauth2::{OAuth2Authentication}; 9 | 10 | #[derive(Serialize, Deserialize)] 11 | struct TestData { 12 | data: String, 13 | } 14 | 15 | impl SerializeMessage for TestData { 16 | fn serialize_message(input: Self) -> Result { 17 | let payload = serde_json::to_vec(&input).map_err(|e| PulsarError::Custom(e.to_string()))?; 18 | Ok(producer::Message { 19 | payload, 20 | ..Default::default() 21 | }) 22 | } 23 | } 24 | 25 | #[tokio::main] 26 | async fn main() -> Result<(), pulsar::Error> { 27 | env_logger::init(); 28 | 29 | let addr = env::var("PULSAR_ADDRESS") 30 | .ok() 31 | .unwrap_or_else(|| 
"pulsar://127.0.0.1:6650".to_string()); 32 | let topic = env::var("PULSAR_TOPIC") 33 | .ok() 34 | .unwrap_or_else(|| "non-persistent://public/default/test".to_string()); 35 | 36 | let mut builder = Pulsar::builder(addr, TokioExecutor); 37 | 38 | if let Ok(token) = env::var("PULSAR_TOKEN") { 39 | let authentication = Authentication { 40 | name: "token".to_string(), 41 | data: token.into_bytes(), 42 | }; 43 | 44 | builder = builder.with_auth(authentication); 45 | } else if let Ok(oauth2_cfg) = env::var("PULSAR_OAUTH2") { 46 | builder = builder.with_auth_provider(OAuth2Authentication::client_credentials( 47 | serde_json::from_str(oauth2_cfg.as_str()) 48 | .expect(format!("invalid oauth2 config [{}]", oauth2_cfg.as_str()).as_str()))); 49 | } 50 | 51 | let pulsar: Pulsar<_> = builder.build().await?; 52 | let mut producer = pulsar 53 | .producer() 54 | .with_topic(topic) 55 | .with_name("my producer") 56 | .with_options(producer::ProducerOptions { 57 | schema: Some(proto::Schema { 58 | r#type: proto::schema::Type::String as i32, 59 | ..Default::default() 60 | }), 61 | ..Default::default() 62 | }) 63 | .build() 64 | .await?; 65 | 66 | let mut counter = 0usize; 67 | loop { 68 | producer 69 | .send(TestData { 70 | data: "data".to_string(), 71 | }) 72 | .await? 
73 | .await 74 | .unwrap(); 75 | 76 | counter += 1; 77 | println!("{} messages", counter); 78 | tokio::time::sleep(std::time::Duration::from_millis(2000)).await; 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /examples/reader.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate serde; 3 | use futures::TryStreamExt; 4 | use pulsar::{ 5 | consumer::ConsumerOptions, proto::Schema, reader::Reader, Authentication, DeserializeMessage, 6 | Payload, Pulsar, TokioExecutor, 7 | }; 8 | use std::env; 9 | 10 | #[derive(Serialize, Deserialize)] 11 | struct TestData { 12 | data: String, 13 | } 14 | 15 | impl DeserializeMessage for TestData { 16 | type Output = Result; 17 | 18 | fn deserialize_message(payload: &Payload) -> Self::Output { 19 | serde_json::from_slice(&payload.data) 20 | } 21 | } 22 | #[tokio::main] 23 | async fn main() -> Result<(), pulsar::Error> { 24 | env_logger::init(); 25 | 26 | let addr = env::var("PULSAR_ADDRESS") 27 | .ok() 28 | .unwrap_or_else(|| "pulsar://127.0.0.1:6650".to_string()); 29 | let topic = env::var("PULSAR_TOPIC") 30 | .ok() 31 | .unwrap_or_else(|| "non-persistent://public/default/test".to_string()); 32 | 33 | let mut builder = Pulsar::builder(addr, TokioExecutor); 34 | 35 | if let Ok(token) = env::var("PULSAR_TOKEN") { 36 | let authentication = Authentication { 37 | name: "token".to_string(), 38 | data: token.into_bytes(), 39 | }; 40 | 41 | builder = builder.with_auth(authentication); 42 | } 43 | 44 | let pulsar: Pulsar<_> = builder.build().await?; 45 | 46 | let mut reader: Reader = pulsar 47 | .reader() 48 | .with_topic(topic) 49 | .with_consumer_name("test_reader") 50 | .with_options(ConsumerOptions::default().with_schema(Schema { 51 | r#type: pulsar::proto::schema::Type::String as i32, 52 | ..Default::default() 53 | })) 54 | // subscription defaults to SubType::Exclusive 55 | .into_reader() 56 | .await?; 57 | // 
log::info!("created a reader"); 58 | 59 | let mut counter = 0usize; 60 | 61 | // listen to 5 messages 62 | while let Some(msg) = reader.try_next().await? { 63 | log::info!("metadata: {:#?}", msg.metadata()); 64 | 65 | log::info!("id: {:?}", msg.message_id()); 66 | let data = match msg.deserialize() { 67 | Ok(data) => data, 68 | Err(e) => { 69 | log::error!("Could not deserialize message: {:?}", e); 70 | break; 71 | } 72 | }; 73 | 74 | if data.data.as_str() != "data" { 75 | log::error!("Unexpected payload: {}", &data.data); 76 | break; 77 | } 78 | counter += 1; 79 | 80 | if counter > 5 { 81 | break; 82 | } 83 | log::info!("got {} messages", counter); 84 | } 85 | 86 | Ok(()) 87 | } 88 | -------------------------------------------------------------------------------- /examples/round_trip.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate serde; 3 | use futures::TryStreamExt; 4 | use pulsar::{ 5 | message::proto, message::proto::command_subscribe::SubType, message::Payload, producer, 6 | Consumer, DeserializeMessage, Error as PulsarError, Pulsar, SerializeMessage, TokioExecutor, 7 | }; 8 | 9 | #[derive(Serialize, Deserialize)] 10 | struct TestData { 11 | data: String, 12 | } 13 | 14 | impl SerializeMessage for TestData { 15 | fn serialize_message(input: Self) -> Result { 16 | let payload = serde_json::to_vec(&input).map_err(|e| PulsarError::Custom(e.to_string()))?; 17 | Ok(producer::Message { 18 | payload, 19 | ..Default::default() 20 | }) 21 | } 22 | } 23 | 24 | impl DeserializeMessage for TestData { 25 | type Output = Result; 26 | 27 | fn deserialize_message(payload: &Payload) -> Self::Output { 28 | serde_json::from_slice(&payload.data) 29 | } 30 | } 31 | 32 | #[tokio::main] 33 | async fn main() -> Result<(), pulsar::Error> { 34 | env_logger::init(); 35 | 36 | let addr = "pulsar://127.0.0.1:6650"; 37 | let pulsar: Pulsar<_> = Pulsar::builder(addr, TokioExecutor).build().await?; 38 | let mut producer 
= pulsar 39 | .producer() 40 | .with_topic("test") 41 | .with_name("my-producer") 42 | .with_options(producer::ProducerOptions { 43 | schema: Some(proto::Schema { 44 | r#type: proto::schema::Type::String as i32, 45 | ..Default::default() 46 | }), 47 | ..Default::default() 48 | }) 49 | .build() 50 | .await?; 51 | 52 | tokio::task::spawn(async move { 53 | let mut counter = 0usize; 54 | loop { 55 | producer 56 | .send(TestData { 57 | data: "data".to_string(), 58 | }) 59 | .await 60 | .unwrap() 61 | .await 62 | .unwrap(); 63 | counter += 1; 64 | if counter % 1000 == 0 { 65 | println!("sent {} messages", counter); 66 | } 67 | } 68 | }); 69 | 70 | let pulsar2: Pulsar<_> = Pulsar::builder(addr, TokioExecutor).build().await?; 71 | 72 | let mut consumer: Consumer = pulsar2 73 | .consumer() 74 | .with_topic("test") 75 | .with_consumer_name("test_consumer") 76 | .with_subscription_type(SubType::Exclusive) 77 | .with_subscription("test_subscription") 78 | .build() 79 | .await?; 80 | 81 | let mut counter = 0usize; 82 | while let Some(msg) = consumer.try_next().await? 
{ 83 | log::info!("id: {:?}", msg.message_id()); 84 | consumer.ack(&msg).await?; 85 | let data = msg.deserialize().unwrap(); 86 | if data.data.as_str() != "data" { 87 | panic!("Unexpected payload: {}", &data.data); 88 | } 89 | counter += 1; 90 | if counter % 1000 == 0 { 91 | println!("received {} messages", counter); 92 | } 93 | } 94 | 95 | Ok(()) 96 | } 97 | -------------------------------------------------------------------------------- /src/authentication.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | 3 | use crate::error::AuthenticationError; 4 | 5 | #[async_trait] 6 | pub trait Authentication: Send + Sync + 'static { 7 | fn auth_method_name(&self) -> String; 8 | 9 | async fn initialize(&mut self) -> Result<(), AuthenticationError>; 10 | 11 | async fn auth_data(&mut self) -> Result, AuthenticationError>; 12 | } 13 | 14 | pub mod token { 15 | use std::rc::Rc; 16 | 17 | use async_trait::async_trait; 18 | 19 | use crate::authentication::Authentication; 20 | use crate::error::AuthenticationError; 21 | 22 | pub struct TokenAuthentication { 23 | token: Vec, 24 | } 25 | 26 | impl TokenAuthentication { 27 | pub fn new(token: String) -> Rc { 28 | Rc::new(TokenAuthentication { 29 | token: token.into_bytes() 30 | }) 31 | } 32 | } 33 | 34 | #[async_trait] 35 | impl Authentication for TokenAuthentication { 36 | fn auth_method_name(&self) -> String { 37 | String::from("token") 38 | } 39 | 40 | async fn initialize(&mut self) -> Result<(), AuthenticationError> { 41 | Ok(()) 42 | } 43 | 44 | async fn auth_data(&mut self) -> Result, AuthenticationError> { 45 | Ok(self.token.clone()) 46 | } 47 | } 48 | } 49 | 50 | #[cfg(feature = "auth-oauth2")] 51 | pub mod oauth2 { 52 | use std::fmt::{Display, Formatter}; 53 | use std::fs; 54 | use std::time::Instant; 55 | 56 | use async_trait::async_trait; 57 | use data_url::{DataUrl}; 58 | use nom::lib::std::ops::Add; 59 | use oauth2::{AuthUrl, ClientId, ClientSecret, 
Scope, TokenResponse, TokenUrl}; 60 | use oauth2::AuthType::RequestBody; 61 | use oauth2::basic::{BasicClient, BasicTokenResponse}; 62 | use oauth2::reqwest::async_http_client; 63 | use openidconnect::core::CoreProviderMetadata; 64 | use openidconnect::IssuerUrl; 65 | use serde::Deserialize; 66 | use url::Url; 67 | 68 | use crate::authentication::Authentication; 69 | use crate::error::AuthenticationError; 70 | 71 | #[derive(Deserialize, Debug)] 72 | struct OAuth2PrivateParams { 73 | client_id: String, 74 | client_secret: String, 75 | client_email: Option, 76 | issuer_url: Option, 77 | } 78 | 79 | #[derive(Deserialize, Debug)] 80 | pub struct OAuth2Params { 81 | pub issuer_url: String, 82 | pub credentials_url: String, 83 | pub audience: Option, 84 | pub scope: Option, 85 | } 86 | 87 | impl Display for OAuth2Params { 88 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 89 | write!(f, "OAuth2Params({}, {}, {:?}, {:?})", self.issuer_url, self.credentials_url, self.audience, self.scope) 90 | } 91 | } 92 | 93 | pub struct CachedToken { 94 | token_secret: Vec, 95 | expiring_at: Option, 96 | expired_at: Option, 97 | } 98 | 99 | impl From for CachedToken { 100 | fn from(resp: BasicTokenResponse) -> Self { 101 | let now = Instant::now(); 102 | CachedToken { 103 | expiring_at: resp.expires_in().map(|d| now.add(d.mul_f32(0.9))), 104 | expired_at: resp.expires_in().map(|d| now.add(d)), 105 | token_secret: resp.access_token().secret().clone().into_bytes(), 106 | } 107 | } 108 | } 109 | 110 | impl CachedToken { 111 | fn is_expiring(&self) -> bool { 112 | match &self.expiring_at { 113 | Some(expiring_at) => Instant::now().ge(expiring_at), 114 | None => false, 115 | } 116 | } 117 | 118 | fn is_expired(&self) -> bool { 119 | match &self.expired_at { 120 | Some(expired_at) => Instant::now().ge(expired_at), 121 | None => false, 122 | } 123 | } 124 | } 125 | 126 | pub struct OAuth2Authentication { 127 | params: OAuth2Params, 128 | private_params: Option, 129 | token_url: 
Option, 130 | token: Option, 131 | } 132 | 133 | impl OAuth2Authentication { 134 | pub fn client_credentials(params: OAuth2Params) -> Box { 135 | Box::new(OAuth2Authentication { 136 | params, 137 | private_params: None, 138 | token_url: None, 139 | token: None, 140 | }) 141 | } 142 | } 143 | 144 | impl OAuth2Params { 145 | fn read_private_params(&self) -> Result> { 146 | let credentials_url = Url::parse(self.credentials_url.as_str())?; 147 | match credentials_url.scheme() { 148 | "file" => { 149 | let path = credentials_url.path(); 150 | Ok(serde_json::from_str(fs::read_to_string(path)?.as_str())?) 151 | } 152 | "data" => { 153 | let data_url = match DataUrl::process(self.credentials_url.as_str()) { 154 | Ok(data_url) => data_url, 155 | Err(err) => { 156 | return Err(Box::from(format!("invalid data url [{}]: {:?}", self.credentials_url.as_str(), err))); 157 | } 158 | }; 159 | let body = match data_url.decode_to_vec() { 160 | Ok((body, _)) => body, 161 | Err(err) => { 162 | return Err(Box::from(format!("invalid data url [{}]: {:?}", self.credentials_url.as_str(), err))); 163 | } 164 | }; 165 | 166 | Ok(serde_json::from_slice(&body)?) 
167 | } 168 | _ => { 169 | Err(Box::from(format!("invalid credential url [{}]", self.credentials_url.as_str()))) 170 | } 171 | } 172 | } 173 | } 174 | 175 | #[async_trait] 176 | impl Authentication for OAuth2Authentication { 177 | fn auth_method_name(&self) -> String { 178 | String::from("token") 179 | } 180 | 181 | async fn initialize(&mut self) -> Result<(), AuthenticationError> { 182 | match self.params.read_private_params() { 183 | Ok(private_params) => self.private_params = Some(private_params), 184 | Err(e) => return Err(AuthenticationError::Custom(e.to_string())), 185 | } 186 | if let Err(e) = self.token_url().await { 187 | return Err(AuthenticationError::Custom(e.to_string())); 188 | } 189 | Ok(()) 190 | } 191 | 192 | async fn auth_data(&mut self) -> Result, AuthenticationError> { 193 | if self.private_params.is_none() { 194 | return Err(AuthenticationError::Custom("not initialized".to_string())); 195 | } 196 | let mut need_token = false; 197 | let mut none_or_expired = true; 198 | if let Some(token) = self.token.as_ref() { 199 | none_or_expired = token.is_expired(); 200 | if none_or_expired || token.is_expiring() { 201 | need_token = true; 202 | } 203 | } else { 204 | need_token = true; 205 | } 206 | if need_token { 207 | match self.fetch_token().await { 208 | Ok(token) => { 209 | self.token = Some(token.into()); 210 | } 211 | Err(e) => { 212 | if none_or_expired { 213 | // invalidate the expired token 214 | self.token = None; 215 | return Err(AuthenticationError::Custom(e.to_string())); 216 | } else { 217 | warn!("failed to get a new token for [{}], use the existing one for now", self.params); 218 | } 219 | } 220 | } 221 | } 222 | Ok(self.token.as_ref().unwrap().token_secret.clone()) 223 | } 224 | } 225 | 226 | impl OAuth2Authentication { 227 | async fn token_url(&mut self) -> Result, Box> { 228 | match &self.token_url { 229 | Some(url) => Ok(Some(url.clone())), 230 | None => { 231 | let metadata = CoreProviderMetadata::discover_async( 232 | 
IssuerUrl::from_url(Url::parse(self.params.issuer_url.as_str())?), async_http_client).await?; 233 | if let Some(token_endpoint) = metadata.token_endpoint() { 234 | self.token_url = Some(token_endpoint.clone()); 235 | } else { 236 | return Err(Box::from("token url not exists")); 237 | } 238 | 239 | match metadata.token_endpoint() { 240 | Some(endpoint) => { 241 | Ok(Some(endpoint.clone())) 242 | } 243 | None => Err(Box::from("token endpoint is unavailable")) 244 | } 245 | } 246 | } 247 | } 248 | 249 | async fn fetch_token(&mut self) -> Result> { 250 | let private_params = self.private_params.as_ref() 251 | .expect("oauth2 provider is uninitialized"); 252 | 253 | let issuer_url = if let Some(url) = private_params.issuer_url.as_ref() { 254 | url.as_str() 255 | } else { 256 | self.params.issuer_url.as_str() 257 | }; 258 | 259 | let client = BasicClient::new( 260 | ClientId::new(private_params.client_id.clone()), 261 | Some(ClientSecret::new(private_params.client_secret.clone())), 262 | AuthUrl::from_url(Url::parse(issuer_url)?), 263 | self.token_url().await?) 
264 | .set_auth_type(RequestBody); 265 | 266 | let mut request = client 267 | .exchange_client_credentials(); 268 | 269 | if let Some(audience) = &self.params.audience { 270 | request = request.add_extra_param("audience", audience.clone()); 271 | } 272 | 273 | if let Some(scope) = &self.params.scope { 274 | request = request.add_scope(Scope::new(scope.clone())); 275 | } 276 | 277 | let token = request 278 | .request_async(async_http_client).await?; 279 | debug!("Got a new oauth2 token for [{}]", self.params); 280 | Ok(token) 281 | } 282 | } 283 | 284 | #[cfg(test)] 285 | mod tests { 286 | use crate::authentication::oauth2::OAuth2Params; 287 | 288 | #[test] 289 | fn parse_data_url() { 290 | let params = OAuth2Params { 291 | issuer_url: "".to_string(), 292 | credentials_url: "data:application/json;base64,eyJjbGllbnRfaWQiOiJjbGllbnQtaWQiLCJjbGllbnRfc2VjcmV0IjoiY2xpZW50LXNlY3JldCJ9Cg==".to_string(), 293 | audience: None, 294 | scope: None, 295 | }; 296 | let private_params = params.read_private_params().unwrap(); 297 | assert_eq!(private_params.client_id, "client-id"); 298 | assert_eq!(private_params.client_secret, "client-secret"); 299 | assert_eq!(private_params.client_email, None); 300 | assert_eq!(private_params.issuer_url, None); 301 | } 302 | } 303 | } 304 | -------------------------------------------------------------------------------- /src/client.rs: -------------------------------------------------------------------------------- 1 | use std::string::FromUtf8Error; 2 | use std::sync::Arc; 3 | 4 | use futures::channel::{mpsc, oneshot}; 5 | 6 | use crate::connection::Authentication; 7 | use crate::connection_manager::{ 8 | BrokerAddress, ConnectionManager, ConnectionRetryOptions, OperationRetryOptions, TlsOptions, 9 | }; 10 | use crate::consumer::{ConsumerBuilder, ConsumerOptions, InitialPosition}; 11 | use crate::error::Error; 12 | use crate::executor::Executor; 13 | use crate::message::proto::{self, CommandSendReceipt}; 14 | use crate::message::Payload; 15 | 
use crate::producer::{self, ProducerBuilder, SendFuture}; 16 | use crate::service_discovery::ServiceDiscovery; 17 | use futures::StreamExt; 18 | use futures::lock::Mutex; 19 | 20 | /// Helper trait for consumer deserialization 21 | pub trait DeserializeMessage { 22 | /// type produced from the message 23 | type Output: Sized; 24 | /// deserialize method that will be called by the consumer 25 | fn deserialize_message(payload: &Payload) -> Self::Output; 26 | } 27 | 28 | impl DeserializeMessage for Vec { 29 | type Output = Self; 30 | 31 | fn deserialize_message(payload: &Payload) -> Self::Output { 32 | payload.data.to_vec() 33 | } 34 | } 35 | 36 | impl DeserializeMessage for String { 37 | type Output = Result; 38 | 39 | fn deserialize_message(payload: &Payload) -> Self::Output { 40 | String::from_utf8(payload.data.to_vec()) 41 | } 42 | } 43 | 44 | /// Helper trait for message serialization 45 | pub trait SerializeMessage { 46 | /// serialize method that will be called by the producer 47 | fn serialize_message(input: Self) -> Result; 48 | } 49 | 50 | impl SerializeMessage for producer::Message { 51 | fn serialize_message(input: Self) -> Result { 52 | Ok(input) 53 | } 54 | } 55 | 56 | impl<'a> SerializeMessage for () { 57 | fn serialize_message(_input: Self) -> Result { 58 | Ok(producer::Message { 59 | ..Default::default() 60 | }) 61 | } 62 | } 63 | 64 | impl<'a> SerializeMessage for &'a [u8] { 65 | fn serialize_message(input: Self) -> Result { 66 | Ok(producer::Message { 67 | payload: input.to_vec(), 68 | ..Default::default() 69 | }) 70 | } 71 | } 72 | 73 | impl SerializeMessage for Vec { 74 | fn serialize_message(input: Self) -> Result { 75 | Ok(producer::Message { 76 | payload: input, 77 | ..Default::default() 78 | }) 79 | } 80 | } 81 | 82 | impl SerializeMessage for String { 83 | fn serialize_message(input: Self) -> Result { 84 | let payload = input.into_bytes(); 85 | Ok(producer::Message { 86 | payload, 87 | ..Default::default() 88 | }) 89 | } 90 | } 91 | 92 | 
impl<'a> SerializeMessage for &String { 93 | fn serialize_message(input: Self) -> Result { 94 | let payload = input.as_bytes().to_vec(); 95 | Ok(producer::Message { 96 | payload, 97 | ..Default::default() 98 | }) 99 | } 100 | } 101 | 102 | impl<'a> SerializeMessage for &'a str { 103 | fn serialize_message(input: Self) -> Result { 104 | let payload = input.as_bytes().to_vec(); 105 | Ok(producer::Message { 106 | payload, 107 | ..Default::default() 108 | }) 109 | } 110 | } 111 | 112 | /// Pulsar client 113 | /// 114 | /// This is the starting point of this API, used to create connections, producers and consumers 115 | /// 116 | /// While methods are provided to create the client, producers and consumers directly, 117 | /// the builders should be used for more clarity: 118 | /// 119 | /// ```rust,no_run 120 | /// use pulsar::{Pulsar, TokioExecutor}; 121 | /// 122 | /// # async fn run(auth: pulsar::Authentication, retry: pulsar::ConnectionRetryOptions) -> Result<(), pulsar::Error> { 123 | /// let addr = "pulsar://127.0.0.1:6650"; 124 | /// // you can indicate which executor you use as the return type of client creation 125 | /// let pulsar: Pulsar<_> = Pulsar::builder(addr, TokioExecutor) 126 | /// .with_auth(auth) 127 | /// .with_connection_retry_options(retry) 128 | /// .build() 129 | /// .await?; 130 | /// 131 | /// let mut producer = pulsar 132 | /// .producer() 133 | /// .with_topic("non-persistent://public/default/test") 134 | /// .with_name("my producer") 135 | /// .build() 136 | /// .await?; 137 | /// # Ok(()) 138 | /// # } 139 | /// ``` 140 | #[derive(Clone)] 141 | pub struct Pulsar { 142 | pub(crate) manager: Arc>, 143 | service_discovery: Arc>, 144 | // this field is an Option to avoid a cyclic dependency between Pulsar 145 | // and run_producer: the run_producer loop needs a client to create 146 | // a multitopic producer, this producer stores internally a copy 147 | // of the Pulsar struct. 
So even if we drop the main Pulsar instance, 148 | // the run_producer loop still lives because it contains a copy of 149 | // the sender it waits on. 150 | // To solve this, we create a client without this sender, use it in 151 | // run_producer, then fill in the producer field afterwards in the 152 | // main Pulsar instance 153 | producer: Option>, 154 | pub(crate) operation_retry_options: OperationRetryOptions, 155 | pub(crate) executor: Arc, 156 | } 157 | 158 | impl Pulsar { 159 | /// creates a new client 160 | pub(crate) async fn new>( 161 | url: S, 162 | auth: Option>>>, 163 | connection_retry_parameters: Option, 164 | operation_retry_parameters: Option, 165 | tls_options: Option, 166 | executor: Exe, 167 | ) -> Result { 168 | let url: String = url.into(); 169 | let executor = Arc::new(executor); 170 | let operation_retry_options = operation_retry_parameters.unwrap_or_default(); 171 | let manager = ConnectionManager::new( 172 | url, 173 | auth, 174 | connection_retry_parameters, 175 | operation_retry_options.clone(), 176 | tls_options, 177 | executor.clone(), 178 | ) 179 | .await?; 180 | let manager = Arc::new(manager); 181 | 182 | // set up a regular connection check 183 | let weak_manager = Arc::downgrade(&manager); 184 | let mut interval = executor.interval(std::time::Duration::from_secs(60)); 185 | let res = executor.spawn(Box::pin(async move { 186 | while let Some(()) = interval.next().await { 187 | if let Some(strong_manager) = weak_manager.upgrade() { 188 | strong_manager.check_connections().await; 189 | } else { 190 | // if all the strong references to the manager were dropped, 191 | // we can stop the task 192 | break; 193 | } 194 | } 195 | })); 196 | if res.is_err() { 197 | error!("the executor could not spawn the check connection task"); 198 | return Err(crate::error::ConnectionError::Shutdown.into()); 199 | } 200 | 201 | let service_discovery = Arc::new(ServiceDiscovery::with_manager(manager.clone())); 202 | let (producer, producer_rx) =
mpsc::unbounded(); 203 | 204 | let mut client = Pulsar { 205 | manager, 206 | service_discovery, 207 | producer: None, 208 | operation_retry_options, 209 | executor, 210 | }; 211 | 212 | let _ = client 213 | .executor 214 | .spawn(Box::pin(run_producer(client.clone(), producer_rx))); 215 | client.producer = Some(producer); 216 | Ok(client) 217 | } 218 | 219 | /// creates a new client builder 220 | /// 221 | /// ```rust,no_run 222 | /// use pulsar::{Pulsar, TokioExecutor}; 223 | /// 224 | /// # async fn run() -> Result<(), pulsar::Error> { 225 | /// let addr = "pulsar://127.0.0.1:6650"; 226 | /// // you can indicate which executor you use as the return type of client creation 227 | /// let pulsar: Pulsar<_> = Pulsar::builder(addr, TokioExecutor) 228 | /// .build() 229 | /// .await?; 230 | /// # Ok(()) 231 | /// # } 232 | /// ``` 233 | pub fn builder>(url: S, executor: Exe) -> PulsarBuilder { 234 | PulsarBuilder { 235 | url: url.into(), 236 | auth_provider: None, 237 | connection_retry_options: None, 238 | operation_retry_options: None, 239 | tls_options: None, 240 | executor, 241 | } 242 | } 243 | 244 | /// creates a consumer builder 245 | /// 246 | /// ```rust,no_run 247 | /// use pulsar::{SubType, Consumer}; 248 | /// 249 | /// # async fn run(pulsar: pulsar::Pulsar) -> Result<(), pulsar::Error> { 250 | /// # type TestData = String; 251 | /// let mut consumer: Consumer = pulsar 252 | /// .consumer() 253 | /// .with_topic("non-persistent://public/default/test") 254 | /// .with_consumer_name("test_consumer") 255 | /// .with_subscription_type(SubType::Exclusive) 256 | /// .with_subscription("test_subscription") 257 | /// .build() 258 | /// .await?; 259 | /// # Ok(()) 260 | /// # } 261 | /// ``` 262 | pub fn consumer(&self) -> ConsumerBuilder { 263 | ConsumerBuilder::new(self) 264 | } 265 | 266 | /// creates a producer builder 267 | /// 268 | /// ```rust,no_run 269 | /// # async fn run(pulsar: pulsar::Pulsar) -> Result<(), pulsar::Error> { 270 | /// let mut producer = 
pulsar 271 | /// .producer() 272 | /// .with_topic("non-persistent://public/default/test") 273 | /// .with_name("my producer") 274 | /// .build() 275 | /// .await?; 276 | /// # Ok(()) 277 | /// # } 278 | /// ``` 279 | pub fn producer(&self) -> ProducerBuilder { 280 | ProducerBuilder::new(self) 281 | } 282 | 283 | /// creates a reader builder 284 | /// ```rust, no_run 285 | /// use pulsar::reader::Reader; 286 | /// 287 | /// # async fn run(pulsar: pulsar::Pulsar) -> Result<(), pulsar::Error> { 288 | /// # type TestData = String; 289 | /// let mut reader: Reader = pulsar 290 | /// .reader() 291 | /// .with_topic("non-persistent://public/default/test") 292 | /// .with_consumer_name("my_reader") 293 | /// .into_reader() 294 | /// .await?; 295 | /// # Ok(()) 296 | /// # } 297 | /// ``` 298 | pub fn reader(&self) -> ConsumerBuilder { 299 | // this makes it exactly the same like the consumer() method though 300 | ConsumerBuilder::new(self).with_options( 301 | ConsumerOptions::default() 302 | .durable(false) 303 | .with_initial_position(InitialPosition::Latest), 304 | ) 305 | } 306 | 307 | /// gets the address of a broker handling the topic 308 | /// 309 | /// ```rust,no_run 310 | /// # async fn run(pulsar: pulsar::Pulsar) -> Result<(), pulsar::Error> { 311 | /// let broker_address = pulsar.lookup_topic("persistent://public/default/test").await?; 312 | /// # Ok(()) 313 | /// # } 314 | /// ``` 315 | pub async fn lookup_topic>(&self, topic: S) -> Result { 316 | self.service_discovery 317 | .lookup_topic(topic) 318 | .await 319 | .map_err(|e| e.into()) 320 | } 321 | 322 | /// gets the number of partitions for a partitioned topic 323 | /// 324 | /// ```rust,no_run 325 | /// # async fn run(pulsar: pulsar::Pulsar) -> Result<(), pulsar::Error> { 326 | /// let nb = pulsar.lookup_partitioned_topic_number("persistent://public/default/test").await?; 327 | /// # Ok(()) 328 | /// # } 329 | /// ``` 330 | pub async fn lookup_partitioned_topic_number>( 331 | &self, 332 | topic: S, 333 | ) 
-> Result { 334 | self.service_discovery 335 | .lookup_partitioned_topic_number(topic) 336 | .await 337 | .map_err(|e| e.into()) 338 | } 339 | 340 | /// gets the address of brokers handling the topic's partitions. If the topic is not 341 | /// a partitioned topic, result will be a single element containing the topic and address 342 | /// of the non-partitioned topic provided. 343 | /// 344 | /// ```rust,no_run 345 | /// # async fn run(pulsar: pulsar::Pulsar) -> Result<(), pulsar::Error> { 346 | /// let broker_addresses = pulsar.lookup_partitioned_topic("persistent://public/default/test").await?; 347 | /// # Ok(()) 348 | /// # } 349 | /// ``` 350 | pub async fn lookup_partitioned_topic>( 351 | &self, 352 | topic: S, 353 | ) -> Result, Error> { 354 | self.service_discovery 355 | .lookup_partitioned_topic(topic) 356 | .await 357 | .map_err(|e| e.into()) 358 | } 359 | 360 | /// gets the list of topics from a namespace 361 | /// 362 | /// ```rust,no_run 363 | /// use pulsar::message::proto::command_get_topics_of_namespace::Mode; 364 | /// 365 | /// # async fn run(pulsar: pulsar::Pulsar) -> Result<(), pulsar::Error> { 366 | /// let topics = pulsar.get_topics_of_namespace("public/default".to_string(), Mode::Persistent).await?; 367 | /// # Ok(()) 368 | /// # } 369 | /// ``` 370 | pub async fn get_topics_of_namespace( 371 | &self, 372 | namespace: String, 373 | mode: proto::command_get_topics_of_namespace::Mode, 374 | ) -> Result, Error> { 375 | let conn = self.manager.get_base_connection().await?; 376 | let topics = conn 377 | .sender() 378 | .get_topics_of_namespace(namespace, mode) 379 | .await?; 380 | Ok(topics.topics) 381 | } 382 | 383 | /// Sends a message on a topic. 384 | /// 385 | /// This function will lazily initialize and re-use producers as needed. For better 386 | /// control over producers, creating and using a `Producer` is recommended. 
387 | /// 388 | /// ```rust,no_run 389 | /// use pulsar::message::proto::command_get_topics_of_namespace::Mode; 390 | /// 391 | /// # async fn run(pulsar: pulsar::Pulsar) -> Result<(), pulsar::Error> { 392 | /// let topics = pulsar.send("persistent://public/default/test", "hello world!").await?; 393 | /// # Ok(()) 394 | /// # } 395 | /// ``` 396 | pub async fn send, M: SerializeMessage + Sized>( 397 | &self, 398 | topic: S, 399 | message: M, 400 | ) -> Result { 401 | let message = M::serialize_message(message)?; 402 | self.send_raw(message, topic).await 403 | } 404 | 405 | async fn send_raw>( 406 | &self, 407 | message: producer::Message, 408 | topic: S, 409 | ) -> Result { 410 | let (resolver, future) = oneshot::channel(); 411 | self.producer 412 | .as_ref() 413 | .expect("a client without the producer channel should only be used internally") 414 | .unbounded_send(SendMessage { 415 | topic: topic.into(), 416 | message, 417 | resolver, 418 | }) 419 | .map_err(|_| Error::Custom("producer unexpectedly disconnected".into()))?; 420 | Ok(SendFuture(future)) 421 | } 422 | } 423 | 424 | /// Helper structure to generate a [Pulsar] client 425 | pub struct PulsarBuilder { 426 | url: String, 427 | auth_provider: Option>, 428 | connection_retry_options: Option, 429 | operation_retry_options: Option, 430 | tls_options: Option, 431 | executor: Exe, 432 | } 433 | 434 | impl PulsarBuilder { 435 | /// Authentication parameters (JWT, Biscuit, etc) 436 | pub fn with_auth(self, auth: Authentication) -> Self { 437 | self.with_auth_provider(Box::new(auth)) 438 | } 439 | 440 | pub fn with_auth_provider(mut self, auth: Box) -> Self { 441 | self.auth_provider = Some(auth); 442 | self 443 | } 444 | 445 | /// Exponential back off parameters for automatic reconnection 446 | pub fn with_connection_retry_options( 447 | mut self, 448 | connection_retry_options: ConnectionRetryOptions, 449 | ) -> Self { 450 | self.connection_retry_options = Some(connection_retry_options); 451 | self 452 | } 453 | 
454 | /// Retry parameters for Pulsar operations 455 | pub fn with_operation_retry_options( 456 | mut self, 457 | operation_retry_options: OperationRetryOptions, 458 | ) -> Self { 459 | self.operation_retry_options = Some(operation_retry_options); 460 | self 461 | } 462 | 463 | /// add a custom certificate chain to authenticate the server in TLS connections 464 | pub fn with_certificate_chain(mut self, certificate_chain: Vec) -> Self { 465 | match &mut self.tls_options { 466 | Some(tls) => tls.certificate_chain = Some(certificate_chain), 467 | None => { 468 | self.tls_options = Some(TlsOptions { 469 | certificate_chain: Some(certificate_chain), 470 | ..Default::default() 471 | }) 472 | } 473 | } 474 | self 475 | } 476 | 477 | pub fn with_allow_insecure_connection(mut self, allow: bool) -> Self { 478 | match &mut self.tls_options { 479 | Some(tls) => tls.allow_insecure_connection = allow, 480 | None => { 481 | self.tls_options = Some(TlsOptions { 482 | allow_insecure_connection: allow, 483 | ..Default::default() 484 | }) 485 | } 486 | } 487 | self 488 | } 489 | 490 | pub fn with_tls_hostname_verification_enabled(mut self, enabled: bool) -> Self { 491 | match &mut self.tls_options { 492 | Some(tls) => tls.tls_hostname_verification_enabled = enabled, 493 | None => { 494 | self.tls_options = Some(TlsOptions { 495 | tls_hostname_verification_enabled: enabled, 496 | ..Default::default() 497 | }) 498 | } 499 | } 500 | self 501 | } 502 | 503 | /// add a custom certificate chain from a file to authenticate the server in TLS connections 504 | pub fn with_certificate_chain_file>( 505 | self, 506 | path: P, 507 | ) -> Result { 508 | use std::io::Read; 509 | 510 | let mut file = std::fs::File::open(path)?; 511 | let mut v = vec![]; 512 | file.read_to_end(&mut v)?; 513 | 514 | Ok(self.with_certificate_chain(v)) 515 | } 516 | 517 | /// creates the Pulsar client and connects it 518 | pub async fn build(self) -> Result, Error> { 519 | let PulsarBuilder { 520 | url, 521 | 
auth_provider, 522 | connection_retry_options, 523 | operation_retry_options, 524 | tls_options, 525 | executor, 526 | } = self; 527 | 528 | Pulsar::new( 529 | url, 530 | auth_provider.map(|p| Arc::new(Mutex::new(p))), 531 | connection_retry_options, 532 | operation_retry_options, 533 | tls_options, 534 | executor, 535 | ) 536 | .await 537 | } 538 | } 539 | 540 | struct SendMessage { 541 | topic: String, 542 | message: producer::Message, 543 | resolver: oneshot::Sender>, 544 | } 545 | 546 | async fn run_producer( 547 | client: Pulsar, 548 | mut messages: mpsc::UnboundedReceiver, 549 | ) { 550 | let mut producer = client.producer().build_multi_topic(); 551 | while let Some(SendMessage { 552 | topic, 553 | message: payload, 554 | resolver, 555 | }) = messages.next().await 556 | { 557 | match producer.send(topic, payload).await { 558 | Ok(future) => { 559 | let _ = client.executor.spawn(Box::pin(async move { 560 | let _ = resolver.send(future.await); 561 | })); 562 | } 563 | Err(e) => { 564 | let _ = resolver.send(Err(e)); 565 | } 566 | } 567 | } 568 | } 569 | -------------------------------------------------------------------------------- /src/connection_manager.rs: -------------------------------------------------------------------------------- 1 | use crate::connection::{Connection}; 2 | use crate::error::ConnectionError; 3 | use crate::executor::Executor; 4 | use std::collections::HashMap; 5 | use std::sync::Arc; 6 | use std::time::Duration; 7 | 8 | use futures::{channel::oneshot, lock::Mutex}; 9 | use native_tls::Certificate; 10 | use rand::Rng; 11 | use url::Url; 12 | 13 | /// holds connection information for a broker 14 | #[derive(Debug, Clone, Hash, PartialEq, Eq)] 15 | pub struct BrokerAddress { 16 | /// URL we're using for connection (can be the proxy's URL) 17 | pub url: Url, 18 | /// pulsar URL for the broker we're actually contacting 19 | /// this must follow the IP:port format 20 | pub broker_url: String, 21 | /// true if we're connecting through a proxy 
22 | pub proxy: bool, 23 | } 24 | 25 | /// configuration for reconnection exponential back off 26 | #[derive(Debug, Clone)] 27 | pub struct ConnectionRetryOptions { 28 | /// minimum delay between connection retries 29 | pub min_backoff: Duration, 30 | /// maximum delay between rconnection etries 31 | pub max_backoff: Duration, 32 | /// maximum number of connection retries 33 | pub max_retries: u32, 34 | /// time limit to establish a connection 35 | pub connection_timeout: Duration, 36 | /// keep-alive interval for each broker connection 37 | pub keep_alive: Duration, 38 | } 39 | 40 | impl std::default::Default for ConnectionRetryOptions { 41 | fn default() -> Self { 42 | ConnectionRetryOptions { 43 | min_backoff: Duration::from_millis(10), 44 | max_backoff: Duration::from_secs(30), 45 | max_retries: 12u32, 46 | connection_timeout: Duration::from_secs(10), 47 | keep_alive: Duration::from_secs(60), 48 | } 49 | } 50 | } 51 | 52 | /// configuration for Pulsar operation retries 53 | #[derive(Debug, Clone)] 54 | pub struct OperationRetryOptions { 55 | /// time limit to receive an answer to a Pulsar operation 56 | pub operation_timeout: Duration, 57 | /// delay between operation retries after a ServiceNotReady error 58 | pub retry_delay: Duration, 59 | /// maximum number of operation retries. 
None indicates infinite retries 60 | pub max_retries: Option, 61 | } 62 | 63 | impl std::default::Default for OperationRetryOptions { 64 | fn default() -> Self { 65 | OperationRetryOptions { 66 | operation_timeout: Duration::from_secs(30), 67 | retry_delay: Duration::from_millis(500), 68 | max_retries: None, 69 | } 70 | } 71 | } 72 | 73 | /// configuration for TLS connections 74 | #[derive(Debug, Clone)] 75 | pub struct TlsOptions { 76 | /// contains a list of PEM encoded certificates 77 | pub certificate_chain: Option>, 78 | 79 | /// allow insecure TLS connection if set to true 80 | /// 81 | /// defaults to *false* 82 | pub allow_insecure_connection: bool, 83 | 84 | /// whether hostname verification is enabled when insecure TLS connection is allowed 85 | /// 86 | /// defaults to *true* 87 | pub tls_hostname_verification_enabled: bool, 88 | } 89 | 90 | impl Default for TlsOptions { 91 | fn default() -> Self { 92 | Self { 93 | certificate_chain: None, 94 | allow_insecure_connection: false, 95 | tls_hostname_verification_enabled: true, 96 | } 97 | } 98 | } 99 | 100 | enum ConnectionStatus { 101 | Connected(Arc>), 102 | Connecting(Vec>, ConnectionError>>>), 103 | } 104 | 105 | /// Look up broker addresses for topics and partitioned topics 106 | /// 107 | /// The ConnectionManager object provides a single interface to start 108 | /// interacting with a cluster. 
It will automatically follow redirects 109 | /// or use a proxy, and aggregate broker connections 110 | #[derive(Clone)] 111 | pub struct ConnectionManager { 112 | pub url: Url, 113 | auth: Option>>>, 114 | pub(crate) executor: Arc, 115 | connections: Arc>>>, 116 | connection_retry_options: ConnectionRetryOptions, 117 | pub(crate) operation_retry_options: OperationRetryOptions, 118 | tls_options: TlsOptions, 119 | certificate_chain: Vec, 120 | } 121 | 122 | impl ConnectionManager { 123 | pub async fn new( 124 | url: String, 125 | auth: Option>>>, 126 | connection_retry: Option, 127 | operation_retry_options: OperationRetryOptions, 128 | tls: Option, 129 | executor: Arc, 130 | ) -> Result { 131 | let connection_retry_options = connection_retry.unwrap_or_default(); 132 | let tls_options = tls.unwrap_or_default(); 133 | let url = Url::parse(&url) 134 | .map_err(|e| { 135 | error!("error parsing URL: {:?}", e); 136 | ConnectionError::NotFound 137 | }) 138 | .and_then(|url| { 139 | url.host_str().ok_or_else(|| { 140 | error!("missing host for URL: {:?}", url); 141 | ConnectionError::NotFound 142 | })?; 143 | Ok(url) 144 | })?; 145 | 146 | let certificate_chain = match tls_options.certificate_chain.as_ref() { 147 | None => vec![], 148 | Some(certificate_chain) => { 149 | let mut v = vec![]; 150 | for cert in pem::parse_many(&certificate_chain).iter().rev() { 151 | v.push( 152 | Certificate::from_der(&cert.contents[..]) 153 | .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?, 154 | ); 155 | } 156 | v 157 | } 158 | }; 159 | 160 | if let Some(auth) = auth.clone() { 161 | auth.lock().await.initialize().await?; 162 | } 163 | 164 | let manager = ConnectionManager { 165 | url: url.clone(), 166 | auth, 167 | executor, 168 | connections: Arc::new(Mutex::new(HashMap::new())), 169 | connection_retry_options, 170 | operation_retry_options, 171 | tls_options, 172 | certificate_chain, 173 | }; 174 | let broker_address = BrokerAddress { 175 | url: url.clone(), 176 | 
broker_url: format!("{}:{}", url.host_str().unwrap(), url.port().unwrap_or(6650)), 177 | proxy: false, 178 | }; 179 | manager.connect(broker_address).await?; 180 | Ok(manager) 181 | } 182 | 183 | pub fn get_base_address(&self) -> BrokerAddress { 184 | BrokerAddress { 185 | url: self.url.clone(), 186 | broker_url: format!( 187 | "{}:{}", 188 | self.url.host_str().unwrap(), 189 | self.url.port().unwrap_or(6650) 190 | ), 191 | proxy: false, 192 | } 193 | } 194 | 195 | /// get an active Connection from a broker address 196 | /// 197 | /// creates a connection if not available 198 | pub async fn get_base_connection(&self) -> Result>, ConnectionError> { 199 | let broker_address = BrokerAddress { 200 | url: self.url.clone(), 201 | broker_url: format!( 202 | "{}:{}", 203 | self.url.host_str().unwrap(), 204 | self.url.port().unwrap_or(6650) 205 | ), 206 | proxy: false, 207 | }; 208 | 209 | self.get_connection(&broker_address).await 210 | } 211 | 212 | /// get an active Connection from a broker address 213 | /// 214 | /// creates a connection if not available 215 | pub async fn get_connection( 216 | &self, 217 | broker: &BrokerAddress, 218 | ) -> Result>, ConnectionError> { 219 | let rx = { 220 | let mut conns = self.connections.lock().await; 221 | match conns.get_mut(broker) { 222 | None => None, 223 | Some(ConnectionStatus::Connected(conn)) => { 224 | if conn.is_valid() { 225 | return Ok(conn.clone()); 226 | } else { 227 | None 228 | } 229 | } 230 | Some(ConnectionStatus::Connecting(ref mut v)) => { 231 | let (tx, rx) = oneshot::channel(); 232 | v.push(tx); 233 | Some(rx) 234 | } 235 | } 236 | }; 237 | 238 | match rx { 239 | None => self.connect(broker.clone()).await, 240 | Some(rx) => match rx.await { 241 | Ok(res) => res, 242 | Err(_) => Err(ConnectionError::Canceled), 243 | }, 244 | } 245 | } 246 | 247 | async fn connect_inner( 248 | &self, 249 | broker: &BrokerAddress, 250 | ) -> Result>, ConnectionError> { 251 | debug!("ConnectionManager::connect({:?})", broker); 252 
| 253 | let rx = { 254 | match self 255 | .connections 256 | .lock() 257 | .await 258 | .entry(broker.clone()) 259 | .or_insert_with(|| ConnectionStatus::Connecting(Vec::new())) 260 | { 261 | ConnectionStatus::Connecting(ref mut v) => { 262 | if v.is_empty() { 263 | None 264 | } else { 265 | let (tx, rx) = oneshot::channel(); 266 | v.push(tx); 267 | Some(rx) 268 | } 269 | } 270 | ConnectionStatus::Connected(_) => None, 271 | } 272 | }; 273 | if let Some(rx) = rx { 274 | return match rx.await { 275 | Ok(res) => res, 276 | Err(_) => Err(ConnectionError::Canceled), 277 | }; 278 | } 279 | 280 | let proxy_url = if broker.proxy { 281 | Some(broker.broker_url.clone()) 282 | } else { 283 | None 284 | }; 285 | 286 | let mut current_backoff; 287 | let mut current_retries = 0u32; 288 | 289 | let start = std::time::Instant::now(); 290 | let conn = loop { 291 | match Connection::new( 292 | broker.url.clone(), 293 | self.auth.clone(), 294 | proxy_url.clone(), 295 | &self.certificate_chain, 296 | self.tls_options.allow_insecure_connection, 297 | self.tls_options.tls_hostname_verification_enabled, 298 | self.connection_retry_options.connection_timeout, 299 | self.operation_retry_options.operation_timeout, 300 | self.executor.clone(), 301 | ) 302 | .await 303 | { 304 | Ok(c) => break c, 305 | Err(ConnectionError::Io(e)) => { 306 | if e.kind() != std::io::ErrorKind::ConnectionRefused 307 | || e.kind() != std::io::ErrorKind::TimedOut 308 | { 309 | return Err(ConnectionError::Io(e)); 310 | } 311 | 312 | if current_retries == self.connection_retry_options.max_retries { 313 | return Err(ConnectionError::Io(e)); 314 | } 315 | 316 | let jitter = rand::thread_rng().gen_range(0..10); 317 | current_backoff = std::cmp::min( 318 | self.connection_retry_options.min_backoff 319 | * 2u32.saturating_pow(current_retries), 320 | self.connection_retry_options.max_backoff, 321 | ) + self.connection_retry_options.min_backoff * jitter; 322 | current_retries += 1; 323 | 324 | trace!( 325 | "current 
retries: {}, current_backoff(pow = {}): {}ms", 326 | current_retries, 327 | 2u32.pow(current_retries - 1), 328 | current_backoff.as_millis() 329 | ); 330 | error!( 331 | "connection error, retrying connection to {} after {}ms", 332 | broker.url, 333 | current_backoff.as_millis() 334 | ); 335 | self.executor.delay(current_backoff).await; 336 | } 337 | Err(e) => return Err(e), 338 | } 339 | }; 340 | let connection_id = conn.id(); 341 | if let Some(url) = proxy_url.as_ref() { 342 | info!( 343 | "Connected n°{} to {} via proxy {} in {}ms", 344 | connection_id, 345 | url, 346 | broker.url, 347 | (std::time::Instant::now() - start).as_millis() 348 | ); 349 | } else { 350 | info!( 351 | "Connected n°{} to {} in {}ms", 352 | connection_id, 353 | broker.url, 354 | (std::time::Instant::now() - start).as_millis() 355 | ); 356 | } 357 | let c = Arc::new(conn); 358 | 359 | Ok(c) 360 | } 361 | 362 | async fn connect( 363 | &self, 364 | broker: BrokerAddress, 365 | ) -> Result>, ConnectionError> { 366 | let c = match self.connect_inner(&broker).await { 367 | Err(e) => { 368 | // the current ConnectionStatus is Connecting, containing 369 | // notification channels for all the tasks waiting for the 370 | // reconnection. If we delete this status, they will be 371 | // notified that reconnection is canceled instead of getting 372 | // stuck 373 | if let Some(ConnectionStatus::Connecting(mut v)) = 374 | self.connections.lock().await.remove(&broker) 375 | { 376 | for tx in v.drain(..) 
{ 377 | // we cannot clone ConnectionError so we tell other 378 | // tasks that reconnection is canceled 379 | let _ = tx.send(Err(ConnectionError::Canceled)); 380 | } 381 | } 382 | 383 | return Err(e); 384 | } 385 | Ok(c) => c, 386 | }; 387 | 388 | let connection_id = c.id(); 389 | let proxy_url = if broker.proxy { 390 | Some(broker.broker_url.clone()) 391 | } else { 392 | None 393 | }; 394 | 395 | // set up client heartbeats for the connection 396 | let weak_conn = Arc::downgrade(&c); 397 | let mut interval = self 398 | .executor 399 | .interval(self.connection_retry_options.keep_alive); 400 | let broker_url = broker.url.clone(); 401 | let proxy_to_broker_url = proxy_url.clone(); 402 | let res = self.executor.spawn(Box::pin(async move { 403 | use crate::futures::StreamExt; 404 | while let Some(()) = interval.next().await { 405 | if let Some(url) = proxy_to_broker_url.as_ref() { 406 | trace!( 407 | "will ping connection {} to {} via proxy {}", 408 | connection_id, 409 | url, 410 | broker_url 411 | ); 412 | } else { 413 | trace!("will ping connection {} to {}", connection_id, broker_url); 414 | } 415 | if let Some(strong_conn) = weak_conn.upgrade() { 416 | if !strong_conn.is_valid() { 417 | trace!("connection {} is not valid anymore, skip heart beat task", 418 | connection_id); 419 | break; 420 | } 421 | if let Err(e) = strong_conn.sender().send_ping().await { 422 | error!( 423 | "could not ping connection {} to the server at {}: {}", 424 | connection_id, broker_url, e 425 | ); 426 | } 427 | } else { 428 | // if the strong pointers were dropped, we can stop the heartbeat for this 429 | // connection 430 | trace!("strong connection was dropped, stopping keepalive task"); 431 | break; 432 | } 433 | } 434 | })); 435 | if res.is_err() { 436 | error!("the executor could not spawn the heartbeat future"); 437 | return Err(ConnectionError::Shutdown); 438 | } 439 | 440 | let old = self 441 | .connections 442 | .lock() 443 | .await 444 | .insert(broker, 
ConnectionStatus::Connected(c.clone())); 445 | match old { 446 | Some(ConnectionStatus::Connecting(mut v)) => { 447 | //info!("was in connecting state({} waiting)", v.len()); 448 | for tx in v.drain(..) { 449 | let _ = tx.send(Ok(c.clone())); 450 | } 451 | } 452 | Some(ConnectionStatus::Connected(_)) => { 453 | //info!("removing old connection"); 454 | } 455 | None => { 456 | //info!("setting up new connection"); 457 | } 458 | }; 459 | 460 | Ok(c) 461 | } 462 | 463 | /// tests that all connections are valid and still used 464 | pub(crate) async fn check_connections(&self) { 465 | trace!("cleaning invalid or unused connections"); 466 | self.connections 467 | .lock() 468 | .await 469 | .retain(|_, ref mut connection| match connection { 470 | ConnectionStatus::Connecting(_) => true, 471 | ConnectionStatus::Connected(conn) => { 472 | // if the manager holds the only reference to that 473 | // connection, we can remove it from the manager 474 | // no need for special synchronization here: we're already 475 | // in a mutex, and a case appears where the Arc is cloned 476 | // somewhere at the same time, that just means the manager 477 | // will create a new connection the next time it is asked 478 | conn.is_valid() && Arc::strong_count(conn) > 1 479 | } 480 | }); 481 | } 482 | } 483 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | //! 
Error types 2 | use std::sync::{ 3 | atomic::{AtomicBool, Ordering}, 4 | Arc, Mutex, 5 | }; 6 | use std::{fmt, io}; 7 | 8 | #[derive(Debug)] 9 | pub enum Error { 10 | Connection(ConnectionError), 11 | Consumer(ConsumerError), 12 | Producer(ProducerError), 13 | ServiceDiscovery(ServiceDiscoveryError), 14 | Authentication(AuthenticationError), 15 | Custom(String), 16 | Executor, 17 | } 18 | 19 | impl From for Error { 20 | fn from(err: ConnectionError) -> Self { 21 | Error::Connection(err) 22 | } 23 | } 24 | 25 | impl From for Error { 26 | fn from(err: ConsumerError) -> Self { 27 | Error::Consumer(err) 28 | } 29 | } 30 | 31 | impl From for Error { 32 | fn from(err: ProducerError) -> Self { 33 | Error::Producer(err) 34 | } 35 | } 36 | 37 | impl From for Error { 38 | fn from(err: ServiceDiscoveryError) -> Self { 39 | Error::ServiceDiscovery(err) 40 | } 41 | } 42 | 43 | impl fmt::Display for Error { 44 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 45 | match self { 46 | Error::Connection(e) => write!(f, "Connection error: {}", e), 47 | Error::Consumer(e) => write!(f, "consumer error: {}", e), 48 | Error::Producer(e) => write!(f, "producer error: {}", e), 49 | Error::ServiceDiscovery(e) => write!(f, "service discovery error: {}", e), 50 | Error::Authentication(e) => write!(f, "authentication error: {}", e), 51 | Error::Custom(e) => write!(f, "error: {}", e), 52 | Error::Executor => write!(f, "could not spawn task"), 53 | } 54 | } 55 | } 56 | 57 | impl std::error::Error for Error { 58 | fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { 59 | match self { 60 | Error::Connection(e) => e.source(), 61 | Error::Consumer(e) => e.source(), 62 | Error::Producer(e) => e.source(), 63 | Error::ServiceDiscovery(e) => e.source(), 64 | Error::Authentication(e) => e.source(), 65 | Error::Custom(_) => None, 66 | Error::Executor => None, 67 | } 68 | } 69 | } 70 | 71 | #[derive(Debug)] 72 | pub enum ConnectionError { 73 | Io(io::Error), 74 | Disconnected, 75 | 
PulsarError(Option, Option), 76 | Unexpected(String), 77 | Decoding(String), 78 | Encoding(String), 79 | SocketAddr(String), 80 | UnexpectedResponse(String), 81 | Tls(native_tls::Error), 82 | Authentication(AuthenticationError), 83 | NotFound, 84 | Canceled, 85 | Shutdown, 86 | } 87 | 88 | impl From for ConnectionError { 89 | fn from(err: io::Error) -> Self { 90 | ConnectionError::Io(err) 91 | } 92 | } 93 | 94 | impl From for ConnectionError { 95 | fn from(err: native_tls::Error) -> Self { 96 | ConnectionError::Tls(err) 97 | } 98 | } 99 | 100 | impl From for ConnectionError { 101 | fn from(err: AuthenticationError) -> Self { 102 | ConnectionError::Authentication(err) 103 | } 104 | } 105 | 106 | impl fmt::Display for ConnectionError { 107 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 108 | match self { 109 | ConnectionError::Io(e) => write!(f, "{}", e), 110 | ConnectionError::Disconnected => write!(f, "Disconnected"), 111 | ConnectionError::PulsarError(e, s) => { 112 | write!(f, "Server error ({:?}): {}", e, s.as_deref().unwrap_or("")) 113 | } 114 | ConnectionError::Unexpected(e) => write!(f, "{}", e), 115 | ConnectionError::Decoding(e) => write!(f, "Error decoding message: {}", e), 116 | ConnectionError::Encoding(e) => write!(f, "Error encoding message: {}", e), 117 | ConnectionError::SocketAddr(e) => write!(f, "Error obtaining socket address: {}", e), 118 | ConnectionError::Tls(e) => write!(f, "Error connecting TLS stream: {}", e), 119 | ConnectionError::Authentication(e) => write!(f, "Error authentication: {}", e), 120 | ConnectionError::UnexpectedResponse(e) => { 121 | write!(f, "Unexpected response from pulsar: {}", e) 122 | } 123 | ConnectionError::NotFound => write!(f, "error looking up URL"), 124 | ConnectionError::Canceled => write!(f, "canceled request"), 125 | ConnectionError::Shutdown => write!(f, "The connection was shut down"), 126 | } 127 | } 128 | } 129 | 130 | impl std::error::Error for ConnectionError { 131 | fn source(&self) -> 
Option<&(dyn std::error::Error + 'static)> { 132 | match self { 133 | ConnectionError::Io(e) => Some(e), 134 | _ => None, 135 | } 136 | } 137 | } 138 | 139 | #[derive(Debug)] 140 | pub enum ConsumerError { 141 | Connection(ConnectionError), 142 | MissingPayload(String), 143 | Io(io::Error), 144 | ChannelFull, 145 | Closed, 146 | BuildError, 147 | } 148 | 149 | impl From for ConsumerError { 150 | fn from(err: ConnectionError) -> Self { 151 | ConsumerError::Connection(err) 152 | } 153 | } 154 | 155 | impl From for ConsumerError { 156 | fn from(err: io::Error) -> Self { 157 | ConsumerError::Io(err) 158 | } 159 | } 160 | 161 | impl From for ConsumerError { 162 | fn from(err: futures::channel::mpsc::SendError) -> Self { 163 | if err.is_full() { 164 | ConsumerError::ChannelFull 165 | } else { 166 | ConsumerError::Closed 167 | } 168 | } 169 | } 170 | 171 | impl fmt::Display for ConsumerError { 172 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 173 | match self { 174 | ConsumerError::Connection(e) => write!(f, "Connection error: {}", e), 175 | ConsumerError::MissingPayload(s) => write!(f, "Missing payload: {}", s), 176 | ConsumerError::Io(s) => write!(f, "Decompression error: {}", s), 177 | ConsumerError::ChannelFull => write!( 178 | f, 179 | "cannot send message to the consumer engine: the channel is full" 180 | ), 181 | ConsumerError::Closed => write!( 182 | f, 183 | "cannot send message to the consumer engine: the channel is closed" 184 | ), 185 | ConsumerError::BuildError => write!(f, "Error while building the consumer."), 186 | } 187 | } 188 | } 189 | 190 | impl std::error::Error for ConsumerError { 191 | fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { 192 | match self { 193 | ConsumerError::Connection(e) => Some(e), 194 | _ => None, 195 | } 196 | } 197 | } 198 | 199 | pub enum ProducerError { 200 | Connection(ConnectionError), 201 | Custom(String), 202 | Io(io::Error), 203 | PartialSend(Vec>), 204 | /// Indiciates the error was part of 
sending a batch, and thus shared across the batch 205 | Batch(Arc), 206 | } 207 | 208 | impl From for ProducerError { 209 | fn from(err: ConnectionError) -> Self { 210 | ProducerError::Connection(err) 211 | } 212 | } 213 | 214 | impl From for ProducerError { 215 | fn from(err: io::Error) -> Self { 216 | ProducerError::Io(err) 217 | } 218 | } 219 | 220 | impl fmt::Display for ProducerError { 221 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 222 | match self { 223 | ProducerError::Connection(e) => write!(f, "Connection error: {}", e), 224 | ProducerError::Io(e) => write!(f, "Compression error: {}", e), 225 | ProducerError::Custom(s) => write!(f, "Custom error: {}", s), 226 | ProducerError::Batch(e) => write!(f, "Batch error: {}", e), 227 | ProducerError::PartialSend(e) => { 228 | let (successes, failures) = e.iter().fold((0, 0), |(s, f), r| match r { 229 | Ok(_) => (s + 1, f), 230 | Err(_) => (s, f + 1), 231 | }); 232 | write!( 233 | f, 234 | "Partial send error - {} successful, {} failed", 235 | successes, failures 236 | )?; 237 | 238 | if failures > 0 { 239 | let first_error = e 240 | .iter() 241 | .find(|r| r.is_err()) 242 | .unwrap() 243 | .as_ref() 244 | .map(drop) 245 | .unwrap_err(); 246 | write!(f, "first error: {}", first_error)?; 247 | } 248 | Ok(()) 249 | } 250 | } 251 | } 252 | } 253 | 254 | impl fmt::Debug for ProducerError { 255 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 256 | match self { 257 | ProducerError::Connection(e) => write!(f, "Connection({:?})", e), 258 | ProducerError::Custom(msg) => write!(f, "Custom({:?})", msg), 259 | ProducerError::Io(e) => write!(f, "Connection({:?})", e), 260 | ProducerError::Batch(e) => write!(f, "Connection({:?})", e), 261 | ProducerError::PartialSend(parts) => { 262 | write!(f, "PartialSend(")?; 263 | for (i, part) in parts.iter().enumerate() { 264 | match part { 265 | Ok(_) => write!(f, "Ok(SendFuture)")?, 266 | Err(e) => write!(f, "Err({:?})", e)?, 267 | } 268 | if i < (parts.len() - 
1) { 269 | write!(f, ", ")?; 270 | } 271 | } 272 | write!(f, ")") 273 | } 274 | } 275 | } 276 | } 277 | 278 | impl std::error::Error for ProducerError { 279 | fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { 280 | match self { 281 | ProducerError::Connection(e) => Some(e), 282 | ProducerError::Io(e) => Some(e), 283 | ProducerError::Batch(e) => Some(e.as_ref()), 284 | ProducerError::PartialSend(parts) => parts 285 | .iter() 286 | .find(|r| r.is_err()) 287 | .map(|r| r.as_ref().map(drop).unwrap_err() as _), 288 | ProducerError::Custom(_) => None, 289 | } 290 | } 291 | } 292 | 293 | #[derive(Debug)] 294 | pub enum ServiceDiscoveryError { 295 | Connection(ConnectionError), 296 | Query(Option, Option), 297 | NotFound, 298 | DnsLookupError, 299 | Canceled, 300 | Shutdown, 301 | Dummy, 302 | } 303 | 304 | impl From for ServiceDiscoveryError { 305 | fn from(err: ConnectionError) -> Self { 306 | ServiceDiscoveryError::Connection(err) 307 | } 308 | } 309 | 310 | impl fmt::Display for ServiceDiscoveryError { 311 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 312 | match self { 313 | ServiceDiscoveryError::Connection(e) => write!(f, "Connection error: {}", e), 314 | ServiceDiscoveryError::Query(e, s) => { 315 | write!(f, "Query error ({:?}): {}", e, s.as_deref().unwrap_or("")) 316 | } 317 | ServiceDiscoveryError::NotFound => write!(f, "cannot find topic"), 318 | ServiceDiscoveryError::DnsLookupError => write!(f, "cannot lookup broker address"), 319 | ServiceDiscoveryError::Canceled => write!(f, "canceled request"), 320 | ServiceDiscoveryError::Shutdown => write!(f, "service discovery engine not responding"), 321 | ServiceDiscoveryError::Dummy => write!(f, "placeholder error"), 322 | } 323 | } 324 | } 325 | 326 | impl std::error::Error for ServiceDiscoveryError { 327 | fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { 328 | match self { 329 | ServiceDiscoveryError::Connection(e) => Some(e), 330 | _ => None, 331 | } 332 | } 333 | } 334 | 
335 | #[derive(Debug)] 336 | pub enum AuthenticationError { 337 | Custom(String) 338 | } 339 | 340 | impl fmt::Display for AuthenticationError { 341 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 342 | match self { 343 | AuthenticationError::Custom(m) => write!(f, "authentication error [{}]", m) 344 | } 345 | } 346 | } 347 | 348 | impl std::error::Error for AuthenticationError { 349 | 350 | } 351 | 352 | #[derive(Clone)] 353 | pub(crate) struct SharedError { 354 | error_set: Arc, 355 | error: Arc>>, 356 | } 357 | 358 | impl SharedError { 359 | pub fn new() -> SharedError { 360 | SharedError { 361 | error_set: Arc::new(AtomicBool::new(false)), 362 | error: Arc::new(Mutex::new(None)), 363 | } 364 | } 365 | 366 | pub fn is_set(&self) -> bool { 367 | self.error_set.load(Ordering::Relaxed) 368 | } 369 | 370 | pub fn remove(&self) -> Option { 371 | let mut lock = self.error.lock().unwrap(); 372 | let error = lock.take(); 373 | self.error_set.store(false, Ordering::Release); 374 | error 375 | } 376 | 377 | pub fn set(&self, error: ConnectionError) { 378 | let mut lock = self.error.lock().unwrap(); 379 | *lock = Some(error); 380 | self.error_set.store(true, Ordering::Release); 381 | } 382 | } 383 | 384 | use crate::message::proto::ServerError; 385 | use crate::producer::SendFuture; 386 | 387 | pub(crate) fn server_error(i: i32) -> Option { 388 | match i { 389 | 0 => Some(ServerError::UnknownError), 390 | 1 => Some(ServerError::MetadataError), 391 | 2 => Some(ServerError::PersistenceError), 392 | 3 => Some(ServerError::AuthenticationError), 393 | 4 => Some(ServerError::AuthorizationError), 394 | 5 => Some(ServerError::ConsumerBusy), 395 | 6 => Some(ServerError::ServiceNotReady), 396 | 7 => Some(ServerError::ProducerBlockedQuotaExceededError), 397 | 8 => Some(ServerError::ProducerBlockedQuotaExceededException), 398 | 9 => Some(ServerError::ChecksumError), 399 | 10 => Some(ServerError::UnsupportedVersionError), 400 | 11 => Some(ServerError::TopicNotFound), 401 | 
12 => Some(ServerError::SubscriptionNotFound), 402 | 13 => Some(ServerError::ConsumerNotFound), 403 | 14 => Some(ServerError::TooManyRequests), 404 | 15 => Some(ServerError::TopicTerminatedError), 405 | 16 => Some(ServerError::ProducerBusy), 406 | 17 => Some(ServerError::InvalidTopicName), 407 | /* FIXME: why aren't they found by the compiler? Max enum size? 408 | 18 => Some(ServerError::IncompatibleSchema), 409 | 19 => Some(ServerError::ConsumerAssignError), 410 | 20 => Some(ServerError::TransactionCoordinatorNotFound), 411 | 21 => Some(ServerError::InvalidTxnStatus), 412 | */ 413 | _ => None, 414 | } 415 | } 416 | -------------------------------------------------------------------------------- /src/executor.rs: -------------------------------------------------------------------------------- 1 | //! executor abstraction 2 | //! 3 | //! this crate is compatible with Tokio and async-std, by assembling them 4 | //! under the [Executor] trait 5 | use futures::{Future, Stream}; 6 | use std::{ops::Deref, pin::Pin, sync::Arc}; 7 | 8 | /// indicates which executor is used 9 | pub enum ExecutorKind { 10 | /// Tokio executor 11 | Tokio, 12 | /// async-std executor 13 | AsyncStd, 14 | } 15 | 16 | /// Wrapper trait abstracting the Tokio and async-std executors 17 | pub trait Executor: Clone + Send + Sync + 'static { 18 | /// spawns a new task 19 | #[allow(clippy::clippy::result_unit_err)] 20 | fn spawn(&self, f: Pin + Send>>) -> Result<(), ()>; 21 | /// spawns a new blocking task 22 | fn spawn_blocking(&self, f: F) -> JoinHandle 23 | where 24 | F: FnOnce() -> Res + Send + 'static, 25 | Res: Send + 'static; 26 | 27 | /// returns a Stream that will produce at regular intervals 28 | fn interval(&self, duration: std::time::Duration) -> Interval; 29 | /// waits for a configurable time 30 | fn delay(&self, duration: std::time::Duration) -> Delay; 31 | 32 | /// returns which executor is currently used 33 | // test at runtime and manually choose the implementation 34 | // because we 
cannot (yet) have async trait methods, 35 | // so we cannot move the TCP connection here 36 | fn kind(&self) -> ExecutorKind; 37 | } 38 | 39 | /// Wrapper for the Tokio executor 40 | #[cfg(feature = "tokio-runtime")] 41 | #[derive(Clone, Debug)] 42 | pub struct TokioExecutor; 43 | 44 | #[cfg(feature = "tokio-runtime")] 45 | impl Executor for TokioExecutor { 46 | fn spawn(&self, f: Pin + Send>>) -> Result<(), ()> { 47 | tokio::task::spawn(f); 48 | Ok(()) 49 | } 50 | 51 | fn spawn_blocking(&self, f: F) -> JoinHandle 52 | where 53 | F: FnOnce() -> Res + Send + 'static, 54 | Res: Send + 'static, 55 | { 56 | JoinHandle::Tokio(tokio::task::spawn_blocking(f)) 57 | } 58 | 59 | fn interval(&self, duration: std::time::Duration) -> Interval { 60 | Interval::Tokio(tokio::time::interval(duration)) 61 | } 62 | 63 | fn delay(&self, duration: std::time::Duration) -> Delay { 64 | Delay::Tokio(tokio::time::sleep(duration)) 65 | } 66 | 67 | fn kind(&self) -> ExecutorKind { 68 | ExecutorKind::Tokio 69 | } 70 | } 71 | 72 | /// Wrapper for the async-std executor 73 | #[cfg(feature = "async-std-runtime")] 74 | #[derive(Clone, Debug)] 75 | pub struct AsyncStdExecutor; 76 | 77 | #[cfg(feature = "async-std-runtime")] 78 | impl Executor for AsyncStdExecutor { 79 | fn spawn(&self, f: Pin + Send>>) -> Result<(), ()> { 80 | async_std::task::spawn(f); 81 | Ok(()) 82 | } 83 | 84 | fn spawn_blocking(&self, f: F) -> JoinHandle 85 | where 86 | F: FnOnce() -> Res + Send + 'static, 87 | Res: Send + 'static, 88 | { 89 | JoinHandle::AsyncStd(async_std::task::spawn_blocking(f)) 90 | } 91 | 92 | fn interval(&self, duration: std::time::Duration) -> Interval { 93 | Interval::AsyncStd(async_std::stream::interval(duration)) 94 | } 95 | 96 | fn delay(&self, duration: std::time::Duration) -> Delay { 97 | use async_std::prelude::FutureExt; 98 | Delay::AsyncStd(Box::pin(async_std::future::ready(()).delay(duration))) 99 | } 100 | 101 | fn kind(&self) -> ExecutorKind { 102 | ExecutorKind::AsyncStd 103 | } 104 | } 
105 | 106 | impl Executor for Arc { 107 | fn spawn(&self, f: Pin + Send>>) -> Result<(), ()> { 108 | self.deref().spawn(f) 109 | } 110 | 111 | fn spawn_blocking(&self, f: F) -> JoinHandle 112 | where 113 | F: FnOnce() -> Res + Send + 'static, 114 | Res: Send + 'static, 115 | { 116 | self.deref().spawn_blocking(f) 117 | } 118 | 119 | fn interval(&self, duration: std::time::Duration) -> Interval { 120 | self.deref().interval(duration) 121 | } 122 | 123 | fn delay(&self, duration: std::time::Duration) -> Delay { 124 | self.deref().delay(duration) 125 | } 126 | 127 | fn kind(&self) -> ExecutorKind { 128 | self.deref().kind() 129 | } 130 | } 131 | 132 | /// future returned by [Executor::spawn_blocking] to await on the task's result 133 | pub enum JoinHandle { 134 | /// wrapper for tokio's `JoinHandle` 135 | #[cfg(feature = "tokio-runtime")] 136 | Tokio(tokio::task::JoinHandle), 137 | /// wrapper for async-std's `JoinHandle` 138 | #[cfg(feature = "async-std-runtime")] 139 | AsyncStd(async_std::task::JoinHandle), 140 | // here to avoid a compilation error since T is not used 141 | #[cfg(all(not(feature = "tokio-runtime"), not(feature = "async-std-runtime")))] 142 | PlaceHolder(T), 143 | } 144 | 145 | use std::task::Poll; 146 | impl Future for JoinHandle { 147 | type Output = Option; 148 | 149 | fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context) -> std::task::Poll { 150 | match self.get_mut() { 151 | #[cfg(feature = "tokio-runtime")] 152 | JoinHandle::Tokio(j) => match Pin::new(j).poll(cx) { 153 | Poll::Pending => Poll::Pending, 154 | Poll::Ready(v) => Poll::Ready(v.ok()), 155 | }, 156 | #[cfg(feature = "async-std-runtime")] 157 | JoinHandle::AsyncStd(j) => match Pin::new(j).poll(cx) { 158 | Poll::Pending => Poll::Pending, 159 | Poll::Ready(v) => Poll::Ready(Some(v)), 160 | }, 161 | #[cfg(all(not(feature = "tokio-runtime"), not(feature = "async-std-runtime")))] 162 | JoinHandle::PlaceHolder(t) => { 163 | unimplemented!("please activate one of the following cargo 
features: tokio-runtime, async-std-runtime") 164 | } 165 | } 166 | } 167 | } 168 | 169 | /// a `Stream` producing a `()` at rgular time intervals 170 | pub enum Interval { 171 | /// wrapper for tokio's interval 172 | #[cfg(feature = "tokio-runtime")] 173 | Tokio(tokio::time::Interval), 174 | /// wrapper for async-std's interval 175 | #[cfg(feature = "async-std-runtime")] 176 | AsyncStd(async_std::stream::Interval), 177 | #[cfg(all(not(feature = "tokio-runtime"), not(feature = "async-std-runtime")))] 178 | PlaceHolder, 179 | } 180 | 181 | impl Stream for Interval { 182 | type Item = (); 183 | 184 | fn poll_next( 185 | self: Pin<&mut Self>, 186 | cx: &mut std::task::Context, 187 | ) -> std::task::Poll> { 188 | unsafe { 189 | match Pin::get_unchecked_mut(self) { 190 | #[cfg(feature = "tokio-runtime")] 191 | Interval::Tokio(j) => match Pin::new_unchecked(j).poll_tick(cx) { 192 | Poll::Pending => Poll::Pending, 193 | Poll::Ready(_) => Poll::Ready(Some(())), 194 | }, 195 | #[cfg(feature = "async-std-runtime")] 196 | Interval::AsyncStd(j) => match Pin::new_unchecked(j).poll_next(cx) { 197 | Poll::Pending => Poll::Pending, 198 | Poll::Ready(v) => Poll::Ready(v), 199 | }, 200 | #[cfg(all(not(feature = "tokio-runtime"), not(feature = "async-std-runtime")))] 201 | Interval::PlaceHolder => { 202 | unimplemented!("please activate one of the following cargo features: tokio-runtime, async-std-runtime") 203 | } 204 | } 205 | } 206 | } 207 | } 208 | 209 | /// a future producing a `()` after some time 210 | pub enum Delay { 211 | /// wrapper around tokio's `Sleep` 212 | #[cfg(feature = "tokio-runtime")] 213 | Tokio(tokio::time::Sleep), 214 | /// wrapper around async-std's `Delay` 215 | #[cfg(feature = "async-std-runtime")] 216 | AsyncStd(Pin + Send>>), 217 | } 218 | 219 | impl Future for Delay { 220 | type Output = (); 221 | 222 | fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context) -> std::task::Poll { 223 | unsafe { 224 | match Pin::get_unchecked_mut(self) { 225 | 
#[cfg(feature = "tokio-runtime")] 226 | Delay::Tokio(d) => match Pin::new_unchecked(d).poll(cx) { 227 | Poll::Pending => Poll::Pending, 228 | Poll::Ready(_) => Poll::Ready(()), 229 | }, 230 | #[cfg(feature = "async-std-runtime")] 231 | Delay::AsyncStd(j) => match Pin::new_unchecked(j).poll(cx) { 232 | Poll::Pending => Poll::Pending, 233 | Poll::Ready(_) => Poll::Ready(()), 234 | }, 235 | } 236 | } 237 | } 238 | } 239 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # Pure Rust async await client for Apache Pulsar 2 | //! 3 | //! This is a pure Rust client for Apache Pulsar that does not depend on the 4 | //! C++ Pulsar library. It provides an async/await based API, compatible with 5 | //! [Tokio](https://tokio.rs/) and [async-std](https://async.rs/). 6 | //! 7 | //! Features: 8 | //! - URL based (`pulsar://` and `pulsar+ssl://`) connections with DNS lookup 9 | //! - multi topic consumers (based on a regex) 10 | //! - TLS connection 11 | //! - configurable executor (Tokio or async-std) 12 | //! - automatic reconnection with exponential back off 13 | //! - message batching 14 | //! - compression with LZ4, zlib, zstd or Snappy (can be deactivated with Cargo features) 15 | //! 16 | //! ## Examples 17 | //! 18 | //! Copy this into your project's Cargo.toml: 19 | //! 20 | //! ```toml 21 | //! [dependencies] 22 | //! env_logger = "0.9" 23 | //! pulsar = "4.1.1" 24 | //! serde = { version = "1.0", features = ["derive"] } 25 | //! serde_json = "1.0" 26 | //! tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } 27 | //! log = "0.4.6" 28 | //! futures = "0.3" 29 | //! ``` 30 | //! 31 | //! ### Producing 32 | //! ```rust,no_run 33 | //! use pulsar::{ 34 | //! message::proto, producer, Error as PulsarError, Pulsar, SerializeMessage, TokioExecutor, 35 | //! }; 36 | //! use serde::{Deserialize, Serialize}; 37 | //! 38 | //! 
#[derive(Serialize, Deserialize)] 39 | //! struct TestData { 40 | //! data: String, 41 | //! } 42 | //! 43 | //! impl SerializeMessage for TestData { 44 | //! fn serialize_message(input: Self) -> Result { 45 | //! let payload = serde_json::to_vec(&input).map_err(|e| PulsarError::Custom(e.to_string()))?; 46 | //! Ok(producer::Message { 47 | //! payload, 48 | //! ..Default::default() 49 | //! }) 50 | //! } 51 | //! } 52 | //! 53 | //! #[tokio::main] 54 | //! async fn main() -> Result<(), pulsar::Error> { 55 | //! env_logger::init(); 56 | //! 57 | //! let addr = "pulsar://127.0.0.1:6650"; 58 | //! let pulsar: Pulsar<_> = Pulsar::builder(addr, TokioExecutor).build().await?; 59 | //! let mut producer = pulsar 60 | //! .producer() 61 | //! .with_topic("non-persistent://public/default/test") 62 | //! .with_name("my producer") 63 | //! .with_options(producer::ProducerOptions { 64 | //! schema: Some(proto::Schema { 65 | //! r#type: proto::schema::Type::String as i32, 66 | //! ..Default::default() 67 | //! }), 68 | //! ..Default::default() 69 | //! }) 70 | //! .build() 71 | //! .await?; 72 | //! 73 | //! let mut counter = 0usize; 74 | //! loop { 75 | //! producer 76 | //! .send(TestData { 77 | //! data: "data".to_string(), 78 | //! }) 79 | //! .await?; 80 | //! 81 | //! counter += 1; 82 | //! println!("{} messages", counter); 83 | //! tokio::time::sleep(std::time::Duration::from_millis(2000)).await; 84 | //! } 85 | //! } 86 | //! ``` 87 | //! 88 | //! ### Consuming 89 | //! ```rust,no_run 90 | //! use futures::TryStreamExt; 91 | //! use pulsar::{ 92 | //! message::proto::command_subscribe::SubType, message::Payload, Consumer, DeserializeMessage, 93 | //! Pulsar, TokioExecutor, 94 | //! }; 95 | //! use serde::{Deserialize, Serialize}; 96 | //! 97 | //! #[derive(Serialize, Deserialize)] 98 | //! struct TestData { 99 | //! data: String, 100 | //! } 101 | //! 102 | //! impl DeserializeMessage for TestData { 103 | //! type Output = Result; 104 | //! 105 | //! 
fn deserialize_message(payload: &Payload) -> Self::Output { 106 | //! serde_json::from_slice(&payload.data) 107 | //! } 108 | //! } 109 | //! 110 | //! #[tokio::main] 111 | //! async fn main() -> Result<(), pulsar::Error> { 112 | //! env_logger::init(); 113 | //! 114 | //! let addr = "pulsar://127.0.0.1:6650"; 115 | //! let pulsar: Pulsar<_> = Pulsar::builder(addr, TokioExecutor).build().await?; 116 | //! 117 | //! let mut consumer: Consumer = pulsar 118 | //! .consumer() 119 | //! .with_topic("test") 120 | //! .with_consumer_name("test_consumer") 121 | //! .with_subscription_type(SubType::Exclusive) 122 | //! .with_subscription("test_subscription") 123 | //! .build() 124 | //! .await?; 125 | //! 126 | //! let mut counter = 0usize; 127 | //! while let Some(msg) = consumer.try_next().await? { 128 | //! consumer.ack(&msg).await?; 129 | //! let data = match msg.deserialize() { 130 | //! Ok(data) => data, 131 | //! Err(e) => { 132 | //! log::error!("could not deserialize message: {:?}", e); 133 | //! break; 134 | //! } 135 | //! }; 136 | //! 137 | //! if data.data.as_str() != "data" { 138 | //! log::error!("Unexpected payload: {}", &data.data); 139 | //! break; 140 | //! } 141 | //! counter += 1; 142 | //! log::info!("got {} messages", counter); 143 | //! } 144 | //! 145 | //! Ok(()) 146 | //! } 147 | //! 
``` 148 | #![allow(clippy::too_many_arguments)] 149 | #![allow(clippy::large_enum_variant)] 150 | extern crate futures; 151 | #[macro_use] 152 | extern crate log; 153 | extern crate nom; 154 | extern crate prost_derive; 155 | 156 | #[cfg(test)] 157 | #[macro_use] 158 | extern crate serde; 159 | 160 | pub use client::{DeserializeMessage, Pulsar, PulsarBuilder, SerializeMessage}; 161 | pub use connection::Authentication; 162 | pub use connection_manager::{ 163 | BrokerAddress, ConnectionRetryOptions, OperationRetryOptions, TlsOptions, 164 | }; 165 | pub use consumer::{Consumer, ConsumerBuilder, ConsumerOptions}; 166 | pub use error::Error; 167 | #[cfg(feature = "async-std-runtime")] 168 | pub use executor::AsyncStdExecutor; 169 | pub use executor::Executor; 170 | #[cfg(feature = "tokio-runtime")] 171 | pub use executor::TokioExecutor; 172 | pub use message::proto::command_subscribe::SubType; 173 | pub use message::{ 174 | proto::{self, CommandSendReceipt}, 175 | Payload, 176 | }; 177 | pub use producer::{MultiTopicProducer, Producer, ProducerOptions}; 178 | 179 | mod client; 180 | mod connection; 181 | mod connection_manager; 182 | pub mod consumer; 183 | pub mod error; 184 | pub mod executor; 185 | pub mod message; 186 | pub mod producer; 187 | pub mod reader; 188 | pub mod authentication; 189 | mod service_discovery; 190 | 191 | #[cfg(test)] 192 | mod tests { 193 | use futures::{future::try_join_all, StreamExt}; 194 | use log::{LevelFilter, Metadata, Record}; 195 | use std::collections::BTreeSet; 196 | use std::time::{Duration, Instant}; 197 | 198 | #[cfg(feature = "tokio-runtime")] 199 | use tokio::time::timeout; 200 | 201 | #[cfg(feature = "tokio-runtime")] 202 | use crate::executor::TokioExecutor; 203 | 204 | use crate::client::SerializeMessage; 205 | use crate::consumer::{InitialPosition, Message}; 206 | use crate::message::proto::command_subscribe::SubType; 207 | use crate::message::Payload; 208 | use crate::Error as PulsarError; 209 | 210 | use super::*; 211 
| 212 | #[derive(Debug, Serialize, Deserialize)] 213 | struct TestData { 214 | pub id: u64, 215 | pub data: String, 216 | } 217 | 218 | impl<'a> SerializeMessage for &'a TestData { 219 | fn serialize_message(input: Self) -> Result { 220 | let payload = 221 | serde_json::to_vec(input).map_err(|e| PulsarError::Custom(e.to_string()))?; 222 | Ok(producer::Message { 223 | payload, 224 | ..Default::default() 225 | }) 226 | } 227 | } 228 | 229 | impl DeserializeMessage for TestData { 230 | type Output = Result; 231 | 232 | fn deserialize_message(payload: &Payload) -> Self::Output { 233 | serde_json::from_slice(&payload.data) 234 | } 235 | } 236 | 237 | #[derive(Debug)] 238 | enum Error { 239 | Pulsar(PulsarError), 240 | Timeout(std::io::Error), 241 | Serde(serde_json::Error), 242 | Utf8(std::string::FromUtf8Error), 243 | } 244 | 245 | impl From for Error { 246 | fn from(e: std::io::Error) -> Self { 247 | Error::Timeout(e) 248 | } 249 | } 250 | 251 | impl From for Error { 252 | fn from(e: PulsarError) -> Self { 253 | Error::Pulsar(e) 254 | } 255 | } 256 | 257 | impl From for Error { 258 | fn from(e: serde_json::Error) -> Self { 259 | Error::Serde(e) 260 | } 261 | } 262 | 263 | impl From for Error { 264 | fn from(err: std::string::FromUtf8Error) -> Self { 265 | Error::Utf8(err) 266 | } 267 | } 268 | 269 | impl std::fmt::Display for Error { 270 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 271 | match self { 272 | Error::Pulsar(e) => write!(f, "{}", e), 273 | Error::Timeout(e) => write!(f, "{}", e), 274 | Error::Serde(e) => write!(f, "{}", e), 275 | Error::Utf8(e) => write!(f, "{}", e), 276 | } 277 | } 278 | } 279 | 280 | pub struct SimpleLogger { 281 | pub tag: &'static str, 282 | } 283 | impl log::Log for SimpleLogger { 284 | fn enabled(&self, _metadata: &Metadata) -> bool { 285 | //metadata.level() <= Level::Info 286 | true 287 | } 288 | 289 | fn log(&self, record: &Record) { 290 | if self.enabled(record.metadata()) { 291 | println!( 292 | "{} {} 
{}\t{}\t{}", 293 | chrono::Utc::now(), 294 | self.tag, 295 | record.level(), 296 | record.module_path().unwrap(), 297 | record.args() 298 | ); 299 | } 300 | } 301 | fn flush(&self) {} 302 | } 303 | 304 | pub static TEST_LOGGER: SimpleLogger = SimpleLogger { tag: "" }; 305 | 306 | #[tokio::test] 307 | #[cfg(feature = "tokio-runtime")] 308 | async fn round_trip() { 309 | let _ = log::set_logger(&TEST_LOGGER); 310 | let _ = log::set_max_level(LevelFilter::Debug); 311 | 312 | let addr = "pulsar://127.0.0.1:6650"; 313 | let pulsar: Pulsar<_> = Pulsar::builder(addr, TokioExecutor).build().await.unwrap(); 314 | 315 | // random topic to better allow multiple test runs while debugging 316 | let topic = format!("test_{}", rand::random::()); 317 | 318 | let mut producer = pulsar.producer().with_topic(&topic).build().await.unwrap(); 319 | info!("producer created"); 320 | 321 | let message_ids: BTreeSet = (0..100).collect(); 322 | 323 | info!("will send message"); 324 | let mut sends = Vec::new(); 325 | for &id in &message_ids { 326 | let message = TestData { 327 | data: "data".to_string(), 328 | id, 329 | }; 330 | sends.push(producer.send(&message).await.unwrap()); 331 | } 332 | try_join_all(sends).await.unwrap(); 333 | 334 | info!("sent"); 335 | 336 | let mut consumer: Consumer = pulsar 337 | .consumer() 338 | .with_topic(&topic) 339 | .with_consumer_name("test_consumer") 340 | .with_subscription_type(SubType::Exclusive) 341 | .with_subscription("test_subscription") 342 | .with_options(ConsumerOptions { 343 | initial_position: InitialPosition::Earliest, 344 | ..Default::default() 345 | }) 346 | .build() 347 | .await 348 | .unwrap(); 349 | 350 | info!("consumer created"); 351 | 352 | let topics = consumer.topics(); 353 | debug!("consumer connected to {:?}", topics); 354 | assert_eq!(topics.len(), 1); 355 | assert!(topics[0].ends_with(&topic)); 356 | 357 | let mut received = BTreeSet::new(); 358 | while let Ok(Some(msg)) = timeout(Duration::from_secs(10), consumer.next()).await 
{ 359 | let msg: Message = msg.unwrap(); 360 | info!("id: {:?}", msg.message_id()); 361 | received.insert(msg.deserialize().unwrap().id); 362 | consumer.ack(&msg).await.unwrap(); 363 | if received.len() == message_ids.len() { 364 | break; 365 | } 366 | } 367 | assert_eq!(received.len(), message_ids.len()); 368 | assert_eq!(received, message_ids); 369 | } 370 | 371 | #[tokio::test] 372 | #[cfg(feature = "tokio-runtime")] 373 | async fn unsized_data() { 374 | let _ = log::set_logger(&TEST_LOGGER); 375 | let _ = log::set_max_level(LevelFilter::Debug); 376 | 377 | let addr = "pulsar://127.0.0.1:6650"; 378 | let test_id: u16 = rand::random(); 379 | let pulsar: Pulsar<_> = Pulsar::builder(addr, TokioExecutor).build().await.unwrap(); 380 | 381 | // test &str 382 | { 383 | let topic = format!("test_unsized_data_str_{}", test_id); 384 | let send_data = "some unsized data"; 385 | 386 | pulsar 387 | .send(&topic, send_data.to_string()) 388 | .await 389 | .unwrap() 390 | .await 391 | .unwrap(); 392 | 393 | let mut consumer = pulsar 394 | .consumer() 395 | .with_topic(&topic) 396 | .with_subscription_type(SubType::Exclusive) 397 | .with_subscription("test_subscription") 398 | .with_options(ConsumerOptions { 399 | initial_position: InitialPosition::Earliest, 400 | ..Default::default() 401 | }) 402 | .build::() 403 | .await 404 | .unwrap(); 405 | 406 | let msg = timeout(Duration::from_secs(1), consumer.next()) 407 | .await 408 | .unwrap() 409 | .unwrap() 410 | .unwrap(); 411 | consumer.ack(&msg).await.unwrap(); 412 | 413 | let data = msg.deserialize().unwrap(); 414 | if data.as_str() != send_data { 415 | panic!("Unexpected payload in &str test: {}", &data); 416 | } 417 | } 418 | 419 | // test &[u8] 420 | { 421 | let topic = format!("test_unsized_data_bytes_{}", test_id); 422 | let send_data: &[u8] = &[0, 1, 2, 3]; 423 | 424 | pulsar 425 | .send(&topic, send_data.to_vec()) 426 | .await 427 | .unwrap() 428 | .await 429 | .unwrap(); 430 | 431 | let mut consumer = pulsar 432 | 
.consumer() 433 | .with_topic(&topic) 434 | .with_subscription_type(SubType::Exclusive) 435 | .with_subscription("test_subscription") 436 | .with_options(ConsumerOptions { 437 | initial_position: InitialPosition::Earliest, 438 | ..Default::default() 439 | }) 440 | .build::>() 441 | .await 442 | .unwrap(); 443 | 444 | let msg: Message> = timeout(Duration::from_secs(1), consumer.next()) 445 | .await 446 | .unwrap() 447 | .unwrap() 448 | .unwrap(); 449 | consumer.ack(&msg).await.unwrap(); 450 | let data = msg.deserialize(); 451 | if data.as_slice() != send_data { 452 | panic!("Unexpected payload in &[u8] test: {:?}", &data); 453 | } 454 | } 455 | } 456 | 457 | #[tokio::test] 458 | #[cfg(feature = "tokio-runtime")] 459 | async fn redelivery() { 460 | let _ = log::set_logger(&TEST_LOGGER); 461 | let _ = log::set_max_level(LevelFilter::Debug); 462 | 463 | let addr = "pulsar://127.0.0.1:6650"; 464 | let topic = format!("test_redelivery_{}", rand::random::()); 465 | 466 | let pulsar: Pulsar<_> = Pulsar::builder(addr, TokioExecutor).build().await.unwrap(); 467 | pulsar 468 | .send(&topic, String::from("data")) 469 | .await 470 | .unwrap() 471 | .await 472 | .unwrap(); 473 | 474 | let mut consumer: Consumer = pulsar 475 | .consumer() 476 | .with_topic(topic) 477 | .with_unacked_message_resend_delay(Some(Duration::from_millis(100))) 478 | .with_options(ConsumerOptions { 479 | initial_position: InitialPosition::Earliest, 480 | ..Default::default() 481 | }) 482 | .build() 483 | .await 484 | .unwrap(); 485 | 486 | let _first_receipt = timeout(Duration::from_secs(2), consumer.next()) 487 | .await 488 | .unwrap() 489 | .unwrap() 490 | .unwrap(); 491 | let first_received = Instant::now(); 492 | let second_receipt = timeout(Duration::from_secs(2), consumer.next()) 493 | .await 494 | .unwrap() 495 | .unwrap() 496 | .unwrap(); 497 | let redelivery = first_received.elapsed(); 498 | consumer.ack(&second_receipt).await.unwrap(); 499 | 500 | assert!(redelivery < Duration::from_secs(1)); 
501 | } 502 | 503 | #[tokio::test] 504 | #[cfg(feature = "tokio-runtime")] 505 | async fn batching() { 506 | let _ = log::set_logger(&TEST_LOGGER); 507 | let _ = log::set_max_level(LevelFilter::Debug); 508 | 509 | let addr = "pulsar://127.0.0.1:6650"; 510 | let topic = format!("test_batching_{}", rand::random::()); 511 | 512 | let pulsar: Pulsar<_> = Pulsar::builder(addr, TokioExecutor).build().await.unwrap(); 513 | let mut producer = pulsar 514 | .producer() 515 | .with_topic(&topic) 516 | .with_options(ProducerOptions { 517 | batch_size: Some(5), 518 | ..Default::default() 519 | }) 520 | .build() 521 | .await 522 | .unwrap(); 523 | 524 | let mut consumer: Consumer = 525 | pulsar.consumer().with_topic(topic).build().await.unwrap(); 526 | 527 | let mut send_receipts = Vec::new(); 528 | for i in 0..4 { 529 | send_receipts.push(producer.send(i.to_string()).await.unwrap()); 530 | } 531 | assert!(timeout(Duration::from_millis(100), consumer.next()) 532 | .await 533 | .is_err()); 534 | 535 | send_receipts.push(producer.send(5.to_string()).await.unwrap()); 536 | 537 | timeout(Duration::from_millis(100), try_join_all(send_receipts)) 538 | .await 539 | .unwrap() 540 | .unwrap(); 541 | 542 | let mut count = 0; 543 | while let Some(message) = timeout(Duration::from_millis(100), consumer.next()) 544 | .await 545 | .unwrap() 546 | { 547 | let message = message.unwrap(); 548 | count += 1; 549 | let _ = consumer.ack(&message).await; 550 | if count >= 5 { 551 | break; 552 | } 553 | } 554 | 555 | assert_eq!(count, 5); 556 | let mut send_receipts = Vec::new(); 557 | for i in 5..9 { 558 | send_receipts.push(producer.send(i.to_string()).await.unwrap()); 559 | } 560 | producer.send_batch().await.unwrap(); 561 | timeout(Duration::from_millis(100), try_join_all(send_receipts)) 562 | .await 563 | .unwrap() 564 | .unwrap(); 565 | while let Some(message) = timeout(Duration::from_millis(100), consumer.next()) 566 | .await 567 | .unwrap() 568 | { 569 | let message = message.unwrap(); 570 | 
count += 1; 571 | let _ = consumer.ack(&message).await; 572 | if count >= 9 { 573 | break; 574 | } 575 | } 576 | assert_eq!(count, 9); 577 | } 578 | } 579 | -------------------------------------------------------------------------------- /src/message.rs: -------------------------------------------------------------------------------- 1 | //! low level structures used to send and process raw messages 2 | use crate::connection::RequestKey; 3 | use crate::error::ConnectionError; 4 | use bytes::{Buf, BufMut, BytesMut}; 5 | use nom::{ 6 | bytes::streaming::take, 7 | combinator::{map_res, verify}, 8 | number::streaming::{be_u16, be_u32}, 9 | IResult, 10 | }; 11 | use prost::{self, Message as ImplProtobuf}; 12 | use std::convert::TryFrom; 13 | use std::io::Cursor; 14 | 15 | const CRC_CASTAGNOLI: crc::Crc = crc::Crc::::new(&crc::CRC_32_ISCSI); 16 | 17 | pub use self::proto::BaseCommand; 18 | pub use self::proto::MessageMetadata as Metadata; 19 | 20 | use self::proto::*; 21 | 22 | /// Pulsar binary message 23 | /// 24 | /// this structure holds any command sent to pulsar, like looking up a topic or 25 | /// subscribing on a topic 26 | #[derive(Debug, Clone)] 27 | pub struct Message { 28 | /// Basic pulsar command, as defined in Pulsar's protobuf file 29 | pub command: BaseCommand, 30 | /// payload for topic messages 31 | pub payload: Option, 32 | } 33 | 34 | impl Message { 35 | /// returns the message's RequestKey if present 36 | pub fn request_key(&self) -> Option { 37 | match &self.command { 38 | BaseCommand { 39 | subscribe: Some(CommandSubscribe { request_id, .. }), 40 | .. 41 | } 42 | | BaseCommand { 43 | partition_metadata: Some(CommandPartitionedTopicMetadata { request_id, .. }), 44 | .. 45 | } 46 | | BaseCommand { 47 | partition_metadata_response: 48 | Some(CommandPartitionedTopicMetadataResponse { request_id, .. }), 49 | .. 50 | } 51 | | BaseCommand { 52 | lookup_topic: Some(CommandLookupTopic { request_id, .. }), 53 | .. 
54 | } 55 | | BaseCommand { 56 | lookup_topic_response: Some(CommandLookupTopicResponse { request_id, .. }), 57 | .. 58 | } 59 | | BaseCommand { 60 | producer: Some(CommandProducer { request_id, .. }), 61 | .. 62 | } 63 | | BaseCommand { 64 | producer_success: Some(CommandProducerSuccess { request_id, .. }), 65 | .. 66 | } 67 | | BaseCommand { 68 | unsubscribe: Some(CommandUnsubscribe { request_id, .. }), 69 | .. 70 | } 71 | | BaseCommand { 72 | seek: Some(CommandSeek { request_id, .. }), 73 | .. 74 | } 75 | | BaseCommand { 76 | close_producer: Some(CommandCloseProducer { request_id, .. }), 77 | .. 78 | } 79 | | BaseCommand { 80 | success: Some(CommandSuccess { request_id, .. }), 81 | .. 82 | } 83 | | BaseCommand { 84 | error: Some(CommandError { request_id, .. }), 85 | .. 86 | } 87 | | BaseCommand { 88 | consumer_stats: Some(CommandConsumerStats { request_id, .. }), 89 | .. 90 | } 91 | | BaseCommand { 92 | consumer_stats_response: Some(CommandConsumerStatsResponse { request_id, .. }), 93 | .. 94 | } 95 | | BaseCommand { 96 | get_last_message_id: Some(CommandGetLastMessageId { request_id, .. }), 97 | .. 98 | } 99 | | BaseCommand { 100 | get_last_message_id_response: 101 | Some(CommandGetLastMessageIdResponse { request_id, .. }), 102 | .. 103 | } 104 | | BaseCommand { 105 | get_topics_of_namespace: Some(CommandGetTopicsOfNamespace { request_id, .. }), 106 | .. 107 | } 108 | | BaseCommand { 109 | get_topics_of_namespace_response: 110 | Some(CommandGetTopicsOfNamespaceResponse { request_id, .. }), 111 | .. 112 | } 113 | | BaseCommand { 114 | get_schema: Some(CommandGetSchema { request_id, .. }), 115 | .. 116 | } 117 | | BaseCommand { 118 | get_schema_response: Some(CommandGetSchemaResponse { request_id, .. }), 119 | .. 120 | } => Some(RequestKey::RequestId(*request_id)), 121 | BaseCommand { 122 | send: 123 | Some(CommandSend { 124 | producer_id, 125 | sequence_id, 126 | .. 127 | }), 128 | .. 
129 | } 130 | | BaseCommand { 131 | send_error: 132 | Some(CommandSendError { 133 | producer_id, 134 | sequence_id, 135 | .. 136 | }), 137 | .. 138 | } 139 | | BaseCommand { 140 | send_receipt: 141 | Some(CommandSendReceipt { 142 | producer_id, 143 | sequence_id, 144 | .. 145 | }), 146 | .. 147 | } => Some(RequestKey::ProducerSend { 148 | producer_id: *producer_id, 149 | sequence_id: *sequence_id, 150 | }), 151 | BaseCommand { 152 | active_consumer_change: Some(CommandActiveConsumerChange { consumer_id, .. }), 153 | .. 154 | } 155 | | BaseCommand { 156 | message: Some(CommandMessage { consumer_id, .. }), 157 | .. 158 | } 159 | | BaseCommand { 160 | flow: Some(CommandFlow { consumer_id, .. }), 161 | .. 162 | } 163 | | BaseCommand { 164 | redeliver_unacknowledged_messages: 165 | Some(CommandRedeliverUnacknowledgedMessages { consumer_id, .. }), 166 | .. 167 | } 168 | | BaseCommand { 169 | reached_end_of_topic: Some(CommandReachedEndOfTopic { consumer_id }), 170 | .. 171 | } 172 | | BaseCommand { 173 | ack: Some(CommandAck { consumer_id, .. }), 174 | .. 175 | } => Some(RequestKey::Consumer { 176 | consumer_id: *consumer_id, 177 | }), 178 | BaseCommand { 179 | close_consumer: 180 | Some(CommandCloseConsumer { 181 | consumer_id, 182 | request_id, 183 | }), 184 | .. 185 | } => Some(RequestKey::CloseConsumer { 186 | consumer_id: *consumer_id, 187 | request_id: *request_id, 188 | }), 189 | BaseCommand { 190 | connect: Some(_), .. 191 | } 192 | | BaseCommand { 193 | connected: Some(_), .. 194 | } 195 | | BaseCommand { ping: Some(_), .. } 196 | | BaseCommand { pong: Some(_), .. } => None, 197 | _ => { 198 | match base_command::Type::try_from(self.command.r#type) { 199 | Ok(type_) => { 200 | warn!( 201 | "Unexpected payload for command of type {:?}. 
This is likely a bug!", 202 | type_ 203 | ); 204 | } 205 | Err(()) => { 206 | warn!( 207 | "Received BaseCommand of unexpected type: {}", 208 | self.command.r#type 209 | ); 210 | } 211 | } 212 | None 213 | } 214 | } 215 | } 216 | } 217 | 218 | /// tokio and async-std codec for Pulsar messages 219 | pub struct Codec; 220 | 221 | #[cfg(feature = "tokio-runtime")] 222 | impl tokio_util::codec::Encoder for Codec { 223 | type Error = ConnectionError; 224 | 225 | fn encode(&mut self, item: Message, dst: &mut BytesMut) -> Result<(), ConnectionError> { 226 | let command_size = item.command.encoded_len(); 227 | let metadata_size = item 228 | .payload 229 | .as_ref() 230 | .map(|p| p.metadata.encoded_len()) 231 | .unwrap_or(0); 232 | let payload_size = item.payload.as_ref().map(|p| p.data.len()).unwrap_or(0); 233 | let header_size = if item.payload.is_some() { 18 } else { 8 }; 234 | // Total size does not include the size of the 'totalSize' field, so we subtract 4 235 | let total_size = command_size + metadata_size + payload_size + header_size - 4; 236 | let mut buf = Vec::with_capacity(total_size + 4); 237 | 238 | // Simple command frame 239 | buf.put_u32(total_size as u32); 240 | buf.put_u32(command_size as u32); 241 | item.command.encode(&mut buf)?; 242 | 243 | // Payload command frame 244 | if let Some(payload) = &item.payload { 245 | buf.put_u16(0x0e01); 246 | 247 | let crc_offset = buf.len(); 248 | buf.put_u32(0); // NOTE: Checksum (CRC32c). Overrwritten later to avoid copying. 
249 | 250 | let metdata_offset = buf.len(); 251 | buf.put_u32(metadata_size as u32); 252 | payload.metadata.encode(&mut buf)?; 253 | buf.put(&payload.data[..]); 254 | 255 | let crc = CRC_CASTAGNOLI.checksum(&buf[metdata_offset..]); 256 | let mut crc_buf: &mut [u8] = &mut buf[crc_offset..metdata_offset]; 257 | crc_buf.put_u32(crc); 258 | } 259 | if dst.remaining_mut() < buf.len() { 260 | dst.reserve(buf.len()); 261 | } 262 | dst.put_slice(&buf); 263 | trace!("Encoder sending {} bytes", buf.len()); 264 | // println!("Wrote message {:?}", item); 265 | Ok(()) 266 | } 267 | } 268 | 269 | #[cfg(feature = "tokio-runtime")] 270 | impl tokio_util::codec::Decoder for Codec { 271 | type Item = Message; 272 | type Error = ConnectionError; 273 | 274 | fn decode(&mut self, src: &mut BytesMut) -> Result, ConnectionError> { 275 | trace!("Decoder received {} bytes", src.len()); 276 | if src.len() >= 4 { 277 | let mut buf = Cursor::new(src); 278 | // `messageSize` refers only to _remaining_ message size, so we add 4 to get total frame size 279 | let message_size = buf.get_u32() as usize + 4; 280 | let src = buf.into_inner(); 281 | if src.len() >= message_size { 282 | let msg = { 283 | let (buf, command_frame) = 284 | command_frame(&src[..message_size]).map_err(|err| { 285 | ConnectionError::Decoding(format!( 286 | "Error decoding command frame: {:?}", 287 | err 288 | )) 289 | })?; 290 | let command = BaseCommand::decode(command_frame.command)?; 291 | 292 | let payload = if !buf.is_empty() { 293 | let (buf, payload_frame) = payload_frame(buf).map_err(|err| { 294 | ConnectionError::Decoding(format!( 295 | "Error decoding payload frame: {:?}", 296 | err 297 | )) 298 | })?; 299 | 300 | // TODO: Check crc32 of payload data 301 | 302 | let metadata = Metadata::decode(payload_frame.metadata)?; 303 | Some(Payload { 304 | metadata, 305 | data: buf.to_vec(), 306 | }) 307 | } else { 308 | None 309 | }; 310 | 311 | Message { command, payload } 312 | }; 313 | 314 | //TODO advance as we read, 
rather than this weird post thing
                src.advance(message_size);
                return Ok(Some(msg));
            }
        }
        Ok(None)
    }
}

#[cfg(feature = "async-std-runtime")]
impl asynchronous_codec::Encoder for Codec {
    type Item = Message;
    type Error = ConnectionError;

    /// Serializes a `Message` using Pulsar's binary framing:
    /// `[totalSize][commandSize][command]` and, for messages carrying a
    /// payload, `[magic 0x0e01][crc32c][metadataSize][metadata][data]`.
    fn encode(&mut self, item: Message, dst: &mut BytesMut) -> Result<(), ConnectionError> {
        let command_size = item.command.encoded_len();
        let metadata_size = item
            .payload
            .as_ref()
            .map(|p| p.metadata.encoded_len())
            .unwrap_or(0);
        let payload_size = item.payload.as_ref().map(|p| p.data.len()).unwrap_or(0);
        // 18 = totalSize(4) + commandSize(4) + magic(2) + checksum(4) + metadataSize(4)
        let header_size = if item.payload.is_some() { 18 } else { 8 };
        // Total size does not include the size of the 'totalSize' field, so we subtract 4
        let total_size = command_size + metadata_size + payload_size + header_size - 4;
        let mut buf = Vec::with_capacity(total_size + 4);

        // Simple command frame
        buf.put_u32(total_size as u32);
        buf.put_u32(command_size as u32);
        item.command.encode(&mut buf)?;

        // Payload command frame
        if let Some(payload) = &item.payload {
            buf.put_u16(0x0e01);

            let crc_offset = buf.len();
            buf.put_u32(0); // NOTE: Checksum (CRC32c). Overwritten later to avoid copying.

            let metadata_offset = buf.len();
            buf.put_u32(metadata_size as u32);
            payload.metadata.encode(&mut buf)?;
            buf.put(&payload.data[..]);

            // The CRC32c covers everything after the checksum field itself.
            let crc = CRC_CASTAGNOLI.checksum(&buf[metadata_offset..]);
            let mut crc_buf: &mut [u8] = &mut buf[crc_offset..metadata_offset];
            crc_buf.put_u32(crc);
        }
        if dst.remaining_mut() < buf.len() {
            dst.reserve(buf.len());
        }
        dst.put_slice(&buf);
        trace!("Encoder sending {} bytes", buf.len());
        Ok(())
    }
}

#[cfg(feature = "async-std-runtime")]
impl asynchronous_codec::Decoder for Codec {
    type Item = Message;
    type Error = ConnectionError;

    /// Decodes one complete Pulsar frame out of `src`, returning `Ok(None)`
    /// when the buffer does not yet hold a full frame.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Message>, ConnectionError> {
        trace!("Decoder received {} bytes", src.len());
        if src.len() >= 4 {
            let mut buf = Cursor::new(src);
            // `messageSize` refers only to _remaining_ message size, so we add 4 to get total frame size
            let message_size = buf.get_u32() as usize + 4;
            let src = buf.into_inner();
            if src.len() >= message_size {
                let msg = {
                    let (buf, command_frame) =
                        command_frame(&src[..message_size]).map_err(|err| {
                            ConnectionError::Decoding(format!(
                                "Error decoding command frame: {:?}",
                                err
                            ))
                        })?;
                    let command = BaseCommand::decode(command_frame.command)?;

                    let payload = if !buf.is_empty() {
                        let (buf, payload_frame) = payload_frame(buf).map_err(|err| {
                            ConnectionError::Decoding(format!(
                                "Error decoding payload frame: {:?}",
                                err
                            ))
                        })?;

                        // TODO: Check crc32 of payload data

                        let metadata = Metadata::decode(payload_frame.metadata)?;
                        Some(Payload {
                            metadata,
                            data: buf.to_vec(),
                        })
                    } else {
                        None
                    };

                    Message { command, payload }
                };

                // TODO: advance as we read, rather than this weird post thing
                src.advance(message_size);
                return Ok(Some(msg));
            }
        }
        Ok(None)
    }
}

/// message payload
#[derive(Debug, Clone)]
pub struct Payload {
    /// message metadata added by Pulsar
    pub metadata: Metadata,
    /// raw message data
    pub data: Vec<u8>,
}

/// Parsed `[totalSize][commandSize][command]` header of a frame.
struct CommandFrame<'a> {
    #[allow(dead_code)]
    total_size: u32,
    #[allow(dead_code)]
    command_size: u32,
    command: &'a [u8],
}

/// nom parser for the command frame header.
fn command_frame(i: &[u8]) -> IResult<&[u8], CommandFrame> {
    let (i, total_size) = be_u32(i)?;
    let (i, command_size) = be_u32(i)?;
    let (i, command) = take(command_size)(i)?;

    Ok((
        i,
        CommandFrame {
            total_size,
            command_size,
            command,
        },
    ))
}

/// Parsed `[magic][checksum][metadataSize][metadata]` section of a payload frame.
struct PayloadFrame<'a> {
    #[allow(dead_code)]
    magic_number: u16,
    #[allow(dead_code)]
    checksum: u32,
    #[allow(dead_code)]
    metadata_size: u32,
    metadata: &'a [u8],
}

/// nom parser for the payload frame header.
fn payload_frame(i: &[u8]) -> IResult<&[u8], PayloadFrame> {
    let (i, magic_number) = be_u16(i)?;
    let (i, checksum) = be_u32(i)?;
    let (i, metadata_size) = be_u32(i)?;
    let (i, metadata) = take(metadata_size)(i)?;

    Ok((
        i,
        PayloadFrame {
            magic_number,
            checksum,
            metadata_size,
            metadata,
        },
    ))
}

/// One entry of a batched message payload.
pub(crate) struct BatchedMessage {
    pub metadata: proto::SingleMessageMetadata,
    pub payload: Vec<u8>,
}

/// nom parser for a single batched message entry.
fn batched_message(i: &[u8]) -> IResult<&[u8], BatchedMessage> {
    let (i, metadata_size) = be_u32(i)?;
    let (i, metadata) = verify(
        map_res(take(metadata_size), proto::SingleMessageMetadata::decode),
        // payload_size is defined as i32 in protobuf
        |metadata| metadata.payload_size >= 0,
    )(i)?;
| 500 | let (i, payload) = take(metadata.payload_size as u32)(i)?; 501 | 502 | Ok(( 503 | i, 504 | BatchedMessage { 505 | metadata, 506 | payload: payload.to_vec(), 507 | }, 508 | )) 509 | } 510 | 511 | pub(crate) fn parse_batched_message( 512 | count: u32, 513 | payload: &[u8], 514 | ) -> Result, ConnectionError> { 515 | let (_, result) = 516 | nom::multi::count(batched_message, count as usize)(payload).map_err(|err| { 517 | ConnectionError::Decoding(format!("Error decoding batched messages: {:?}", err)) 518 | })?; 519 | Ok(result) 520 | } 521 | 522 | impl BatchedMessage { 523 | pub(crate) fn serialize(&self, w: &mut Vec) { 524 | w.put_u32(self.metadata.encoded_len() as u32); 525 | let _ = self.metadata.encode(w); 526 | w.put_slice(&self.payload); 527 | } 528 | } 529 | 530 | #[rustfmt::skip] 531 | pub mod proto { 532 | include!(concat!(env!("OUT_DIR"), "/pulsar.proto.rs")); 533 | 534 | //trait implementations used in Consumer::unacked_messages 535 | impl std::cmp::Eq for MessageIdData {} 536 | 537 | impl std::hash::Hash for MessageIdData { 538 | fn hash(&self, state: &mut H) { 539 | self.ledger_id.hash(state); 540 | self.entry_id.hash(state); 541 | self.partition.hash(state); 542 | self.batch_index.hash(state); 543 | self.ack_set.hash(state); 544 | self.batch_size.hash(state); 545 | } 546 | } 547 | } 548 | 549 | impl TryFrom for proto::base_command::Type { 550 | type Error = (); 551 | 552 | fn try_from(value: i32) -> Result { 553 | match value { 554 | 2 => Ok(proto::base_command::Type::Connect), 555 | 3 => Ok(proto::base_command::Type::Connected), 556 | 4 => Ok(proto::base_command::Type::Subscribe), 557 | 5 => Ok(proto::base_command::Type::Producer), 558 | 6 => Ok(proto::base_command::Type::Send), 559 | 7 => Ok(proto::base_command::Type::SendReceipt), 560 | 8 => Ok(proto::base_command::Type::SendError), 561 | 9 => Ok(proto::base_command::Type::Message), 562 | 10 => Ok(proto::base_command::Type::Ack), 563 | 11 => Ok(proto::base_command::Type::Flow), 564 | 12 => 
Ok(proto::base_command::Type::Unsubscribe), 565 | 13 => Ok(proto::base_command::Type::Success), 566 | 14 => Ok(proto::base_command::Type::Error), 567 | 15 => Ok(proto::base_command::Type::CloseProducer), 568 | 16 => Ok(proto::base_command::Type::CloseConsumer), 569 | 17 => Ok(proto::base_command::Type::ProducerSuccess), 570 | 18 => Ok(proto::base_command::Type::Ping), 571 | 19 => Ok(proto::base_command::Type::Pong), 572 | 20 => Ok(proto::base_command::Type::RedeliverUnacknowledgedMessages), 573 | 21 => Ok(proto::base_command::Type::PartitionedMetadata), 574 | 22 => Ok(proto::base_command::Type::PartitionedMetadataResponse), 575 | 23 => Ok(proto::base_command::Type::Lookup), 576 | 24 => Ok(proto::base_command::Type::LookupResponse), 577 | 25 => Ok(proto::base_command::Type::ConsumerStats), 578 | 26 => Ok(proto::base_command::Type::ConsumerStatsResponse), 579 | 27 => Ok(proto::base_command::Type::ReachedEndOfTopic), 580 | 28 => Ok(proto::base_command::Type::Seek), 581 | 29 => Ok(proto::base_command::Type::GetLastMessageId), 582 | 30 => Ok(proto::base_command::Type::GetLastMessageIdResponse), 583 | 31 => Ok(proto::base_command::Type::ActiveConsumerChange), 584 | 32 => Ok(proto::base_command::Type::GetTopicsOfNamespace), 585 | 33 => Ok(proto::base_command::Type::GetTopicsOfNamespaceResponse), 586 | 34 => Ok(proto::base_command::Type::GetSchema), 587 | 35 => Ok(proto::base_command::Type::GetSchemaResponse), 588 | _ => Err(()), 589 | } 590 | } 591 | } 592 | 593 | impl From for ConnectionError { 594 | fn from(e: prost::EncodeError) -> Self { 595 | ConnectionError::Encoding(e.to_string()) 596 | } 597 | } 598 | 599 | impl From for ConnectionError { 600 | fn from(e: prost::DecodeError) -> Self { 601 | ConnectionError::Decoding(e.to_string()) 602 | } 603 | } 604 | 605 | #[cfg(test)] 606 | mod tests { 607 | use crate::message::Codec; 608 | use bytes::BytesMut; 609 | use std::convert::TryFrom; 610 | use tokio_util::codec::{Decoder, Encoder}; 611 | 612 | #[test] 613 | fn 
parse_simple_command() { 614 | let input: &[u8] = &[ 615 | 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x1E, 0x08, 0x02, 0x12, 0x1A, 0x0A, 0x10, 616 | 0x32, 0x2E, 0x30, 0x2E, 0x31, 0x2D, 0x69, 0x6E, 0x63, 0x75, 0x62, 0x61, 0x74, 0x69, 617 | 0x6E, 0x67, 0x20, 0x0C, 0x2A, 0x04, 0x6E, 0x6F, 0x6E, 0x65, 618 | ]; 619 | 620 | let message = Codec.decode(&mut input.into()).unwrap().unwrap(); 621 | 622 | { 623 | let connect = message.command.connect.as_ref().unwrap(); 624 | assert_eq!(connect.client_version, "2.0.1-incubating"); 625 | assert_eq!(connect.auth_method_name.as_ref().unwrap(), "none"); 626 | assert_eq!(connect.protocol_version.as_ref().unwrap(), &12); 627 | } 628 | 629 | let mut output = BytesMut::with_capacity(38); 630 | Codec.encode(message, &mut output).unwrap(); 631 | assert_eq!(&output, input); 632 | } 633 | 634 | #[test] 635 | fn parse_payload_command() { 636 | let input: &[u8] = &[ 637 | 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x08, 0x08, 0x06, 0x32, 0x04, 0x08, 0x00, 638 | 0x10, 0x08, 0x0E, 0x01, 0x42, 0x83, 0x54, 0xB5, 0x00, 0x00, 0x00, 0x19, 0x0A, 0x0E, 639 | 0x73, 0x74, 0x61, 0x6E, 0x64, 0x61, 0x6C, 0x6F, 0x6E, 0x65, 0x2D, 0x30, 0x2D, 0x33, 640 | 0x10, 0x08, 0x18, 0xBE, 0xC0, 0xFC, 0x84, 0xD2, 0x2C, 0x68, 0x65, 0x6C, 0x6C, 0x6F, 641 | 0x2D, 0x70, 0x75, 0x6C, 0x73, 0x61, 0x72, 0x2D, 0x38, 642 | ]; 643 | 644 | let message = Codec.decode(&mut input.into()).unwrap().unwrap(); 645 | { 646 | let send = message.command.send.as_ref().unwrap(); 647 | assert_eq!(send.producer_id, 0); 648 | assert_eq!(send.sequence_id, 8); 649 | } 650 | 651 | { 652 | let payload = message.payload.as_ref().unwrap(); 653 | assert_eq!(payload.metadata.producer_name, "standalone-0-3"); 654 | assert_eq!(payload.metadata.sequence_id, 8); 655 | assert_eq!(payload.metadata.publish_time, 1533850624062); 656 | } 657 | 658 | let mut output = BytesMut::with_capacity(65); 659 | Codec.encode(message, &mut output).unwrap(); 660 | assert_eq!(&output, input); 661 | } 662 | 663 | #[test] 664 | fn 
base_command_type_parsing() {
        use super::proto::base_command::Type;
        // every tag that maps to a variant must round-trip back to the same i32
        let mut successes = 0;
        for i in 0..40 {
            if let Ok(type_) = Type::try_from(i) {
                successes += 1;
                assert_eq!(type_ as i32, i);
            }
        }
        assert_eq!(successes, 34);
    }
}
--------------------------------------------------------------------------------
/src/reader.rs:
--------------------------------------------------------------------------------
use crate::client::DeserializeMessage;
use crate::consumer::{ConsumerOptions, DeadLetterPolicy, EngineMessage, Message, TopicConsumer};
use crate::error::Error;
use crate::executor::Executor;
use crate::message::proto::{command_subscribe::SubType, MessageIdData};
use chrono::{DateTime, Utc};
use futures::channel::mpsc::SendError;
use futures::task::{Context, Poll};
use futures::{Future, SinkExt, Stream};
use std::pin::Pin;
use url::Url;

/// A client that acknowledges messages systematically
pub struct Reader<T: DeserializeMessage, Exe: Executor> {
    pub(crate) consumer: TopicConsumer<T, Exe>,
    pub(crate) state: Option<State<T>>,
}

impl<T: DeserializeMessage, Exe: Executor> Unpin for Reader<T, Exe> {}

/// Internal state machine driving [`Reader`]'s `Stream` implementation:
/// either waiting for the next message, or waiting for its ack to be sent.
pub enum State<T: DeserializeMessage> {
    PollingConsumer,
    PollingAck(
        Message<T>,
        Pin<Box<dyn Future<Output = Result<(), SendError>> + Send>>,
    ),
}

impl<T: DeserializeMessage, Exe: Executor> Stream for Reader<T, Exe> {
    type Item = Result<Message<T>, Error>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        // the state is always Some between polls; take() moves it out so it
        // can be transformed and stored back before returning
        match this.state.take().unwrap() {
            State::PollingConsumer => match Pin::new(&mut this.consumer).poll_next(cx) {
                Poll::Pending => {
                    this.state = Some(State::PollingConsumer);
                    Poll::Pending
                }

                Poll::Ready(None) => {
                    this.state = Some(State::PollingConsumer);
                    Poll::Ready(None)
                }

                Poll::Ready(Some(Ok(msg))) => {
                    // a message arrived: start sending its ack, then poll
                    // ourselves again so the ack future is driven immediately
                    let mut acker = this.consumer.acker();
                    let message_id = msg.message_id.clone();
                    this.state = Some(State::PollingAck(
                        msg,
                        Box::pin(
                            async move { acker.send(EngineMessage::Ack(message_id, false)).await },
                        ),
                    ));
                    Pin::new(this).poll_next(cx)
                }

                Poll::Ready(Some(Err(e))) => {
                    this.state = Some(State::PollingConsumer);
                    Poll::Ready(Some(Err(e)))
                }
            },
            State::PollingAck(msg, mut ack_fut) => match ack_fut.as_mut().poll(cx) {
                Poll::Pending => {
                    this.state = Some(State::PollingAck(msg, ack_fut));
                    Poll::Pending
                }

                Poll::Ready(res) => {
                    // only yield the message once its ack has been handed to
                    // the consumer engine
                    this.state = Some(State::PollingConsumer);
                    Poll::Ready(Some(
                        res.map_err(|err| Error::Consumer(err.into())).map(|()| msg),
                    ))
                }
            },
        }
    }
}

impl<T: DeserializeMessage, Exe: Executor> Reader<T, Exe> {
    /// test that the connections to the Pulsar brokers are still valid
    pub async fn check_connection(&mut self) -> Result<(), Error> {
        self.consumer.check_connection().await
    }

    /// returns the topic this reader is subscribed on
    pub fn topic(&self) -> String {
        self.consumer.topic()
    }

    /// returns the broker URL this reader is connected to
    pub async fn connections(&mut self) -> Result<Url, Error> {
        Ok(self.consumer.connection().await?.url().clone())
    }

    /// returns the consumer's configuration options
    pub fn options(&self) -> &ConsumerOptions {
        &self.consumer.config.options
    }

    /// returns the consumer's dead letter policy options
    pub fn dead_letter_policy(&self) -> Option<&DeadLetterPolicy> {
        self.consumer.dead_letter_policy.as_ref()
    }

    /// returns the reader's subscription name
    pub fn subscription(&self) -> &str {
        &self.consumer.config.subscription
    }

    /// returns the reader's subscription type
    pub fn sub_type(&self) -> SubType {
        self.consumer.config.sub_type
    }

    /// returns the reader's batch size
    pub fn batch_size(&self) -> Option<u32> {
        self.consumer.config.batch_size
    }

    /// returns the reader's name
    pub fn reader_name(&self) -> Option<&str> {
        self.consumer.config.consumer_name.as_deref()
    }

    /// returns the reader's id
    pub fn reader_id(&self) -> u64 {
        self.consumer.consumer_id
    }

    /// seeks to a message id and/or a publish timestamp on the underlying consumer
    pub async fn seek(
        &mut self,
        message_id: Option<MessageIdData>,
        timestamp: Option<u64>,
    ) -> Result<(), Error> {
        self.consumer.seek(message_id, timestamp).await
    }

    /// returns the date of the last message reception
    pub fn last_message_received(&self) -> Option<DateTime<Utc>> {
        self.consumer.last_message_received()
    }

    /// returns the current number of messages received
    pub fn messages_received(&self) -> u64 {
        self.consumer.messages_received()
    }
}
--------------------------------------------------------------------------------
/src/service_discovery.rs:
--------------------------------------------------------------------------------
use crate::connection_manager::{BrokerAddress, ConnectionManager};
use crate::error::{ConnectionError, ServiceDiscoveryError};
use crate::executor::Executor;
use crate::message::proto::{
    command_lookup_topic_response, command_partitioned_topic_metadata_response,
    CommandLookupTopicResponse,
};
use futures::{future::try_join_all, FutureExt};
use std::sync::Arc;
10 | use url::Url; 11 | use crate::error::ServiceDiscoveryError::NotFound; 12 | 13 | /// Look up broker addresses for topics and partitioned topics 14 | /// 15 | /// The ServiceDiscovery object provides a single interface to start 16 | /// interacting with a cluster. It will automatically follow redirects 17 | /// or use a proxy, and aggregate broker connections 18 | #[derive(Clone)] 19 | pub struct ServiceDiscovery { 20 | manager: Arc>, 21 | } 22 | 23 | impl ServiceDiscovery { 24 | pub fn with_manager(manager: Arc>) -> Self { 25 | ServiceDiscovery { manager } 26 | } 27 | 28 | /// get the broker address for a topic 29 | pub async fn lookup_topic>( 30 | &self, 31 | topic: S, 32 | ) -> Result { 33 | let topic = topic.into(); 34 | let mut proxied_query = false; 35 | let mut conn = self.manager.get_base_connection().await?; 36 | let base_url = self.manager.url.clone(); 37 | let mut is_authoritative = false; 38 | let mut broker_address = self.manager.get_base_address(); 39 | 40 | let mut current_retries = 0u32; 41 | let start = std::time::Instant::now(); 42 | let operation_retry_options = self.manager.operation_retry_options.clone(); 43 | 44 | loop { 45 | let response = match conn 46 | .sender() 47 | .lookup_topic(topic.to_string(), is_authoritative) 48 | .await 49 | { 50 | Ok(res) => res, 51 | Err(ConnectionError::Disconnected) => { 52 | error!("tried to lookup a topic but connection was closed, reconnecting..."); 53 | conn = self.manager.get_connection(&broker_address).await?; 54 | conn.sender() 55 | .lookup_topic(topic.to_string(), is_authoritative) 56 | .await? 
57 | } 58 | Err(e) => return Err(e.into()), 59 | }; 60 | 61 | if response.response.is_none() 62 | || response.response 63 | == Some(command_lookup_topic_response::LookupType::Failed as i32) 64 | { 65 | let error = response.error.and_then(crate::error::server_error); 66 | if error == Some(crate::message::proto::ServerError::ServiceNotReady) { 67 | if operation_retry_options.max_retries.is_none() 68 | || operation_retry_options.max_retries.unwrap() > current_retries 69 | { 70 | error!("lookup({}) answered ServiceNotReady, retrying request after {}ms (max_retries = {:?})", topic, operation_retry_options.retry_delay.as_millis(), operation_retry_options.max_retries); 71 | current_retries += 1; 72 | self.manager 73 | .executor 74 | .delay(operation_retry_options.retry_delay) 75 | .await; 76 | continue; 77 | } else { 78 | error!("lookup({}) reached max retries", topic); 79 | } 80 | } 81 | return Err(ServiceDiscoveryError::Query( 82 | error, 83 | response.message.clone(), 84 | )); 85 | } 86 | 87 | if current_retries > 0 { 88 | let dur = (std::time::Instant::now() - start).as_secs(); 89 | log::info!( 90 | "lookup({}) success after {} retries over {} seconds", 91 | topic, 92 | current_retries + 1, 93 | dur 94 | ); 95 | } 96 | let LookupResponse { 97 | broker_url, 98 | broker_url_tls, 99 | proxy, 100 | redirect, 101 | authoritative, 102 | } = convert_lookup_response(&response)?; 103 | is_authoritative = authoritative; 104 | 105 | // use the TLS connection if available 106 | let connection_url = if let Some(u) = &broker_url_tls { 107 | u.clone() 108 | } else if let Some(u) = &broker_url { 109 | u.clone() 110 | } else { 111 | return Err(ServiceDiscoveryError::NotFound); 112 | }; 113 | 114 | // if going through a proxy, we use the base URL 115 | let url = if proxied_query || proxy { 116 | base_url.clone() 117 | } else { 118 | connection_url.clone() 119 | }; 120 | 121 | let broker_url = if let Some(u) = broker_url_tls { 122 | format!("{}:{}", u.host_str().unwrap(), 
u.port().unwrap_or(6651)) 123 | } else if let Some(u) = broker_url { 124 | format!("{}:{}", u.host_str().unwrap(), u.port().unwrap_or(6650)) 125 | } else { 126 | return Err(ServiceDiscoveryError::NotFound); 127 | }; 128 | 129 | broker_address = BrokerAddress { 130 | url, 131 | broker_url, 132 | proxy: proxied_query || proxy, 133 | }; 134 | 135 | // if the response indicated a redirect, do another query 136 | // to the target broker 137 | if redirect { 138 | conn = self.manager.get_connection(&broker_address).await?; 139 | proxied_query = broker_address.proxy; 140 | continue; 141 | } else { 142 | let res = self 143 | .manager 144 | .get_connection(&broker_address) 145 | .await 146 | .map(|_| broker_address) 147 | .map_err(ServiceDiscoveryError::Connection); 148 | break res; 149 | } 150 | } 151 | } 152 | 153 | /// get the number of partitions for a partitioned topic 154 | pub async fn lookup_partitioned_topic_number>( 155 | &self, 156 | topic: S, 157 | ) -> Result { 158 | let mut connection = self.manager.get_base_connection().await?; 159 | let topic = topic.into(); 160 | 161 | let mut current_retries = 0u32; 162 | let start = std::time::Instant::now(); 163 | let operation_retry_options = self.manager.operation_retry_options.clone(); 164 | 165 | let response = loop { 166 | let response = match connection.sender().lookup_partitioned_topic(&topic).await { 167 | Ok(res) => res, 168 | Err(ConnectionError::Disconnected) => { 169 | error!("tried to lookup a topic but connection was closed, reconnecting..."); 170 | connection = self.manager.get_base_connection().await?; 171 | connection.sender().lookup_partitioned_topic(&topic).await? 
172 | } 173 | Err(e) => return Err(e.into()), 174 | }; 175 | 176 | if response.response.is_none() 177 | || response.response 178 | == Some(command_partitioned_topic_metadata_response::LookupType::Failed as i32) 179 | { 180 | let error = response.error.and_then(crate::error::server_error); 181 | if error == Some(crate::message::proto::ServerError::ServiceNotReady) { 182 | if operation_retry_options.max_retries.is_none() 183 | || operation_retry_options.max_retries.unwrap() > current_retries 184 | { 185 | error!("lookup_partitioned_topic_number({}) answered ServiceNotReady, retrying request after {}ms (max_retries = {:?})", 186 | topic, operation_retry_options.retry_delay.as_millis(), 187 | operation_retry_options.max_retries); 188 | 189 | current_retries += 1; 190 | self.manager 191 | .executor 192 | .delay(operation_retry_options.retry_delay) 193 | .await; 194 | continue; 195 | } else { 196 | error!( 197 | "lookup_partitioned_topic_number({}) reached max retries", 198 | topic 199 | ); 200 | } 201 | } 202 | return Err(ServiceDiscoveryError::Query( 203 | error, 204 | response.message.clone(), 205 | )); 206 | } 207 | 208 | break response; 209 | }; 210 | 211 | if current_retries > 0 { 212 | let dur = (std::time::Instant::now() - start).as_secs(); 213 | log::info!( 214 | "lookup_partitioned_topic_number({}) success after {} retries over {} seconds", 215 | topic, 216 | current_retries + 1, 217 | dur 218 | ); 219 | } 220 | 221 | match response.partitions { 222 | Some(partitions) => Ok(partitions), 223 | None => Err(ServiceDiscoveryError::Query( 224 | response.error.and_then(crate::error::server_error), 225 | response.message, 226 | )), 227 | } 228 | } 229 | 230 | /// Lookup a topic, returning a list of the partitions (if partitioned) and addresses 231 | /// associated with that topic. 
232 | pub async fn lookup_partitioned_topic>( 233 | &self, 234 | topic: S, 235 | ) -> Result, ServiceDiscoveryError> { 236 | let topic = topic.into(); 237 | let partitions = self.lookup_partitioned_topic_number(&topic).await?; 238 | trace!("Partitions for topic {}: {}", &topic, &partitions); 239 | let topics = match partitions { 240 | 0 => vec![topic], 241 | _ => (0..partitions) 242 | .map(|n| format!("{}-partition-{}", &topic, n)) 243 | .collect(), 244 | }; 245 | try_join_all(topics.into_iter().map(|topic| { 246 | self.lookup_topic(topic.clone()) 247 | .map(move |address_res| match address_res { 248 | Err(e) => Err(e), 249 | Ok(address) => Ok((topic, address)), 250 | }) 251 | })) 252 | .await 253 | } 254 | } 255 | 256 | struct LookupResponse { 257 | pub broker_url: Option, 258 | pub broker_url_tls: Option, 259 | pub proxy: bool, 260 | pub redirect: bool, 261 | pub authoritative: bool, 262 | } 263 | 264 | /// extracts information from a lookup response 265 | fn convert_lookup_response( 266 | response: &CommandLookupTopicResponse, 267 | ) -> Result { 268 | let proxy = response.proxy_through_service_url.unwrap_or(false); 269 | let authoritative = response.authoritative.unwrap_or(false); 270 | let redirect = 271 | response.response == Some(command_lookup_topic_response::LookupType::Redirect as i32); 272 | 273 | let broker_url = match response.broker_service_url.as_ref() { 274 | Some(u) => Some(Url::parse(&response.broker_service_url.clone().unwrap()).map_err(|e| { 275 | error!("error parsing URL: {:?}", e); 276 | ServiceDiscoveryError::NotFound 277 | })?), 278 | None => None, 279 | }; 280 | 281 | let broker_url_tls = match response.broker_service_url_tls.as_ref() { 282 | Some(u) => Some(Url::parse(u).map_err(|e| { 283 | error!("error parsing URL: {:?}", e); 284 | ServiceDiscoveryError::NotFound 285 | })?), 286 | None => None, 287 | }; 288 | 289 | Ok(LookupResponse { 290 | broker_url, 291 | broker_url_tls, 292 | proxy, 293 | redirect, 294 | authoritative, 295 | }) 296 
| } 297 | --------------------------------------------------------------------------------