├── .github └── workflows │ ├── deploy.yml │ └── rust.yml ├── .gitignore ├── CHANGELOG ├── Cargo.toml ├── LICENSE ├── README.md ├── codegen.rs ├── protos ├── es6 │ ├── operations.proto │ ├── persistent.proto │ ├── projections.proto │ ├── shared.proto │ ├── streams.proto │ └── users.proto └── tcp │ ├── google │ └── protobuf │ │ ├── any.proto │ │ ├── api.proto │ │ ├── compiler │ │ └── plugin.proto │ │ ├── descriptor.proto │ │ ├── duration.proto │ │ ├── empty.proto │ │ ├── field_mask.proto │ │ ├── source_context.proto │ │ ├── struct.proto │ │ ├── timestamp.proto │ │ ├── type.proto │ │ └── wrappers.proto │ ├── messages.proto │ └── rustproto.proto ├── rustfmt.toml ├── src ├── connection.rs ├── discovery.rs ├── discovery │ ├── cluster.rs │ └── constant.rs ├── es6 │ ├── commands.rs │ ├── connection.rs │ ├── grpc │ │ ├── event_store │ │ │ ├── client │ │ │ │ ├── mod.rs │ │ │ │ ├── persistent.rs │ │ │ │ ├── shared.rs │ │ │ │ └── streams.rs │ │ │ └── mod.rs │ │ └── mod.rs │ ├── mod.rs │ └── types.rs ├── internal.rs ├── internal │ ├── command.rs │ ├── commands.rs │ ├── connection.rs │ ├── driver.rs │ ├── endpoint.rs │ ├── messages.rs │ ├── messaging.rs │ ├── operations.rs │ ├── package.rs │ ├── registry.rs │ └── timespan.rs ├── lib.rs └── types.rs └── tests └── integration.rs /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: Deployment 2 | 3 | on: 4 | push: 5 | tags: 6 | - '*' 7 | 8 | jobs: 9 | deploy: 10 | 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v1 15 | - name: Install latest Rust stable toolchain 16 | run: | 17 | rustup update 18 | rustup default stable 19 | - name: Login on Crates.IO 20 | env: 21 | TOKEN: ${{ secrets.CRATES_IO_TOKEN }} 22 | run: cargo login $TOKEN 23 | - name: Upload 24 | run: cargo publish 25 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: 
-------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - master 8 | schedule: 9 | - cron: '0 3 * * 0' # Every sunday at 3am UTC. 10 | 11 | jobs: 12 | build: 13 | name: Build 14 | strategy: 15 | matrix: 16 | os: [ubuntu-18.04, windows-2019, macos-10.15] 17 | rust: [stable, beta, nightly] 18 | 19 | runs-on: ${{ matrix.os }} 20 | 21 | steps: 22 | - uses: actions/checkout@v1 23 | - name: Install 24 | run: rustup default ${{ matrix.rust }} 25 | - name: Build 26 | run: cargo check --all-features 27 | continue-on-error: ${{ matrix.rust == 'nightly' }} 28 | tests: 29 | name: Tests on ES Stable 30 | needs: build 31 | runs-on: ubuntu-18.04 32 | services: 33 | eventstore: 34 | image: eventstore/eventstore:release-5.0.7 35 | env: 36 | EVENTSTORE_DEV: true 37 | ports: 38 | - 1113:1113 39 | - 2113:2113 40 | steps: 41 | - uses: actions/checkout@v1 42 | - name: Run Tests 43 | run: RUST_BACKTRACE='1' RUST_LOG='eventstore=debug' cargo test 44 | 45 | tests-next: 46 | name: Tests on 20.6.0 Stable 47 | needs: tests 48 | runs-on: ubuntu-18.04 49 | services: 50 | eventstore: 51 | image: eventstore/eventstore:20.6.0-bionic 52 | env: 53 | EVENTSTORE_DEV: true 54 | ports: 55 | - 2113:2113 56 | steps: 57 | - uses: actions/checkout@v1 58 | - name: Run Tests 59 | run: RUST_BACKTRACE='1' RUST_LOG='integration=debug' cargo test --all-features es6::es6_20_6_test -- --exact --ignored 60 | 61 | linting: 62 | name: Linting 63 | needs: tests-next 64 | runs-on: ubuntu-18.04 65 | steps: 66 | - uses: actions/checkout@v1 67 | - name: Install 68 | run: | 69 | rustup update 70 | rustup component add clippy-preview 71 | rustup component add rustfmt 72 | - name: Linting 73 | run: cargo clippy --all-features -- -D warnings 74 | - name: Formatting 75 | run: cargo fmt -- --check 76 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | **/*.rs.bk 12 | Cargo.lock 13 | 14 | # Directories generated during CI workflow. 15 | cached_cargo/ 16 | -------------------------------------------------------------------------------- /CHANGELOG: -------------------------------------------------------------------------------- 1 | 0.8.2 2 | ===== 3 | * [es6] Adding `read_through` function when reading a stream (`$all` included). 4 | * [es6] Support latest `message_timeout_ms` and `checkpoint_after_ms` persistent.proto changes. 5 | * [es6] Fix persistent subscription id retrieval. 6 | 7 | 0.8.1 8 | ===== 9 | * Bump protobuf version. 10 | * Bump tonic version. 11 | * Make Connection cloneable. 12 | * Support DNS discovery for the TCP API. 13 | 14 | 0.8.0 15 | ===== 16 | * Implement secure connection for the TCP API. 17 | * Support 20.6 stable version (behind 20.6). 18 | 19 | 0.7.5 20 | ===== 21 | * Make connection timeout period configurable. 22 | * Don't panic on second call if server is off. 23 | * Migrate gRPC interface to ES6 preview 3 version. 24 | 25 | 0.7.4 26 | ===== 27 | * No change, only updated website link. 28 | 29 | 0.7.3 30 | ===== 31 | * Expose subscription events so the user can be notified when a subscription has been confirmed or dropped by the server. 32 | * Upgrade to uuid 0.8.* version. 33 | * Fix memory leak in registry when dealing with subscriptions. 34 | * Allow user to convert UUID to GUID when sending events. 35 | * Fix persistent subscription regression when ack/nak. 
36 | 37 | 0.7.2 38 | ===== 39 | * Idiomatic streaming interface for subscriptions and batch reads (tcp API). 40 | * Fix UUID/GUID serialization. 41 | 42 | 0.7.1 43 | ===== 44 | * Ask Docs.rs to also build documentation for es6 module. 45 | 46 | 0.7.0 47 | ===== 48 | * Expose ES 6 gRPC interface. 49 | 50 | 0.6.0 51 | ===== 52 | * Move to tokio 0.2 53 | 54 | 0.5.1 55 | ===== 56 | * Pinned `protobuf` to 2.8.1 version. 57 | 58 | 0.5.0 59 | ===== 60 | * Support Rust 1.39 61 | * Remove protobuf::Chars from public API. 62 | 63 | 0.4.5 64 | ===== 65 | * Update persistent subscription default settings. 66 | * Internal connection refactoring. 67 | * Implement `iterate_over_batch`. 68 | 69 | 0.4.4 70 | ===== 71 | * Remove debugging leftovers. 72 | * Add `Pinned` system consumer strategy. 73 | 74 | 0.4.3 75 | ===== 76 | * Fix reading a deleted stream event in $streams stream, causing a read command to abort. 77 | 78 | 0.4.2 79 | ===== 80 | * Fix compiler warnings. 81 | * Bump dependencies version. 82 | 83 | 0.4.1 84 | ===== 85 | * Fix stream metadata and ACL JSON (de)serialization. 86 | 87 | 0.4.0 88 | ===== 89 | * Implement cluster-mode connection. 90 | * Internal refactoring. 91 | * `start` and `start_with_runtime` are renamed `single_node_connection` and `single_node_connection_with_runtime`. 92 | 93 | 0.3.0 94 | ===== 95 | * Migrate `iterate_over` from iterator to asynchronous stream. 96 | 97 | 0.2.4 98 | ===== 99 | * Fix possible connection issues if Authentication or Identification processes take too long to complete. 100 | 101 | 0.2.3 102 | ===== 103 | * Remove an unnecessary OS thread. 104 | * Implement `ConnectionBuilder::start_with_runtime` to use an existing tokio runtime. 105 | * Fix rare issue where the user sends a command before the connection is confirmed with the 106 | server, causing that operation to be sent only after a `operation timeout` time. 107 | * No longer terminate the connection in case of identification timeout. 
108 | 109 | 0.2.2 110 | ===== 111 | * Implement stream streaming ($all included). 112 | 113 | 0.2.1 114 | ===== 115 | * BUGFIX: Fix next event number for stream reads. 116 | 117 | 0.2.0 118 | ===== 119 | * Simplify public eventstore module. 120 | * Move to a typeful representation of `resolve_link_tos` setting. 121 | * Implement connection state-machine graceful exit. 122 | * Introduce new connection api. 123 | 124 | 0.1.3 125 | ===== 126 | * Migrate to `uuid` 0.7. 127 | * Move to tokio multithreaded runtime. 128 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "eventstore" 3 | version = "0.8.2" 4 | authors = ["Yorick Laupa "] 5 | edition = "2018" 6 | 7 | # Uncomment if you want to update messages.rs code-gen. 8 | # We disabled codegen.rs because it requires having `protoc` installed on your machine 9 | # in order to build that library. 
10 | # build = "codegen.rs" 11 | 12 | license = "MIT" 13 | description = "EventStore TCP client" 14 | repository = "https://github.com/YoEight/eventstore-rs" 15 | readme = "README.md" 16 | keywords = ["database", "eventsourcing", "event", "eventstore", "ges"] 17 | exclude = [ 18 | ".gitignore", 19 | ".gitlab-ci.yml", 20 | ".travis.yml", 21 | ".github", 22 | "rustfmt.toml" 23 | ] 24 | 25 | categories = ["database", "api-bindings"] 26 | 27 | [features] 28 | default = [] 29 | es6 = [ 30 | "byteorder", 31 | "http", 32 | "tonic", 33 | "prost", 34 | "prost-derive", 35 | "prost-types", 36 | "rustls", 37 | "webpki", 38 | "base64" 39 | ] 40 | tls = [ 41 | "tokio-rustls", 42 | "rustls", 43 | "webpki" 44 | ] 45 | 46 | [dependencies] 47 | tokio = { version = "0.2", features = ["net", "stream", "time"] } 48 | tokio-byteorder = "0.2" 49 | futures = "0.3" 50 | uuid = { version = "0.8", features = [ "v4", "serde" ] } 51 | bytes = "0.5" 52 | protobuf = { version = "2.10", features = ["with-bytes"] } 53 | serde = "1.0" 54 | serde_json = "1.0" 55 | serde_derive = "1.0" 56 | log = "0.4" 57 | reqwest = { version = "0.10", features = ["json"] } 58 | vec1 = "1.4" 59 | rand = { version = "0.7", features = ["getrandom", "small_rng"] } 60 | byteorder = { version = "1.2", optional = true } 61 | http = { version = "0.2", optional = true } 62 | tonic = { version = "0.3", features = ["tls", "tls-roots"], optional = true } 63 | prost = { version = "0.6", optional = true } 64 | prost-derive = { version = "0.6", optional = true } 65 | prost-types = { version = "0.6", optional = true } 66 | rustls = { version = "0.18", features = ["dangerous_configuration"], optional = true } 67 | webpki = { version = "0.21", optional = true } 68 | base64 = { version = "^0.11", optional = true } 69 | tokio-rustls = { version = "0.14", optional = true } 70 | trust-dns-resolver = "0.19" 71 | 72 | [build-dependencies] 73 | protoc-rust = "2.16" 74 | tonic-build = "0.2" 75 | 76 | [[test]] 77 | name = "integration" 78 | 
79 | [dev-dependencies] 80 | env_logger = "0.6" 81 | tokio-test = "0.2" 82 | 83 | [package.metadata.docs.rs] 84 | all-features = true 85 | rustdoc-args = ["--cfg", "docsrs"] 86 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Yorick Laupa 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # REPO HAS MOVED! 3 | ## This client is now officiallly supported by EventStore Ltd. 
The code is now hosted here: https://github.com/EventStore/EventStoreDB-Client-Rust 4 | 5 | # eventstore-rs 6 | ![Crates.io](https://img.shields.io/crates/v/eventstore.svg) 7 | ![Crates.io](https://img.shields.io/crates/d/eventstore.svg) 8 | ![Github action CI workflow](https://github.com/YoEight/eventstore-rs/workflows/CI/badge.svg?branch=master) 9 | ![Discord](https://img.shields.io/discord/415421715385155584.svg) 10 | ![Crates.io](https://img.shields.io/crates/l/eventstore.svg) 11 | 12 | Rust [EventStore] TCP Client. 13 | 14 | [Talk and exchange ideas in our dedicated Discord Server] 15 | 16 | ## State of implemented features 17 | 18 | - [x] Can connect to GetEventStore >=4.* servers (for version 20.6 and above enable the `es6` feature flag and use the `es6` module). 19 | - [x] Connection health tracking. 20 | - [x] Operation timeout detection and retry. 21 | - [x] Write events. 22 | - [x] Read events (including `$all` stream). 23 | - [x] Read/Write stream metadata. 24 | - [x] Transactions. 25 | - [x] Delete stream. 26 | - [x] Volatile Subscriptions. 27 | - [x] Catchup Subscriptions. 28 | - [x] Persistent Subscriptions. 29 | - [x] Support connection to server clusters. (through gossip seeds or DNS) 30 | - [x] Support SSL connection. 31 | 32 | # Example 33 | 34 | ```rust 35 | #[macro_use] 36 | extern crate serde_json; 37 | 38 | use eventstore::{ Connection, EventData }; 39 | use futures::Future; 40 | 41 | #[tokio::main] 42 | async fn main() -> Result<(), Box> { 43 | let addr = "127.0.0.1:1113".parse()?; 44 | let connection = Connection::builder() 45 | .single_node_connection(addr) 46 | .await; 47 | 48 | // It is not mandatory to use JSON as a data format however GetEventStore 49 | // provides great additional value if you do so. 
50 | let payload = json!({ 51 | "is_rust_a_nice_language": true, 52 | }); 53 | 54 | let event = EventData::json("language-poll", payload)?; 55 | 56 | let result = connection 57 | .write_events("language-stream") 58 | .push_event(event) 59 | .execute() 60 | .await?; 61 | 62 | // Do something productive with the result. 63 | println!("{:?}", result); 64 | 65 | Ok(()) 66 | } 67 | ``` 68 | 69 | ## Notes 70 | 71 | That library was tested on Linux and OSX. 72 | 73 | Contributions and bug reports are welcome! 74 | 75 | MIT License 76 | 77 | [GetEventStore]: https://eventstore.com/ 78 | [Talk and exchange ideas in our dedicated Discord Server]: https://discord.gg/x7q37jJ 79 | [EventStore]: https://eventstore.com/ 80 | 81 | -------------------------------------------------------------------------------- /codegen.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(feature = "es6"))] 2 | pub mod tcp { 3 | use protoc_rust::Customize; 4 | use std::path::Path; 5 | 6 | pub fn generate() { 7 | let generated_file = Path::new("src/internal/messages.rs"); 8 | 9 | if !generated_file.exists() { 10 | protoc_rust::Codegen::new() 11 | .out_dir("src/internal") 12 | .inputs(&["protos/tcp/messages.proto"]) 13 | .include("protos/tcp") 14 | .customize(Customize { 15 | carllerche_bytes_for_bytes: Some(true), 16 | carllerche_bytes_for_string: Some(true), 17 | ..Default::default() 18 | }) 19 | .run() 20 | .expect("Running protoc failed"); 21 | } 22 | } 23 | } 24 | 25 | #[cfg(feature = "es6")] 26 | pub mod es6 { 27 | pub fn generate() { 28 | let out_dir = "src/es6/grpc/event_store/client"; 29 | let files = [ 30 | "protos/es6/persistent.proto", 31 | "protos/es6/streams.proto", 32 | "protos/es6/shared.proto", 33 | ]; 34 | 35 | tonic_build::configure() 36 | .build_server(false) 37 | .out_dir(out_dir) 38 | .compile(&files, &["protos/es6"]) 39 | .unwrap(); 40 | 41 | let gen_dir = std::fs::read_dir(out_dir).unwrap(); 42 | 43 | for entry in gen_dir { 44 | 
let file = entry.unwrap(); 45 | let filename_string = file.file_name().into_string().unwrap(); 46 | if filename_string.starts_with("event_store.client.") { 47 | let remaining = filename_string.trim_start_matches("event_store.client."); 48 | let new_file_name = if remaining == "persistent_subscriptions.rs" { 49 | "persistent.rs" 50 | } else { 51 | remaining 52 | }; 53 | 54 | let new_file = file.path().parent().unwrap().join(new_file_name); 55 | 56 | std::fs::rename(file.path(), new_file).unwrap(); 57 | } 58 | } 59 | } 60 | } 61 | 62 | #[cfg(feature = "es6")] 63 | use self::es6::generate; 64 | #[cfg(not(feature = "es6"))] 65 | use self::tcp::generate; 66 | 67 | fn main() { 68 | generate(); 69 | } 70 | -------------------------------------------------------------------------------- /protos/es6/operations.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package event_store.client.operations; 3 | option java_package = "com.eventstore.dbclient.proto.operations"; 4 | 5 | import "shared.proto"; 6 | 7 | service Operations { 8 | rpc StartScavenge (StartScavengeReq) returns (ScavengeResp); 9 | rpc StopScavenge (StopScavengeReq) returns (ScavengeResp); 10 | rpc Shutdown (event_store.client.shared.Empty) returns (event_store.client.shared.Empty); 11 | rpc MergeIndexes (event_store.client.shared.Empty) returns (event_store.client.shared.Empty); 12 | rpc ResignNode (event_store.client.shared.Empty) returns (event_store.client.shared.Empty); 13 | rpc SetNodePriority (SetNodePriorityReq) returns (event_store.client.shared.Empty); 14 | } 15 | 16 | message StartScavengeReq { 17 | Options options = 1; 18 | message Options { 19 | int32 thread_count = 1; 20 | int32 start_from_chunk = 2; 21 | } 22 | } 23 | 24 | message StopScavengeReq { 25 | Options options = 1; 26 | message Options { 27 | string scavenge_id = 1; 28 | } 29 | } 30 | 31 | message ScavengeResp { 32 | string scavenge_id = 1; 33 | ScavengeResult scavenge_result 
= 2; 34 | 35 | enum ScavengeResult { 36 | Started = 0; 37 | InProgress = 1; 38 | Stopped = 2; 39 | } 40 | } 41 | 42 | message SetNodePriorityReq { 43 | int32 priority = 1; 44 | } -------------------------------------------------------------------------------- /protos/es6/persistent.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package event_store.client.persistent_subscriptions; 3 | option java_package = "com.eventstore.dbclient.proto.persistentsubscriptions"; 4 | 5 | import "shared.proto"; 6 | 7 | service PersistentSubscriptions { 8 | rpc Create (CreateReq) returns (CreateResp); 9 | rpc Update (UpdateReq) returns (UpdateResp); 10 | rpc Delete (DeleteReq) returns (DeleteResp); 11 | rpc Read (stream ReadReq) returns (stream ReadResp); 12 | } 13 | 14 | message ReadReq { 15 | oneof content { 16 | Options options = 1; 17 | Ack ack = 2; 18 | Nack nack = 3; 19 | } 20 | 21 | message Options { 22 | event_store.client.shared.StreamIdentifier stream_identifier = 1; 23 | string group_name = 2; 24 | int32 buffer_size = 3; 25 | UUIDOption uuid_option = 4; 26 | 27 | message UUIDOption { 28 | oneof content { 29 | event_store.client.shared.Empty structured = 1; 30 | event_store.client.shared.Empty string = 2; 31 | } 32 | } 33 | } 34 | 35 | message Ack { 36 | bytes id = 1; 37 | repeated event_store.client.shared.UUID ids = 2; 38 | } 39 | 40 | message Nack { 41 | bytes id = 1; 42 | repeated event_store.client.shared.UUID ids = 2; 43 | Action action = 3; 44 | string reason = 4; 45 | 46 | enum Action { 47 | Unknown = 0; 48 | Park = 1; 49 | Retry = 2; 50 | Skip = 3; 51 | Stop = 4; 52 | } 53 | } 54 | } 55 | 56 | message ReadResp { 57 | oneof content { 58 | ReadEvent event = 1; 59 | SubscriptionConfirmation subscription_confirmation = 2; 60 | } 61 | message ReadEvent { 62 | RecordedEvent event = 1; 63 | RecordedEvent link = 2; 64 | oneof position { 65 | uint64 commit_position = 3; 66 | event_store.client.shared.Empty 
no_position = 4; 67 | } 68 | oneof count { 69 | int32 retry_count = 5; 70 | event_store.client.shared.Empty no_retry_count = 6; 71 | } 72 | message RecordedEvent { 73 | event_store.client.shared.UUID id = 1; 74 | event_store.client.shared.StreamIdentifier stream_identifier = 2; 75 | uint64 stream_revision = 3; 76 | uint64 prepare_position = 4; 77 | uint64 commit_position = 5; 78 | map metadata = 6; 79 | bytes custom_metadata = 7; 80 | bytes data = 8; 81 | } 82 | } 83 | message SubscriptionConfirmation { 84 | string subscription_id = 1; 85 | } 86 | } 87 | 88 | message CreateReq { 89 | Options options = 1; 90 | 91 | message Options { 92 | event_store.client.shared.StreamIdentifier stream_identifier = 1; 93 | string group_name = 2; 94 | Settings settings = 3; 95 | } 96 | 97 | message Settings { 98 | bool resolve_links = 1; 99 | uint64 revision = 2; 100 | bool extra_statistics = 3; 101 | int32 max_retry_count = 5; 102 | int32 min_checkpoint_count = 7; 103 | int32 max_checkpoint_count = 8; 104 | int32 max_subscriber_count = 9; 105 | int32 live_buffer_size = 10; 106 | int32 read_batch_size = 11; 107 | int32 history_buffer_size = 12; 108 | ConsumerStrategy named_consumer_strategy = 13; 109 | oneof message_timeout { 110 | int64 message_timeout_ticks = 4; 111 | int32 message_timeout_ms = 14; 112 | } 113 | oneof checkpoint_after { 114 | int64 checkpoint_after_ticks = 6; 115 | int32 checkpoint_after_ms = 15; 116 | } 117 | } 118 | 119 | enum ConsumerStrategy { 120 | DispatchToSingle = 0; 121 | RoundRobin = 1; 122 | Pinned = 2; 123 | } 124 | } 125 | 126 | message CreateResp { 127 | } 128 | 129 | message UpdateReq { 130 | Options options = 1; 131 | 132 | message Options { 133 | event_store.client.shared.StreamIdentifier stream_identifier = 1; 134 | string group_name = 2; 135 | Settings settings = 3; 136 | } 137 | 138 | message Settings { 139 | bool resolve_links = 1; 140 | uint64 revision = 2; 141 | bool extra_statistics = 3; 142 | int32 max_retry_count = 5; 143 | int32 
min_checkpoint_count = 7; 144 | int32 max_checkpoint_count = 8; 145 | int32 max_subscriber_count = 9; 146 | int32 live_buffer_size = 10; 147 | int32 read_batch_size = 11; 148 | int32 history_buffer_size = 12; 149 | ConsumerStrategy named_consumer_strategy = 13; 150 | oneof message_timeout { 151 | int64 message_timeout_ticks = 4; 152 | int32 message_timeout_ms = 14; 153 | } 154 | oneof checkpoint_after { 155 | int64 checkpoint_after_ticks = 6; 156 | int32 checkpoint_after_ms = 15; 157 | } 158 | } 159 | 160 | enum ConsumerStrategy { 161 | DispatchToSingle = 0; 162 | RoundRobin = 1; 163 | Pinned = 2; 164 | } 165 | } 166 | 167 | message UpdateResp { 168 | } 169 | 170 | message DeleteReq { 171 | Options options = 1; 172 | 173 | message Options { 174 | event_store.client.shared.StreamIdentifier stream_identifier = 1; 175 | string group_name = 2; 176 | } 177 | } 178 | 179 | message DeleteResp { 180 | } -------------------------------------------------------------------------------- /protos/es6/projections.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package event_store.client.projections; 3 | option java_package = "com.eventstore.dbclient.proto.projections"; 4 | 5 | import "google/protobuf/struct.proto"; 6 | import "shared.proto"; 7 | 8 | service Projections { 9 | rpc Create (CreateReq) returns (CreateResp); 10 | rpc Update (UpdateReq) returns (UpdateResp); 11 | rpc Delete (DeleteReq) returns (DeleteResp); 12 | rpc Statistics (StatisticsReq) returns (stream StatisticsResp); 13 | rpc Disable (DisableReq) returns (DisableResp); 14 | rpc Enable (EnableReq) returns (EnableResp); 15 | rpc Reset (ResetReq) returns (ResetResp); 16 | rpc State (StateReq) returns (StateResp); 17 | rpc Result (ResultReq) returns (ResultResp); 18 | rpc RestartSubsystem (event_store.client.shared.Empty) returns (event_store.client.shared.Empty); 19 | } 20 | 21 | message CreateReq { 22 | Options options = 1; 23 | 24 | message 
Options { 25 | oneof mode { 26 | event_store.client.shared.Empty one_time = 1; 27 | Transient transient = 2; 28 | Continuous continuous = 3; 29 | } 30 | string query = 4; 31 | 32 | message Transient { 33 | string name = 1; 34 | } 35 | message Continuous { 36 | string name = 1; 37 | bool track_emitted_streams = 2; 38 | } 39 | } 40 | } 41 | 42 | message CreateResp { 43 | } 44 | 45 | message UpdateReq { 46 | Options options = 1; 47 | 48 | message Options { 49 | string name = 1; 50 | string query = 2; 51 | oneof emit_option { 52 | bool emit_enabled = 3; 53 | event_store.client.shared.Empty no_emit_options = 4; 54 | } 55 | } 56 | } 57 | 58 | message UpdateResp { 59 | } 60 | 61 | message DeleteReq { 62 | Options options = 1; 63 | 64 | message Options { 65 | string name = 1; 66 | bool delete_emitted_streams = 2; 67 | bool delete_state_stream = 3; 68 | bool delete_checkpoint_stream = 4; 69 | } 70 | } 71 | 72 | message DeleteResp { 73 | } 74 | 75 | message StatisticsReq { 76 | Options options = 1; 77 | message Options { 78 | oneof mode { 79 | string name = 1; 80 | event_store.client.shared.Empty all = 2; 81 | event_store.client.shared.Empty transient = 3; 82 | event_store.client.shared.Empty continuous = 4; 83 | event_store.client.shared.Empty one_time = 5; 84 | } 85 | } 86 | } 87 | 88 | message StatisticsResp { 89 | Details details = 1; 90 | 91 | message Details { 92 | int64 coreProcessingTime = 1; 93 | int64 version = 2; 94 | int64 epoch = 3; 95 | string effectiveName = 4; 96 | int32 writesInProgress = 5; 97 | int32 readsInProgress = 6; 98 | int32 partitionsCached = 7; 99 | string status = 8; 100 | string stateReason = 9; 101 | string name = 10; 102 | string mode = 11; 103 | string position = 12; 104 | float progress = 13; 105 | string lastCheckpoint = 14; 106 | int64 eventsProcessedAfterRestart = 15; 107 | string checkpointStatus = 16; 108 | int64 bufferedEvents = 17; 109 | int32 writePendingEventsBeforeCheckpoint = 18; 110 | int32 writePendingEventsAfterCheckpoint = 19; 
111 | } 112 | } 113 | 114 | message StateReq { 115 | Options options = 1; 116 | 117 | message Options { 118 | string name = 1; 119 | string partition = 2; 120 | } 121 | } 122 | 123 | message StateResp { 124 | google.protobuf.Value state = 1; 125 | } 126 | 127 | message ResultReq { 128 | Options options = 1; 129 | 130 | message Options { 131 | string name = 1; 132 | string partition = 2; 133 | } 134 | } 135 | 136 | message ResultResp { 137 | google.protobuf.Value result = 1; 138 | } 139 | 140 | message ResetReq { 141 | Options options = 1; 142 | 143 | message Options { 144 | string name = 1; 145 | bool write_checkpoint = 2; 146 | } 147 | } 148 | 149 | message ResetResp { 150 | } 151 | 152 | 153 | message EnableReq { 154 | Options options = 1; 155 | 156 | message Options { 157 | string name = 1; 158 | } 159 | } 160 | 161 | message EnableResp { 162 | } 163 | 164 | message DisableReq { 165 | Options options = 1; 166 | 167 | message Options { 168 | string name = 1; 169 | bool write_checkpoint = 2; 170 | } 171 | } 172 | 173 | message DisableResp { 174 | } -------------------------------------------------------------------------------- /protos/es6/shared.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package event_store.client.shared; 3 | option java_package = "com.eventstore.dbclient.proto.shared"; 4 | 5 | message UUID { 6 | oneof value { 7 | Structured structured = 1; 8 | string string = 2; 9 | } 10 | 11 | message Structured { 12 | int64 most_significant_bits = 1; 13 | int64 least_significant_bits = 2; 14 | } 15 | } 16 | message Empty { 17 | } 18 | 19 | message StreamIdentifier { 20 | reserved 1 to 2; 21 | bytes streamName = 3; 22 | } -------------------------------------------------------------------------------- /protos/es6/streams.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package event_store.client.streams; 3 | option 
java_package = "com.eventstore.dbclient.proto.streams"; 4 | 5 | import "shared.proto"; 6 | 7 | service Streams { 8 | rpc Read (ReadReq) returns (stream ReadResp); 9 | rpc Append (stream AppendReq) returns (AppendResp); 10 | rpc Delete (DeleteReq) returns (DeleteResp); 11 | rpc Tombstone (TombstoneReq) returns (TombstoneResp); 12 | } 13 | 14 | message ReadReq { 15 | Options options = 1; 16 | 17 | message Options { 18 | oneof stream_option { 19 | StreamOptions stream = 1; 20 | AllOptions all = 2; 21 | } 22 | ReadDirection read_direction = 3; 23 | bool resolve_links = 4; 24 | oneof count_option { 25 | uint64 count = 5; 26 | SubscriptionOptions subscription = 6; 27 | } 28 | oneof filter_option { 29 | FilterOptions filter = 7; 30 | event_store.client.shared.Empty no_filter = 8; 31 | } 32 | UUIDOption uuid_option = 9; 33 | 34 | enum ReadDirection { 35 | Forwards = 0; 36 | Backwards = 1; 37 | } 38 | message StreamOptions { 39 | event_store.client.shared.StreamIdentifier stream_identifier = 1; 40 | oneof revision_option { 41 | uint64 revision = 2; 42 | event_store.client.shared.Empty start = 3; 43 | event_store.client.shared.Empty end = 4; 44 | } 45 | } 46 | message AllOptions { 47 | oneof all_option { 48 | Position position = 1; 49 | event_store.client.shared.Empty start = 2; 50 | event_store.client.shared.Empty end = 3; 51 | } 52 | } 53 | message SubscriptionOptions { 54 | } 55 | message Position { 56 | uint64 commit_position = 1; 57 | uint64 prepare_position = 2; 58 | } 59 | message FilterOptions { 60 | oneof filter { 61 | Expression stream_identifier = 1; 62 | Expression event_type = 2; 63 | } 64 | oneof window { 65 | uint32 max = 3; 66 | event_store.client.shared.Empty count = 4; 67 | } 68 | uint32 checkpointIntervalMultiplier = 5; 69 | 70 | message Expression { 71 | string regex = 1; 72 | repeated string prefix = 2; 73 | } 74 | } 75 | message UUIDOption { 76 | oneof content { 77 | event_store.client.shared.Empty structured = 1; 78 | event_store.client.shared.Empty 
string = 2; 79 | } 80 | } 81 | } 82 | } 83 | 84 | message ReadResp { 85 | oneof content { 86 | ReadEvent event = 1; 87 | SubscriptionConfirmation confirmation = 2; 88 | Checkpoint checkpoint = 3; 89 | StreamNotFound stream_not_found = 4; 90 | } 91 | 92 | message ReadEvent { 93 | RecordedEvent event = 1; 94 | RecordedEvent link = 2; 95 | oneof position { 96 | uint64 commit_position = 3; 97 | event_store.client.shared.Empty no_position = 4; 98 | } 99 | 100 | message RecordedEvent { 101 | event_store.client.shared.UUID id = 1; 102 | event_store.client.shared.StreamIdentifier stream_identifier = 2; 103 | uint64 stream_revision = 3; 104 | uint64 prepare_position = 4; 105 | uint64 commit_position = 5; 106 | map metadata = 6; 107 | bytes custom_metadata = 7; 108 | bytes data = 8; 109 | } 110 | } 111 | message SubscriptionConfirmation { 112 | string subscription_id = 1; 113 | } 114 | message Checkpoint { 115 | uint64 commit_position = 1; 116 | uint64 prepare_position = 2; 117 | } 118 | message StreamNotFound { 119 | event_store.client.shared.StreamIdentifier stream_identifier = 1; 120 | } 121 | } 122 | 123 | message AppendReq { 124 | oneof content { 125 | Options options = 1; 126 | ProposedMessage proposed_message = 2; 127 | } 128 | 129 | message Options { 130 | event_store.client.shared.StreamIdentifier stream_identifier = 1; 131 | oneof expected_stream_revision { 132 | uint64 revision = 2; 133 | event_store.client.shared.Empty no_stream = 3; 134 | event_store.client.shared.Empty any = 4; 135 | event_store.client.shared.Empty stream_exists = 5; 136 | } 137 | } 138 | message ProposedMessage { 139 | event_store.client.shared.UUID id = 1; 140 | map metadata = 2; 141 | bytes custom_metadata = 3; 142 | bytes data = 4; 143 | } 144 | } 145 | 146 | message AppendResp { 147 | oneof result { 148 | Success success = 1; 149 | WrongExpectedVersion wrong_expected_version = 2; 150 | } 151 | 152 | message Position { 153 | uint64 commit_position = 1; 154 | uint64 prepare_position = 2; 155 
| } 156 | 157 | message Success { 158 | oneof current_revision_option { 159 | uint64 current_revision = 1; 160 | event_store.client.shared.Empty no_stream = 2; 161 | } 162 | oneof position_option { 163 | Position position = 3; 164 | event_store.client.shared.Empty no_position = 4; 165 | } 166 | } 167 | 168 | message WrongExpectedVersion { 169 | oneof current_revision_option { 170 | uint64 current_revision = 1; 171 | event_store.client.shared.Empty no_stream = 2; 172 | } 173 | oneof expected_revision_option { 174 | uint64 expected_revision = 3; 175 | event_store.client.shared.Empty any = 4; 176 | event_store.client.shared.Empty stream_exists = 5; 177 | } 178 | } 179 | } 180 | 181 | message DeleteReq { 182 | Options options = 1; 183 | 184 | message Options { 185 | event_store.client.shared.StreamIdentifier stream_identifier = 1; 186 | oneof expected_stream_revision { 187 | uint64 revision = 2; 188 | event_store.client.shared.Empty no_stream = 3; 189 | event_store.client.shared.Empty any = 4; 190 | event_store.client.shared.Empty stream_exists = 5; 191 | } 192 | } 193 | } 194 | 195 | message DeleteResp { 196 | oneof position_option { 197 | Position position = 1; 198 | event_store.client.shared.Empty no_position = 2; 199 | } 200 | 201 | message Position { 202 | uint64 commit_position = 1; 203 | uint64 prepare_position = 2; 204 | } 205 | } 206 | 207 | message TombstoneReq { 208 | Options options = 1; 209 | 210 | message Options { 211 | event_store.client.shared.StreamIdentifier stream_identifier = 1; 212 | oneof expected_stream_revision { 213 | uint64 revision = 2; 214 | event_store.client.shared.Empty no_stream = 3; 215 | event_store.client.shared.Empty any = 4; 216 | event_store.client.shared.Empty stream_exists = 5; 217 | } 218 | } 219 | } 220 | 221 | message TombstoneResp { 222 | oneof position_option { 223 | Position position = 1; 224 | event_store.client.shared.Empty no_position = 2; 225 | } 226 | 227 | message Position { 228 | uint64 commit_position = 1; 229 | 
uint64 prepare_position = 2; 230 | } 231 | } -------------------------------------------------------------------------------- /protos/es6/users.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package event_store.client.users; 3 | option java_package = "com.eventstore.dbclient.proto.users"; 4 | 5 | service Users { 6 | rpc Create (CreateReq) returns (CreateResp); 7 | rpc Update (UpdateReq) returns (UpdateResp); 8 | rpc Delete (DeleteReq) returns (DeleteResp); 9 | rpc Disable (DisableReq) returns (DisableResp); 10 | rpc Enable (EnableReq) returns (EnableResp); 11 | rpc Details (DetailsReq) returns (stream DetailsResp); 12 | rpc ChangePassword (ChangePasswordReq) returns (ChangePasswordResp); 13 | rpc ResetPassword (ResetPasswordReq) returns (ResetPasswordResp); 14 | } 15 | 16 | message CreateReq { 17 | Options options = 1; 18 | message Options { 19 | string login_name = 1; 20 | string password = 2; 21 | string full_name = 3; 22 | repeated string groups = 4; 23 | } 24 | } 25 | 26 | message CreateResp { 27 | 28 | } 29 | 30 | message UpdateReq { 31 | Options options = 1; 32 | message Options { 33 | string login_name = 1; 34 | string password = 2; 35 | string full_name = 3; 36 | repeated string groups = 4; 37 | } 38 | } 39 | 40 | message UpdateResp { 41 | 42 | } 43 | 44 | message DeleteReq { 45 | Options options = 1; 46 | message Options { 47 | string login_name = 1; 48 | } 49 | } 50 | 51 | message DeleteResp { 52 | 53 | } 54 | 55 | message EnableReq { 56 | Options options = 1; 57 | message Options { 58 | string login_name = 1; 59 | } 60 | } 61 | 62 | message EnableResp { 63 | 64 | } 65 | 66 | message DisableReq { 67 | Options options = 1; 68 | message Options { 69 | string login_name = 1; 70 | } 71 | } 72 | 73 | message DisableResp { 74 | } 75 | 76 | message DetailsReq { 77 | Options options = 1; 78 | message Options { 79 | string login_name = 1; 80 | } 81 | } 82 | 83 | message DetailsResp { 84 | 
UserDetails user_details = 1; 85 | message UserDetails { 86 | string login_name = 1; 87 | string full_name = 2; 88 | repeated string groups = 3; 89 | DateTime last_updated = 4; 90 | bool disabled = 5; 91 | 92 | message DateTime { 93 | int64 ticks_since_epoch = 1; 94 | } 95 | } 96 | } 97 | 98 | message ChangePasswordReq { 99 | Options options = 1; 100 | message Options { 101 | string login_name = 1; 102 | string current_password = 2; 103 | string new_password = 3; 104 | } 105 | } 106 | 107 | message ChangePasswordResp { 108 | } 109 | 110 | message ResetPasswordReq { 111 | Options options = 1; 112 | message Options { 113 | string login_name = 1; 114 | string new_password = 2; 115 | } 116 | } 117 | 118 | message ResetPasswordResp { 119 | } -------------------------------------------------------------------------------- /protos/tcp/google/protobuf/any.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 
18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 36 | option go_package = "github.com/golang/protobuf/ptypes/any"; 37 | option java_package = "com.google.protobuf"; 38 | option java_outer_classname = "AnyProto"; 39 | option java_multiple_files = true; 40 | option objc_class_prefix = "GPB"; 41 | 42 | // `Any` contains an arbitrary serialized protocol buffer message along with a 43 | // URL that describes the type of the serialized message. 44 | // 45 | // Protobuf library provides support to pack/unpack Any values in the form 46 | // of utility functions or additional generated methods of the Any type. 47 | // 48 | // Example 1: Pack and unpack a message in C++. 49 | // 50 | // Foo foo = ...; 51 | // Any any; 52 | // any.PackFrom(foo); 53 | // ... 54 | // if (any.UnpackTo(&foo)) { 55 | // ... 56 | // } 57 | // 58 | // Example 2: Pack and unpack a message in Java. 59 | // 60 | // Foo foo = ...; 61 | // Any any = Any.pack(foo); 62 | // ... 
63 | // if (any.is(Foo.class)) { 64 | // foo = any.unpack(Foo.class); 65 | // } 66 | // 67 | // Example 3: Pack and unpack a message in Python. 68 | // 69 | // foo = Foo(...) 70 | // any = Any() 71 | // any.Pack(foo) 72 | // ... 73 | // if any.Is(Foo.DESCRIPTOR): 74 | // any.Unpack(foo) 75 | // ... 76 | // 77 | // The pack methods provided by protobuf library will by default use 78 | // 'type.googleapis.com/full.type.name' as the type URL and the unpack 79 | // methods only use the fully qualified type name after the last '/' 80 | // in the type URL, for example "foo.bar.com/x/y.z" will yield type 81 | // name "y.z". 82 | // 83 | // 84 | // JSON 85 | // ==== 86 | // The JSON representation of an `Any` value uses the regular 87 | // representation of the deserialized, embedded message, with an 88 | // additional field `@type` which contains the type URL. Example: 89 | // 90 | // package google.profile; 91 | // message Person { 92 | // string first_name = 1; 93 | // string last_name = 2; 94 | // } 95 | // 96 | // { 97 | // "@type": "type.googleapis.com/google.profile.Person", 98 | // "firstName": , 99 | // "lastName": 100 | // } 101 | // 102 | // If the embedded message type is well-known and has a custom JSON 103 | // representation, that representation will be embedded adding a field 104 | // `value` which holds the custom JSON in addition to the `@type` 105 | // field. Example (for message [google.protobuf.Duration][]): 106 | // 107 | // { 108 | // "@type": "type.googleapis.com/google.protobuf.Duration", 109 | // "value": "1.212s" 110 | // } 111 | // 112 | message Any { 113 | // A URL/resource name whose content describes the type of the 114 | // serialized protocol buffer message. 115 | // 116 | // For URLs which use the scheme `http`, `https`, or no scheme, the 117 | // following restrictions and interpretations apply: 118 | // 119 | // * If no scheme is provided, `https` is assumed. 
120 | // * The last segment of the URL's path must represent the fully 121 | // qualified name of the type (as in `path/google.protobuf.Duration`). 122 | // The name should be in a canonical form (e.g., leading "." is 123 | // not accepted). 124 | // * An HTTP GET on the URL must yield a [google.protobuf.Type][] 125 | // value in binary format, or produce an error. 126 | // * Applications are allowed to cache lookup results based on the 127 | // URL, or have them precompiled into a binary to avoid any 128 | // lookup. Therefore, binary compatibility needs to be preserved 129 | // on changes to types. (Use versioned type names to manage 130 | // breaking changes.) 131 | // 132 | // Schemes other than `http`, `https` (or the empty scheme) might be 133 | // used with implementation specific semantics. 134 | // 135 | string type_url = 1; 136 | 137 | // Must be a valid serialized protocol buffer of the above specified type. 138 | bytes value = 2; 139 | } 140 | -------------------------------------------------------------------------------- /protos/tcp/google/protobuf/api.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. 
nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | import "google/protobuf/source_context.proto"; 36 | import "google/protobuf/type.proto"; 37 | 38 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 39 | option java_package = "com.google.protobuf"; 40 | option java_outer_classname = "ApiProto"; 41 | option java_multiple_files = true; 42 | option objc_class_prefix = "GPB"; 43 | 44 | // Api is a light-weight descriptor for a protocol buffer service. 45 | message Api { 46 | 47 | // The fully qualified name of this api, including package name 48 | // followed by the api's simple name. 49 | string name = 1; 50 | 51 | // The methods of this api, in unspecified order. 52 | repeated Method methods = 2; 53 | 54 | // Any metadata attached to the API. 55 | repeated Option options = 3; 56 | 57 | // A version string for this api. If specified, must have the form 58 | // `major-version.minor-version`, as in `1.10`. 
If the minor version 59 | // is omitted, it defaults to zero. If the entire version field is 60 | // empty, the major version is derived from the package name, as 61 | // outlined below. If the field is not empty, the version in the 62 | // package name will be verified to be consistent with what is 63 | // provided here. 64 | // 65 | // The versioning schema uses [semantic 66 | // versioning](http://semver.org) where the major version number 67 | // indicates a breaking change and the minor version an additive, 68 | // non-breaking change. Both version numbers are signals to users 69 | // what to expect from different versions, and should be carefully 70 | // chosen based on the product plan. 71 | // 72 | // The major version is also reflected in the package name of the 73 | // API, which must end in `v`, as in 74 | // `google.feature.v1`. For major versions 0 and 1, the suffix can 75 | // be omitted. Zero major versions must only be used for 76 | // experimental, none-GA apis. 77 | // 78 | // 79 | string version = 4; 80 | 81 | // Source context for the protocol buffer service represented by this 82 | // message. 83 | SourceContext source_context = 5; 84 | 85 | // Included APIs. See [Mixin][]. 86 | repeated Mixin mixins = 6; 87 | 88 | // The source syntax of the service. 89 | Syntax syntax = 7; 90 | } 91 | 92 | // Method represents a method of an api. 93 | message Method { 94 | 95 | // The simple name of this method. 96 | string name = 1; 97 | 98 | // A URL of the input message type. 99 | string request_type_url = 2; 100 | 101 | // If true, the request is streamed. 102 | bool request_streaming = 3; 103 | 104 | // The URL of the output message type. 105 | string response_type_url = 4; 106 | 107 | // If true, the response is streamed. 108 | bool response_streaming = 5; 109 | 110 | // Any metadata attached to the method. 111 | repeated Option options = 6; 112 | 113 | // The source syntax of this method. 
114 | Syntax syntax = 7; 115 | } 116 | 117 | // Declares an API to be included in this API. The including API must 118 | // redeclare all the methods from the included API, but documentation 119 | // and options are inherited as follows: 120 | // 121 | // - If after comment and whitespace stripping, the documentation 122 | // string of the redeclared method is empty, it will be inherited 123 | // from the original method. 124 | // 125 | // - Each annotation belonging to the service config (http, 126 | // visibility) which is not set in the redeclared method will be 127 | // inherited. 128 | // 129 | // - If an http annotation is inherited, the path pattern will be 130 | // modified as follows. Any version prefix will be replaced by the 131 | // version of the including API plus the [root][] path if specified. 132 | // 133 | // Example of a simple mixin: 134 | // 135 | // package google.acl.v1; 136 | // service AccessControl { 137 | // // Get the underlying ACL object. 138 | // rpc GetAcl(GetAclRequest) returns (Acl) { 139 | // option (google.api.http).get = "/v1/{resource=**}:getAcl"; 140 | // } 141 | // } 142 | // 143 | // package google.storage.v2; 144 | // service Storage { 145 | // rpc GetAcl(GetAclRequest) returns (Acl); 146 | // 147 | // // Get a data record. 148 | // rpc GetData(GetDataRequest) returns (Data) { 149 | // option (google.api.http).get = "/v2/{resource=**}"; 150 | // } 151 | // } 152 | // 153 | // Example of a mixin configuration: 154 | // 155 | // apis: 156 | // - name: google.storage.v2.Storage 157 | // mixins: 158 | // - name: google.acl.v1.AccessControl 159 | // 160 | // The mixin construct implies that all methods in `AccessControl` are 161 | // also declared with same name and request/response types in 162 | // `Storage`. 
A documentation generator or annotation processor will 163 | // see the effective `Storage.GetAcl` method after inherting 164 | // documentation and annotations as follows: 165 | // 166 | // service Storage { 167 | // // Get the underlying ACL object. 168 | // rpc GetAcl(GetAclRequest) returns (Acl) { 169 | // option (google.api.http).get = "/v2/{resource=**}:getAcl"; 170 | // } 171 | // ... 172 | // } 173 | // 174 | // Note how the version in the path pattern changed from `v1` to `v2`. 175 | // 176 | // If the `root` field in the mixin is specified, it should be a 177 | // relative path under which inherited HTTP paths are placed. Example: 178 | // 179 | // apis: 180 | // - name: google.storage.v2.Storage 181 | // mixins: 182 | // - name: google.acl.v1.AccessControl 183 | // root: acls 184 | // 185 | // This implies the following inherited HTTP annotation: 186 | // 187 | // service Storage { 188 | // // Get the underlying ACL object. 189 | // rpc GetAcl(GetAclRequest) returns (Acl) { 190 | // option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; 191 | // } 192 | // ... 193 | // } 194 | message Mixin { 195 | // The fully qualified name of the API which is included. 196 | string name = 1; 197 | 198 | // If non-empty specifies a path under which inherited HTTP paths 199 | // are rooted. 200 | string root = 2; 201 | } 202 | -------------------------------------------------------------------------------- /protos/tcp/google/protobuf/compiler/plugin.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 
3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | // Author: kenton@google.com (Kenton Varda) 32 | // 33 | // WARNING: The plugin interface is currently EXPERIMENTAL and is subject to 34 | // change. 35 | // 36 | // protoc (aka the Protocol Compiler) can be extended via plugins. 
A plugin is 37 | // just a program that reads a CodeGeneratorRequest from stdin and writes a 38 | // CodeGeneratorResponse to stdout. 39 | // 40 | // Plugins written using C++ can use google/protobuf/compiler/plugin.h instead 41 | // of dealing with the raw protocol defined here. 42 | // 43 | // A plugin executable needs only to be placed somewhere in the path. The 44 | // plugin should be named "protoc-gen-$NAME", and will then be used when the 45 | // flag "--${NAME}_out" is passed to protoc. 46 | 47 | syntax = "proto2"; 48 | package google.protobuf.compiler; 49 | option java_package = "com.google.protobuf.compiler"; 50 | option java_outer_classname = "PluginProtos"; 51 | 52 | option go_package = "plugin_go"; 53 | 54 | import "google/protobuf/descriptor.proto"; 55 | 56 | // An encoded CodeGeneratorRequest is written to the plugin's stdin. 57 | message CodeGeneratorRequest { 58 | // The .proto files that were explicitly listed on the command-line. The 59 | // code generator should generate code only for these files. Each file's 60 | // descriptor will be included in proto_file, below. 61 | repeated string file_to_generate = 1; 62 | 63 | // The generator parameter passed on the command-line. 64 | optional string parameter = 2; 65 | 66 | // FileDescriptorProtos for all files in files_to_generate and everything 67 | // they import. The files will appear in topological order, so each file 68 | // appears before any file that imports it. 69 | // 70 | // protoc guarantees that all proto_files will be written after 71 | // the fields above, even though this is not technically guaranteed by the 72 | // protobuf wire format. This theoretically could allow a plugin to stream 73 | // in the FileDescriptorProtos and handle them one by one rather than read 74 | // the entire set into memory at once. However, as of this writing, this 75 | // is not similarly optimized on protoc's end -- it will store all fields in 76 | // memory at once before sending them to the plugin. 
77 | repeated FileDescriptorProto proto_file = 15; 78 | } 79 | 80 | // The plugin writes an encoded CodeGeneratorResponse to stdout. 81 | message CodeGeneratorResponse { 82 | // Error message. If non-empty, code generation failed. The plugin process 83 | // should exit with status code zero even if it reports an error in this way. 84 | // 85 | // This should be used to indicate errors in .proto files which prevent the 86 | // code generator from generating correct code. Errors which indicate a 87 | // problem in protoc itself -- such as the input CodeGeneratorRequest being 88 | // unparseable -- should be reported by writing a message to stderr and 89 | // exiting with a non-zero status code. 90 | optional string error = 1; 91 | 92 | // Represents a single generated file. 93 | message File { 94 | // The file name, relative to the output directory. The name must not 95 | // contain "." or ".." components and must be relative, not be absolute (so, 96 | // the file cannot lie outside the output directory). "/" must be used as 97 | // the path separator, not "\". 98 | // 99 | // If the name is omitted, the content will be appended to the previous 100 | // file. This allows the generator to break large files into small chunks, 101 | // and allows the generated text to be streamed back to protoc so that large 102 | // files need not reside completely in memory at one time. Note that as of 103 | // this writing protoc does not optimize for this -- it will read the entire 104 | // CodeGeneratorResponse before writing files to disk. 105 | optional string name = 1; 106 | 107 | // If non-empty, indicates that the named file should already exist, and the 108 | // content here is to be inserted into that file at a defined insertion 109 | // point. This feature allows a code generator to extend the output 110 | // produced by another code generator. 
The original generator may provide 111 | // insertion points by placing special annotations in the file that look 112 | // like: 113 | // @@protoc_insertion_point(NAME) 114 | // The annotation can have arbitrary text before and after it on the line, 115 | // which allows it to be placed in a comment. NAME should be replaced with 116 | // an identifier naming the point -- this is what other generators will use 117 | // as the insertion_point. Code inserted at this point will be placed 118 | // immediately above the line containing the insertion point (thus multiple 119 | // insertions to the same point will come out in the order they were added). 120 | // The double-@ is intended to make it unlikely that the generated code 121 | // could contain things that look like insertion points by accident. 122 | // 123 | // For example, the C++ code generator places the following line in the 124 | // .pb.h files that it generates: 125 | // // @@protoc_insertion_point(namespace_scope) 126 | // This line appears within the scope of the file's package namespace, but 127 | // outside of any particular class. Another plugin can then specify the 128 | // insertion_point "namespace_scope" to generate additional classes or 129 | // other declarations that should be placed in this scope. 130 | // 131 | // Note that if the line containing the insertion point begins with 132 | // whitespace, the same whitespace will be added to every line of the 133 | // inserted text. This is useful for languages like Python, where 134 | // indentation matters. In these languages, the insertion point comment 135 | // should be indented the same amount as any inserted code will need to be 136 | // in order to work correctly in that context. 137 | // 138 | // The code generator that generates the initial file and the one which 139 | // inserts into it must both run as part of a single invocation of protoc. 
140 | // Code generators are executed in the order in which they appear on the 141 | // command line. 142 | // 143 | // If |insertion_point| is present, |name| must also be present. 144 | optional string insertion_point = 2; 145 | 146 | // The file contents. 147 | optional string content = 15; 148 | } 149 | repeated File file = 15; 150 | } 151 | -------------------------------------------------------------------------------- /protos/tcp/google/protobuf/duration.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 36 | option cc_enable_arenas = true; 37 | option go_package = "github.com/golang/protobuf/ptypes/duration"; 38 | option java_package = "com.google.protobuf"; 39 | option java_outer_classname = "DurationProto"; 40 | option java_multiple_files = true; 41 | option objc_class_prefix = "GPB"; 42 | 43 | // A Duration represents a signed, fixed-length span of time represented 44 | // as a count of seconds and fractions of seconds at nanosecond 45 | // resolution. It is independent of any calendar and concepts like "day" 46 | // or "month". It is related to Timestamp in that the difference between 47 | // two Timestamp values is a Duration and it can be added or subtracted 48 | // from a Timestamp. Range is approximately +-10,000 years. 49 | // 50 | // Example 1: Compute Duration from two Timestamps in pseudo code. 
51 | // 52 | // Timestamp start = ...; 53 | // Timestamp end = ...; 54 | // Duration duration = ...; 55 | // 56 | // duration.seconds = end.seconds - start.seconds; 57 | // duration.nanos = end.nanos - start.nanos; 58 | // 59 | // if (duration.seconds < 0 && duration.nanos > 0) { 60 | // duration.seconds += 1; 61 | // duration.nanos -= 1000000000; 62 | // } else if (durations.seconds > 0 && duration.nanos < 0) { 63 | // duration.seconds -= 1; 64 | // duration.nanos += 1000000000; 65 | // } 66 | // 67 | // Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 68 | // 69 | // Timestamp start = ...; 70 | // Duration duration = ...; 71 | // Timestamp end = ...; 72 | // 73 | // end.seconds = start.seconds + duration.seconds; 74 | // end.nanos = start.nanos + duration.nanos; 75 | // 76 | // if (end.nanos < 0) { 77 | // end.seconds -= 1; 78 | // end.nanos += 1000000000; 79 | // } else if (end.nanos >= 1000000000) { 80 | // end.seconds += 1; 81 | // end.nanos -= 1000000000; 82 | // } 83 | // 84 | // Example 3: Compute Duration from datetime.timedelta in Python. 85 | // 86 | // td = datetime.timedelta(days=3, minutes=10) 87 | // duration = Duration() 88 | // duration.FromTimedelta(td) 89 | // 90 | // 91 | message Duration { 92 | 93 | // Signed seconds of the span of time. Must be from -315,576,000,000 94 | // to +315,576,000,000 inclusive. 95 | int64 seconds = 1; 96 | 97 | // Signed fractions of a second at nanosecond resolution of the span 98 | // of time. Durations less than one second are represented with a 0 99 | // `seconds` field and a positive or negative `nanos` field. For durations 100 | // of one second or more, a non-zero value for the `nanos` field must be 101 | // of the same sign as the `seconds` field. Must be from -999,999,999 102 | // to +999,999,999 inclusive. 
103 | int32 nanos = 2; 104 | } 105 | -------------------------------------------------------------------------------- /protos/tcp/google/protobuf/empty.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 36 | option go_package = "github.com/golang/protobuf/ptypes/empty"; 37 | option java_package = "com.google.protobuf"; 38 | option java_outer_classname = "EmptyProto"; 39 | option java_multiple_files = true; 40 | option objc_class_prefix = "GPB"; 41 | option cc_enable_arenas = true; 42 | 43 | // A generic empty message that you can re-use to avoid defining duplicated 44 | // empty messages in your APIs. A typical example is to use it as the request 45 | // or the response type of an API method. For instance: 46 | // 47 | // service Foo { 48 | // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); 49 | // } 50 | // 51 | // The JSON representation for `Empty` is empty JSON object `{}`. 52 | message Empty {} 53 | -------------------------------------------------------------------------------- /protos/tcp/google/protobuf/field_mask.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 
3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 36 | option java_package = "com.google.protobuf"; 37 | option java_outer_classname = "FieldMaskProto"; 38 | option java_multiple_files = true; 39 | option objc_class_prefix = "GPB"; 40 | 41 | // `FieldMask` represents a set of symbolic field paths, for example: 42 | // 43 | // paths: "f.a" 44 | // paths: "f.b.d" 45 | // 46 | // Here `f` represents a field in some root message, `a` and `b` 47 | // fields in the message found in `f`, and `d` a field found in the 48 | // message in `f.b`. 49 | // 50 | // Field masks are used to specify a subset of fields that should be 51 | // returned by a get operation or modified by an update operation. 52 | // Field masks also have a custom JSON encoding (see below). 53 | // 54 | // # Field Masks in Projections 55 | // 56 | // When used in the context of a projection, a response message or 57 | // sub-message is filtered by the API to only contain those fields as 58 | // specified in the mask. For example, if the mask in the previous 59 | // example is applied to a response message as follows: 60 | // 61 | // f { 62 | // a : 22 63 | // b { 64 | // d : 1 65 | // x : 2 66 | // } 67 | // y : 13 68 | // } 69 | // z: 8 70 | // 71 | // The result will not contain specific values for fields x,y and z 72 | // (their value will be set to the default, and omitted in proto text 73 | // output): 74 | // 75 | // 76 | // f { 77 | // a : 22 78 | // b { 79 | // d : 1 80 | // } 81 | // } 82 | // 83 | // A repeated field is not allowed except at the last position of a 84 | // paths string. 85 | // 86 | // If a FieldMask object is not present in a get operation, the 87 | // operation applies to all fields (as if a FieldMask of all fields 88 | // had been specified). 89 | // 90 | // Note that a field mask does not necessarily apply to the 91 | // top-level response message. 
In case of a REST get operation, the 92 | // field mask applies directly to the response, but in case of a REST 93 | // list operation, the mask instead applies to each individual message 94 | // in the returned resource list. In case of a REST custom method, 95 | // other definitions may be used. Where the mask applies will be 96 | // clearly documented together with its declaration in the API. In 97 | // any case, the effect on the returned resource/resources is required 98 | // behavior for APIs. 99 | // 100 | // # Field Masks in Update Operations 101 | // 102 | // A field mask in update operations specifies which fields of the 103 | // targeted resource are going to be updated. The API is required 104 | // to only change the values of the fields as specified in the mask 105 | // and leave the others untouched. If a resource is passed in to 106 | // describe the updated values, the API ignores the values of all 107 | // fields not covered by the mask. 108 | // 109 | // If a repeated field is specified for an update operation, the existing 110 | // repeated values in the target resource will be overwritten by the new values. 111 | // Note that a repeated field is only allowed in the last position of a `paths` 112 | // string. 113 | // 114 | // If a sub-message is specified in the last position of the field mask for an 115 | // update operation, then the existing sub-message in the target resource is 116 | // overwritten. 
Given the target message: 117 | // 118 | // f { 119 | // b { 120 | // d : 1 121 | // x : 2 122 | // } 123 | // c : 1 124 | // } 125 | // 126 | // And an update message: 127 | // 128 | // f { 129 | // b { 130 | // d : 10 131 | // } 132 | // } 133 | // 134 | // then if the field mask is: 135 | // 136 | // paths: "f.b" 137 | // 138 | // then the result will be: 139 | // 140 | // f { 141 | // b { 142 | // d : 10 143 | // } 144 | // c : 1 145 | // } 146 | // 147 | // However, if the update mask was: 148 | // 149 | // paths: "f.b.d" 150 | // 151 | // then the result would be: 152 | // 153 | // f { 154 | // b { 155 | // d : 10 156 | // x : 2 157 | // } 158 | // c : 1 159 | // } 160 | // 161 | // In order to reset a field's value to the default, the field must 162 | // be in the mask and set to the default value in the provided resource. 163 | // Hence, in order to reset all fields of a resource, provide a default 164 | // instance of the resource and set all fields in the mask, or do 165 | // not provide a mask as described below. 166 | // 167 | // If a field mask is not present on update, the operation applies to 168 | // all fields (as if a field mask of all fields has been specified). 169 | // Note that in the presence of schema evolution, this may mean that 170 | // fields the client does not know and has therefore not filled into 171 | // the request will be reset to their default. If this is unwanted 172 | // behavior, a specific service may require a client to always specify 173 | // a field mask, producing an error if not. 174 | // 175 | // As with get operations, the location of the resource which 176 | // describes the updated values in the request message depends on the 177 | // operation kind. In any case, the effect of the field mask is 178 | // required to be honored by the API. 
179 | // 180 | // ## Considerations for HTTP REST 181 | // 182 | // The HTTP kind of an update operation which uses a field mask must 183 | // be set to PATCH instead of PUT in order to satisfy HTTP semantics 184 | // (PUT must only be used for full updates). 185 | // 186 | // # JSON Encoding of Field Masks 187 | // 188 | // In JSON, a field mask is encoded as a single string where paths are 189 | // separated by a comma. Fields name in each path are converted 190 | // to/from lower-camel naming conventions. 191 | // 192 | // As an example, consider the following message declarations: 193 | // 194 | // message Profile { 195 | // User user = 1; 196 | // Photo photo = 2; 197 | // } 198 | // message User { 199 | // string display_name = 1; 200 | // string address = 2; 201 | // } 202 | // 203 | // In proto a field mask for `Profile` may look as such: 204 | // 205 | // mask { 206 | // paths: "user.display_name" 207 | // paths: "photo" 208 | // } 209 | // 210 | // In JSON, the same mask is represented as below: 211 | // 212 | // { 213 | // mask: "user.displayName,photo" 214 | // } 215 | // 216 | // # Field Masks and Oneof Fields 217 | // 218 | // Field masks treat fields in oneofs just as regular fields. Consider the 219 | // following message: 220 | // 221 | // message SampleMessage { 222 | // oneof test_oneof { 223 | // string name = 4; 224 | // SubMessage sub_message = 9; 225 | // } 226 | // } 227 | // 228 | // The field mask can be: 229 | // 230 | // mask { 231 | // paths: "name" 232 | // } 233 | // 234 | // Or: 235 | // 236 | // mask { 237 | // paths: "sub_message" 238 | // } 239 | // 240 | // Note that oneof type names ("test_oneof" in this case) cannot be used in 241 | // paths. 242 | message FieldMask { 243 | // The set of field mask paths. 
244 | repeated string paths = 1; 245 | } 246 | -------------------------------------------------------------------------------- /protos/tcp/google/protobuf/source_context.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 36 | option java_package = "com.google.protobuf"; 37 | option java_outer_classname = "SourceContextProto"; 38 | option java_multiple_files = true; 39 | option objc_class_prefix = "GPB"; 40 | 41 | // `SourceContext` represents information about the source of a 42 | // protobuf element, like the file in which it is defined. 43 | message SourceContext { 44 | // The path-qualified name of the .proto file that contained the associated 45 | // protobuf element. For example: `"google/protobuf/source_context.proto"`. 46 | string file_name = 1; 47 | } 48 | -------------------------------------------------------------------------------- /protos/tcp/google/protobuf/struct.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 
11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 36 | option cc_enable_arenas = true; 37 | option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; 38 | option java_package = "com.google.protobuf"; 39 | option java_outer_classname = "StructProto"; 40 | option java_multiple_files = true; 41 | option objc_class_prefix = "GPB"; 42 | 43 | 44 | // `Struct` represents a structured data value, consisting of fields 45 | // which map to dynamically typed values. In some languages, `Struct` 46 | // might be supported by a native representation. 
For example, in 47 | // scripting languages like JS a struct is represented as an 48 | // object. The details of that representation are described together 49 | // with the proto support for the language. 50 | // 51 | // The JSON representation for `Struct` is JSON object. 52 | message Struct { 53 | // Unordered map of dynamically typed values. 54 | map<string, Value> fields = 1; 55 | } 56 | 57 | // `Value` represents a dynamically typed value which can be either 58 | // null, a number, a string, a boolean, a recursive struct value, or a 59 | // list of values. A producer of value is expected to set one of that 60 | // variants, absence of any variant indicates an error. 61 | // 62 | // The JSON representation for `Value` is JSON value. 63 | message Value { 64 | // The kind of value. 65 | oneof kind { 66 | // Represents a null value. 67 | NullValue null_value = 1; 68 | // Represents a double value. 69 | double number_value = 2; 70 | // Represents a string value. 71 | string string_value = 3; 72 | // Represents a boolean value. 73 | bool bool_value = 4; 74 | // Represents a structured value. 75 | Struct struct_value = 5; 76 | // Represents a repeated `Value`. 77 | ListValue list_value = 6; 78 | } 79 | } 80 | 81 | // `NullValue` is a singleton enumeration to represent the null value for the 82 | // `Value` type union. 83 | // 84 | // The JSON representation for `NullValue` is JSON `null`. 85 | enum NullValue { 86 | // Null value. 87 | NULL_VALUE = 0; 88 | } 89 | 90 | // `ListValue` is a wrapper around a repeated field of values. 91 | // 92 | // The JSON representation for `ListValue` is JSON array. 93 | message ListValue { 94 | // Repeated field of dynamically typed values. 
95 | repeated Value values = 1; 96 | } 97 | -------------------------------------------------------------------------------- /protos/tcp/google/protobuf/timestamp.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 36 | option cc_enable_arenas = true; 37 | option go_package = "github.com/golang/protobuf/ptypes/timestamp"; 38 | option java_package = "com.google.protobuf"; 39 | option java_outer_classname = "TimestampProto"; 40 | option java_multiple_files = true; 41 | option objc_class_prefix = "GPB"; 42 | 43 | // A Timestamp represents a point in time independent of any time zone 44 | // or calendar, represented as seconds and fractions of seconds at 45 | // nanosecond resolution in UTC Epoch time. It is encoded using the 46 | // Proleptic Gregorian Calendar which extends the Gregorian calendar 47 | // backwards to year one. It is encoded assuming all minutes are 60 48 | // seconds long, i.e. leap seconds are "smeared" so that no leap second 49 | // table is needed for interpretation. Range is from 50 | // 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 51 | // By restricting to that range, we ensure that we can convert to 52 | // and from RFC 3339 date strings. 53 | // See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). 54 | // 55 | // Example 1: Compute Timestamp from POSIX `time()`. 
56 | // 57 | // Timestamp timestamp; 58 | // timestamp.set_seconds(time(NULL)); 59 | // timestamp.set_nanos(0); 60 | // 61 | // Example 2: Compute Timestamp from POSIX `gettimeofday()`. 62 | // 63 | // struct timeval tv; 64 | // gettimeofday(&tv, NULL); 65 | // 66 | // Timestamp timestamp; 67 | // timestamp.set_seconds(tv.tv_sec); 68 | // timestamp.set_nanos(tv.tv_usec * 1000); 69 | // 70 | // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. 71 | // 72 | // FILETIME ft; 73 | // GetSystemTimeAsFileTime(&ft); 74 | // UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; 75 | // 76 | // // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z 77 | // // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 78 | // Timestamp timestamp; 79 | // timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); 80 | // timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); 81 | // 82 | // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. 83 | // 84 | // long millis = System.currentTimeMillis(); 85 | // 86 | // Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) 87 | // .setNanos((int) ((millis % 1000) * 1000000)).build(); 88 | // 89 | // 90 | // Example 5: Compute Timestamp from current time in Python. 91 | // 92 | // timestamp = Timestamp() 93 | // timestamp.GetCurrentTime() 94 | // 95 | // 96 | message Timestamp { 97 | 98 | // Represents seconds of UTC time since Unix epoch 99 | // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 100 | // 9999-12-31T23:59:59Z inclusive. 101 | int64 seconds = 1; 102 | 103 | // Non-negative fractions of a second at nanosecond resolution. Negative 104 | // second values with fractions must still have non-negative nanos values 105 | // that count forward in time. Must be from 0 to 999,999,999 106 | // inclusive. 
107 | int32 nanos = 2; 108 | } 109 | -------------------------------------------------------------------------------- /protos/tcp/google/protobuf/type.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | syntax = "proto3"; 32 | 33 | package google.protobuf; 34 | 35 | import "google/protobuf/any.proto"; 36 | import "google/protobuf/source_context.proto"; 37 | 38 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 39 | option cc_enable_arenas = true; 40 | option java_package = "com.google.protobuf"; 41 | option java_outer_classname = "TypeProto"; 42 | option java_multiple_files = true; 43 | option objc_class_prefix = "GPB"; 44 | 45 | // A protocol buffer message type. 46 | message Type { 47 | // The fully qualified message name. 48 | string name = 1; 49 | // The list of fields. 50 | repeated Field fields = 2; 51 | // The list of types appearing in `oneof` definitions in this type. 52 | repeated string oneofs = 3; 53 | // The protocol buffer options. 54 | repeated Option options = 4; 55 | // The source context. 56 | SourceContext source_context = 5; 57 | // The source syntax. 58 | Syntax syntax = 6; 59 | } 60 | 61 | // A single field of a message type. 62 | message Field { 63 | // Basic field types. 64 | enum Kind { 65 | // Field type unknown. 66 | TYPE_UNKNOWN = 0; 67 | // Field type double. 68 | TYPE_DOUBLE = 1; 69 | // Field type float. 70 | TYPE_FLOAT = 2; 71 | // Field type int64. 72 | TYPE_INT64 = 3; 73 | // Field type uint64. 74 | TYPE_UINT64 = 4; 75 | // Field type int32. 76 | TYPE_INT32 = 5; 77 | // Field type fixed64. 78 | TYPE_FIXED64 = 6; 79 | // Field type fixed32. 
80 | TYPE_FIXED32 = 7; 81 | // Field type bool. 82 | TYPE_BOOL = 8; 83 | // Field type string. 84 | TYPE_STRING = 9; 85 | // Field type group. Proto2 syntax only, and deprecated. 86 | TYPE_GROUP = 10; 87 | // Field type message. 88 | TYPE_MESSAGE = 11; 89 | // Field type bytes. 90 | TYPE_BYTES = 12; 91 | // Field type uint32. 92 | TYPE_UINT32 = 13; 93 | // Field type enum. 94 | TYPE_ENUM = 14; 95 | // Field type sfixed32. 96 | TYPE_SFIXED32 = 15; 97 | // Field type sfixed64. 98 | TYPE_SFIXED64 = 16; 99 | // Field type sint32. 100 | TYPE_SINT32 = 17; 101 | // Field type sint64. 102 | TYPE_SINT64 = 18; 103 | }; 104 | 105 | // Whether a field is optional, required, or repeated. 106 | enum Cardinality { 107 | // For fields with unknown cardinality. 108 | CARDINALITY_UNKNOWN = 0; 109 | // For optional fields. 110 | CARDINALITY_OPTIONAL = 1; 111 | // For required fields. Proto2 syntax only. 112 | CARDINALITY_REQUIRED = 2; 113 | // For repeated fields. 114 | CARDINALITY_REPEATED = 3; 115 | }; 116 | 117 | // The field type. 118 | Kind kind = 1; 119 | // The field cardinality. 120 | Cardinality cardinality = 2; 121 | // The field number. 122 | int32 number = 3; 123 | // The field name. 124 | string name = 4; 125 | // The field type URL, without the scheme, for message or enumeration 126 | // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. 127 | string type_url = 6; 128 | // The index of the field type in `Type.oneofs`, for message or enumeration 129 | // types. The first type has index 1; zero means the type is not in the list. 130 | int32 oneof_index = 7; 131 | // Whether to use alternative packed wire representation. 132 | bool packed = 8; 133 | // The protocol buffer options. 134 | repeated Option options = 9; 135 | // The field JSON name. 136 | string json_name = 10; 137 | // The string value of the default value of this field. Proto2 syntax only. 138 | string default_value = 11; 139 | } 140 | 141 | // Enum type definition. 
142 | message Enum { 143 | // Enum type name. 144 | string name = 1; 145 | // Enum value definitions. 146 | repeated EnumValue enumvalue = 2; 147 | // Protocol buffer options. 148 | repeated Option options = 3; 149 | // The source context. 150 | SourceContext source_context = 4; 151 | // The source syntax. 152 | Syntax syntax = 5; 153 | } 154 | 155 | // Enum value definition. 156 | message EnumValue { 157 | // Enum value name. 158 | string name = 1; 159 | // Enum value number. 160 | int32 number = 2; 161 | // Protocol buffer options. 162 | repeated Option options = 3; 163 | } 164 | 165 | // A protocol buffer option, which can be attached to a message, field, 166 | // enumeration, etc. 167 | message Option { 168 | // The option's name. For example, `"java_package"`. 169 | string name = 1; 170 | // The option's value. For example, `"com.google.protobuf"`. 171 | Any value = 2; 172 | } 173 | 174 | // The syntax in which a protocol buffer element is defined. 175 | enum Syntax { 176 | // Syntax `proto2`. 177 | SYNTAX_PROTO2 = 0; 178 | // Syntax `proto3`. 179 | SYNTAX_PROTO3 = 1; 180 | } 181 | -------------------------------------------------------------------------------- /protos/tcp/google/protobuf/wrappers.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers - Google's data interchange format 2 | // Copyright 2008 Google Inc. All rights reserved. 3 | // https://developers.google.com/protocol-buffers/ 4 | // 5 | // Redistribution and use in source and binary forms, with or without 6 | // modification, are permitted provided that the following conditions are 7 | // met: 8 | // 9 | // * Redistributions of source code must retain the above copyright 10 | // notice, this list of conditions and the following disclaimer. 
11 | // * Redistributions in binary form must reproduce the above 12 | // copyright notice, this list of conditions and the following disclaimer 13 | // in the documentation and/or other materials provided with the 14 | // distribution. 15 | // * Neither the name of Google Inc. nor the names of its 16 | // contributors may be used to endorse or promote products derived from 17 | // this software without specific prior written permission. 18 | // 19 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | // Wrappers for primitive (non-message) types. These types are useful 32 | // for embedding primitives in the `google.protobuf.Any` type and for places 33 | // where we need to distinguish between the absence of a primitive 34 | // typed field and its default value. 
35 | 36 | syntax = "proto3"; 37 | 38 | package google.protobuf; 39 | 40 | option csharp_namespace = "Google.Protobuf.WellKnownTypes"; 41 | option cc_enable_arenas = true; 42 | option go_package = "github.com/golang/protobuf/ptypes/wrappers"; 43 | option java_package = "com.google.protobuf"; 44 | option java_outer_classname = "WrappersProto"; 45 | option java_multiple_files = true; 46 | option objc_class_prefix = "GPB"; 47 | 48 | // Wrapper message for `double`. 49 | // 50 | // The JSON representation for `DoubleValue` is JSON number. 51 | message DoubleValue { 52 | // The double value. 53 | double value = 1; 54 | } 55 | 56 | // Wrapper message for `float`. 57 | // 58 | // The JSON representation for `FloatValue` is JSON number. 59 | message FloatValue { 60 | // The float value. 61 | float value = 1; 62 | } 63 | 64 | // Wrapper message for `int64`. 65 | // 66 | // The JSON representation for `Int64Value` is JSON string. 67 | message Int64Value { 68 | // The int64 value. 69 | int64 value = 1; 70 | } 71 | 72 | // Wrapper message for `uint64`. 73 | // 74 | // The JSON representation for `UInt64Value` is JSON string. 75 | message UInt64Value { 76 | // The uint64 value. 77 | uint64 value = 1; 78 | } 79 | 80 | // Wrapper message for `int32`. 81 | // 82 | // The JSON representation for `Int32Value` is JSON number. 83 | message Int32Value { 84 | // The int32 value. 85 | int32 value = 1; 86 | } 87 | 88 | // Wrapper message for `uint32`. 89 | // 90 | // The JSON representation for `UInt32Value` is JSON number. 91 | message UInt32Value { 92 | // The uint32 value. 93 | uint32 value = 1; 94 | } 95 | 96 | // Wrapper message for `bool`. 97 | // 98 | // The JSON representation for `BoolValue` is JSON `true` and `false`. 99 | message BoolValue { 100 | // The bool value. 101 | bool value = 1; 102 | } 103 | 104 | // Wrapper message for `string`. 105 | // 106 | // The JSON representation for `StringValue` is JSON string. 107 | message StringValue { 108 | // The string value. 
109 | string value = 1; 110 | } 111 | 112 | // Wrapper message for `bytes`. 113 | // 114 | // The JSON representation for `BytesValue` is JSON string. 115 | message BytesValue { 116 | // The bytes value. 117 | bytes value = 1; 118 | } 119 | -------------------------------------------------------------------------------- /protos/tcp/messages.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | 3 | package EventStore.Client.Messages; 4 | 5 | import "rustproto.proto"; 6 | 7 | enum OperationResult 8 | { 9 | Success = 0; 10 | PrepareTimeout = 1; 11 | CommitTimeout = 2; 12 | ForwardTimeout = 3; 13 | WrongExpectedVersion = 4; 14 | StreamDeleted = 5; 15 | InvalidTransaction = 6; 16 | AccessDenied = 7; 17 | } 18 | 19 | message NewEvent { 20 | required bytes event_id = 1; 21 | required string event_type = 2; 22 | required int32 data_content_type = 3; 23 | required int32 metadata_content_type = 4; 24 | required bytes data = 5; 25 | optional bytes metadata = 6; 26 | } 27 | 28 | message EventRecord { 29 | required string event_stream_id = 1; 30 | required int64 event_number = 2; 31 | required bytes event_id = 3; 32 | required string event_type = 4; 33 | required int32 data_content_type = 5; 34 | required int32 metadata_content_type = 6; 35 | required bytes data = 7; 36 | optional bytes metadata = 8; 37 | optional int64 created = 9; 38 | optional int64 created_epoch = 10; 39 | } 40 | 41 | message ResolvedIndexedEvent { 42 | optional EventRecord event = 1; 43 | optional EventRecord link = 2; 44 | } 45 | 46 | message ResolvedEvent { 47 | required EventRecord event = 1; 48 | optional EventRecord link = 2; 49 | required int64 commit_position = 3; 50 | required int64 prepare_position = 4; 51 | } 52 | 53 | message WriteEvents { 54 | required string event_stream_id = 1; 55 | required int64 expected_version = 2; 56 | repeated NewEvent events = 3; 57 | required bool require_master = 4; 58 | } 59 | 60 | message 
WriteEventsCompleted { 61 | required OperationResult result = 1; 62 | optional string message = 2; 63 | required int64 first_event_number = 3; 64 | required int64 last_event_number = 4; 65 | optional int64 prepare_position = 5; 66 | optional int64 commit_position = 6; 67 | optional int64 current_version = 7; 68 | } 69 | 70 | message DeleteStream { 71 | required string event_stream_id = 1; 72 | required int64 expected_version = 2; 73 | required bool require_master = 3; 74 | optional bool hard_delete = 4; 75 | } 76 | 77 | message DeleteStreamCompleted { 78 | required OperationResult result = 1; 79 | optional string message = 2; 80 | optional int64 prepare_position = 3; 81 | optional int64 commit_position = 4; 82 | } 83 | 84 | message TransactionStart { 85 | required string event_stream_id = 1; 86 | required int64 expected_version = 2; 87 | required bool require_master = 3; 88 | } 89 | 90 | message TransactionStartCompleted { 91 | required int64 transaction_id = 1; 92 | required OperationResult result = 2; 93 | optional string message = 3; 94 | } 95 | 96 | message TransactionWrite { 97 | required int64 transaction_id = 1; 98 | repeated NewEvent events = 2; 99 | required bool require_master = 3; 100 | } 101 | 102 | message TransactionWriteCompleted { 103 | required int64 transaction_id = 1; 104 | required OperationResult result = 2; 105 | optional string message = 3; 106 | } 107 | 108 | message TransactionCommit { 109 | required int64 transaction_id = 1; 110 | required bool require_master = 2; 111 | } 112 | 113 | message TransactionCommitCompleted { 114 | required int64 transaction_id = 1; 115 | required OperationResult result = 2; 116 | optional string message = 3; 117 | required int64 first_event_number = 4; 118 | required int64 last_event_number = 5; 119 | optional int64 prepare_position = 6; 120 | optional int64 commit_position = 7; 121 | } 122 | 123 | message ReadEvent { 124 | required string event_stream_id = 1; 125 | required int64 event_number = 2; 126 | 
required bool resolve_link_tos = 3; 127 | required bool require_master = 4; 128 | } 129 | 130 | message ReadEventCompleted { 131 | enum ReadEventResult { 132 | Success = 0; 133 | NotFound = 1; 134 | NoStream = 2; 135 | StreamDeleted = 3; 136 | Error = 4; 137 | AccessDenied = 5; 138 | } 139 | 140 | required ReadEventResult result = 1; 141 | required ResolvedIndexedEvent event = 2; 142 | 143 | optional string error = 3; 144 | } 145 | 146 | message ReadStreamEvents { 147 | required string event_stream_id = 1; 148 | required int64 from_event_number = 2; 149 | required int32 max_count = 3; 150 | required bool resolve_link_tos = 4; 151 | required bool require_master = 5; 152 | } 153 | 154 | message ReadStreamEventsCompleted { 155 | enum ReadStreamResult { 156 | Success = 0; 157 | NoStream = 1; 158 | StreamDeleted = 2; 159 | NotModified = 3; 160 | Error = 4; 161 | AccessDenied = 5; 162 | } 163 | 164 | repeated ResolvedIndexedEvent events = 1; 165 | required ReadStreamResult result = 2; 166 | required int64 next_event_number = 3; 167 | required int64 last_event_number = 4; 168 | required bool is_end_of_stream = 5; 169 | required int64 last_commit_position = 6; 170 | 171 | optional string error = 7; 172 | } 173 | 174 | message ReadAllEvents { 175 | required int64 commit_position = 1; 176 | required int64 prepare_position = 2; 177 | required int32 max_count = 3; 178 | required bool resolve_link_tos = 4; 179 | required bool require_master = 5; 180 | } 181 | 182 | message ReadAllEventsCompleted { 183 | enum ReadAllResult { 184 | Success = 0; 185 | NotModified = 1; 186 | Error = 2; 187 | AccessDenied = 3; 188 | } 189 | 190 | required int64 commit_position = 1; 191 | required int64 prepare_position = 2; 192 | repeated ResolvedEvent events = 3; 193 | required int64 next_commit_position = 4; 194 | required int64 next_prepare_position = 5; 195 | 196 | optional ReadAllResult result = 6 [default = Success]; 197 | optional string error = 7; 198 | } 199 | 200 | message 
CreatePersistentSubscription { 201 | required string subscription_group_name = 1; 202 | required string event_stream_id = 2; 203 | required bool resolve_link_tos = 3; 204 | required int64 start_from = 4; 205 | required int32 message_timeout_milliseconds = 5; 206 | required bool record_statistics = 6; 207 | required int32 live_buffer_size = 7; 208 | required int32 read_batch_size = 8; 209 | required int32 buffer_size = 9; 210 | required int32 max_retry_count = 10; 211 | required bool prefer_round_robin = 11; 212 | required int32 checkpoint_after_time = 12; 213 | required int32 checkpoint_max_count = 13; 214 | required int32 checkpoint_min_count = 14; 215 | required int32 subscriber_max_count = 15; 216 | optional string named_consumer_strategy = 16; 217 | } 218 | 219 | message DeletePersistentSubscription { 220 | required string subscription_group_name = 1; 221 | required string event_stream_id = 2; 222 | } 223 | 224 | message UpdatePersistentSubscription { 225 | required string subscription_group_name = 1; 226 | required string event_stream_id = 2; 227 | required bool resolve_link_tos = 3; 228 | required int64 start_from = 4; 229 | required int32 message_timeout_milliseconds = 5; 230 | required bool record_statistics = 6; 231 | required int32 live_buffer_size = 7; 232 | required int32 read_batch_size = 8; 233 | required int32 buffer_size = 9; 234 | required int32 max_retry_count = 10; 235 | required bool prefer_round_robin = 11; 236 | required int32 checkpoint_after_time = 12; 237 | required int32 checkpoint_max_count = 13; 238 | required int32 checkpoint_min_count = 14; 239 | required int32 subscriber_max_count = 15; 240 | optional string named_consumer_strategy = 16; 241 | } 242 | 243 | message UpdatePersistentSubscriptionCompleted { 244 | enum UpdatePersistentSubscriptionResult { 245 | Success = 0; 246 | DoesNotExist = 1; 247 | Fail = 2; 248 | AccessDenied=3; 249 | } 250 | required UpdatePersistentSubscriptionResult result = 1 [default = Success]; 251 | optional 
string reason = 2; 252 | } 253 | 254 | message CreatePersistentSubscriptionCompleted { 255 | enum CreatePersistentSubscriptionResult { 256 | Success = 0; 257 | AlreadyExists = 1; 258 | Fail = 2; 259 | AccessDenied=3; 260 | } 261 | required CreatePersistentSubscriptionResult result = 1 [default = Success]; 262 | optional string reason = 2; 263 | } 264 | 265 | message DeletePersistentSubscriptionCompleted { 266 | enum DeletePersistentSubscriptionResult { 267 | Success = 0; 268 | DoesNotExist = 1; 269 | Fail = 2; 270 | AccessDenied = 3; 271 | } 272 | required DeletePersistentSubscriptionResult result = 1 [default = Success]; 273 | optional string reason = 2; 274 | } 275 | 276 | message ConnectToPersistentSubscription { 277 | required string subscription_id = 1; 278 | required string event_stream_id = 2; 279 | required int32 allowed_in_flight_messages = 3; 280 | 281 | } 282 | 283 | message PersistentSubscriptionAckEvents { 284 | required string subscription_id = 1; 285 | repeated bytes processed_event_ids = 2; 286 | } 287 | 288 | message PersistentSubscriptionNakEvents { 289 | enum NakAction { 290 | Unknown = 0; 291 | Park = 1; 292 | Retry = 2; 293 | Skip = 3; 294 | Stop = 4; 295 | } 296 | 297 | required string subscription_id = 1; 298 | repeated bytes processed_event_ids = 2; 299 | optional string message = 3; 300 | required NakAction action = 4 [default = Unknown]; 301 | } 302 | 303 | message PersistentSubscriptionConfirmation { 304 | required int64 last_commit_position = 1; 305 | required string subscription_id = 2; 306 | optional int64 last_event_number = 3; 307 | } 308 | 309 | message PersistentSubscriptionStreamEventAppeared { 310 | required ResolvedIndexedEvent event = 1; 311 | optional int32 retryCount = 2; 312 | } 313 | 314 | message SubscribeToStream { 315 | required string event_stream_id = 1; 316 | required bool resolve_link_tos = 2; 317 | } 318 | 319 | message SubscriptionConfirmation { 320 | required int64 last_commit_position = 1; 321 | optional int64 
last_event_number = 2; 322 | } 323 | 324 | message StreamEventAppeared { 325 | required ResolvedEvent event = 1; 326 | } 327 | 328 | message UnsubscribeFromStream { 329 | } 330 | 331 | message SubscriptionDropped { 332 | 333 | enum SubscriptionDropReason { 334 | Unsubscribed = 0; 335 | AccessDenied = 1; 336 | NotFound=2; 337 | PersistentSubscriptionDeleted=3; 338 | SubscriberMaxCountReached=4; 339 | } 340 | 341 | optional SubscriptionDropReason reason = 1 [default = Unsubscribed]; 342 | } 343 | 344 | message NotHandled { 345 | 346 | enum NotHandledReason { 347 | NotReady = 0; 348 | TooBusy = 1; 349 | NotMaster = 2; 350 | } 351 | 352 | required NotHandledReason reason = 1; 353 | optional bytes additional_info = 2; 354 | 355 | message MasterInfo { 356 | required string external_tcp_address = 1; 357 | required int32 external_tcp_port = 2; 358 | required string external_http_address = 3; 359 | required int32 external_http_port = 4; 360 | optional string external_secure_tcp_address = 5; 361 | optional int32 external_secure_tcp_port = 6; 362 | } 363 | } 364 | 365 | message ScavengeDatabase { 366 | } 367 | 368 | message ScavengeDatabaseCompleted { 369 | 370 | enum ScavengeResult { 371 | Success = 0; 372 | InProgress = 1; 373 | Failed = 2; 374 | } 375 | 376 | required ScavengeResult result = 1; 377 | optional string error = 2; 378 | required int32 total_time_ms = 3; 379 | required int64 total_space_saved = 4; 380 | } 381 | 382 | message IdentifyClient { 383 | required int32 version = 1; 384 | optional string connection_name = 2; 385 | } 386 | 387 | message ClientIdentified { 388 | } 389 | -------------------------------------------------------------------------------- /protos/tcp/rustproto.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | 3 | import "google/protobuf/descriptor.proto"; 4 | 5 | // see https://github.com/gogo/protobuf/blob/master/gogoproto/gogo.proto 6 | // for the original idea 7 | 8 | package 
rustproto; 9 | 10 | extend google.protobuf.FileOptions { 11 | // When true, oneof field is generated public 12 | optional bool expose_oneof_all = 17001; 13 | // When true all fields are public, and not accessors generated 14 | optional bool expose_fields_all = 17003; 15 | // When false, `get_`, `set_`, `mut_` etc. accessors are not generated 16 | optional bool generate_accessors_all = 17004; 17 | // Use `bytes::Bytes` for `bytes` fields 18 | optional bool carllerche_bytes_for_bytes_all = 17011; 19 | // Use `bytes::Bytes` for `string` fields 20 | optional bool carllerche_bytes_for_string_all = 17012; 21 | } 22 | 23 | extend google.protobuf.MessageOptions { 24 | // When true, oneof field is generated public 25 | optional bool expose_oneof = 17001; 26 | // When true all fields are public, and not accessors generated 27 | optional bool expose_fields = 17003; 28 | // When false, `get_`, `set_`, `mut_` etc. accessors are not generated 29 | optional bool generate_accessors = 17004; 30 | // Use `bytes::Bytes` for `bytes` fields 31 | optional bool carllerche_bytes_for_bytes = 17011; 32 | // Use `bytes::Bytes` for `string` fields 33 | optional bool carllerche_bytes_for_string = 17012; 34 | } 35 | 36 | extend google.protobuf.FieldOptions { 37 | // When true all fields are public, and not accessors generated 38 | optional bool expose_fields_field = 17003; 39 | // When false, `get_`, `set_`, `mut_` etc. 
accessors are not generated 40 | optional bool generate_accessors_field = 17004; 41 | // Use `bytes::Bytes` for `bytes` fields 42 | optional bool carllerche_bytes_for_bytes_field = 17011; 43 | // Use `bytes::Bytes` for `string` fields 44 | optional bool carllerche_bytes_for_string_field = 17012; 45 | } 46 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2018" 2 | -------------------------------------------------------------------------------- /src/connection.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | use std::time::Duration; 3 | 4 | use futures::channel::mpsc::{channel, Receiver, Sender}; 5 | use futures::sink::SinkExt; 6 | use futures::stream::StreamExt; 7 | 8 | use crate::discovery; 9 | use crate::internal::commands; 10 | use crate::internal::driver::{Driver, Report}; 11 | use crate::internal::messaging::{Msg, OpMsg}; 12 | use crate::types::{self, ClusterSettings, OperationError, Settings, StreamMetadata}; 13 | 14 | /// Represents a connection to a single node. `Client` maintains a full duplex 15 | /// connection to the EventStore server. An EventStore connection operates 16 | /// quite differently than say a SQL connection. Normally when you use an 17 | /// EventStore connection you want to keep the connection open for a much 18 | /// longer of time than when you use a SQL connection. 19 | /// 20 | /// Another difference is that with the EventStore connection, all operations 21 | /// are handled in a full async manner (even if you call the synchronous 22 | /// behaviors). Many threads can use an EventStore connection at the same time 23 | /// or a single thread can make many asynchronous requests. To get the most 24 | /// performance out of the connection, it is generally recommended to use it 25 | /// in this way. 
26 | #[derive(Clone)] 27 | pub struct Connection { 28 | sender: Sender, 29 | } 30 | 31 | /// Helps constructing a connection to the server. 32 | pub struct ConnectionBuilder { 33 | pub settings: Settings, 34 | } 35 | 36 | impl ConnectionBuilder { 37 | /// Maximum delay of inactivity before the client sends a heartbeat request. 38 | pub fn heartbeat_delay(mut self, delay: Duration) -> Self { 39 | self.settings.heartbeat_delay = delay; 40 | self 41 | } 42 | 43 | /// Maximum delay the server has to issue a heartbeat response. 44 | pub fn heartbeat_timeout(mut self, timeout: Duration) -> Self { 45 | self.settings.heartbeat_timeout = timeout; 46 | self 47 | } 48 | 49 | /// Delay in which an operation will be retried if no response arrived. 50 | pub fn operation_timeout(mut self, timeout: Duration) -> Self { 51 | self.settings.operation_timeout = timeout; 52 | self 53 | } 54 | 55 | /// Retry strategy when an operation has timeout. 56 | pub fn operation_retry(mut self, strategy: types::Retry) -> Self { 57 | self.settings.operation_retry = strategy; 58 | self 59 | } 60 | 61 | /// Retry strategy when failing to connect. 62 | pub fn connection_retry(mut self, strategy: types::Retry) -> Self { 63 | self.settings.connection_retry = strategy; 64 | self 65 | } 66 | 67 | /// 'Credentials' to use if other `Credentials` are not explicitly supplied 68 | /// when issuing commands. 69 | pub fn with_default_user(mut self, user: types::Credentials) -> Self { 70 | self.settings.default_user = Some(user); 71 | self 72 | } 73 | 74 | /// Default connection name. 75 | pub fn with_connection_name(mut self, name: S) -> Self 76 | where 77 | S: AsRef, 78 | { 79 | self.settings.connection_name = Some(name.as_ref().to_owned()); 80 | self 81 | } 82 | 83 | /// The period used to check pending command. Those checks include if the 84 | /// the connection has timeout or if the command was issued with a 85 | /// different connection. 
86 | pub fn operation_check_period(mut self, period: Duration) -> Self { 87 | self.settings.operation_check_period = period; 88 | self 89 | } 90 | 91 | /// Maximum delay to create a successful connection to a node. 92 | pub fn connection_timeout(mut self, period: Duration) -> Self { 93 | self.settings.connection_timeout = period; 94 | self 95 | } 96 | 97 | /// Maximum delay to physically connect to a node. This property differs from 98 | /// `connection_timeout` by referencing the delay to have a connected socket to a node, whereas 99 | /// `connection_timeout` refers to the whole connection, validation included. 100 | pub fn socket_connection_timeout(mut self, period: Duration) -> Self { 101 | self.settings.socket_connection_timeout = period; 102 | self 103 | } 104 | 105 | #[cfg(feature = "tls")] 106 | /// Enable secure connection with the server/cluster. 107 | pub fn enable_secure_connection(mut self, config: crate::SecureSettings) -> Self { 108 | self.settings.tls_client_config = Some(config); 109 | self 110 | } 111 | 112 | /// Creates a connection to a single EventStore node. The connection will 113 | /// start right away. 114 | pub async fn single_node_connection(self, addr: SocketAddr) -> Connection { 115 | self.start_common_with_runtime(DiscoveryProcess::Static(addr)) 116 | .await 117 | } 118 | 119 | /// Creates a connection to a cluster of EventStore nodes. 
120 | pub async fn cluster_nodes_connection(self, setts: ClusterSettings) -> Connection { 121 | self.start_common_with_runtime(DiscoveryProcess::Cluster(Box::new(setts))) 122 | .await 123 | } 124 | 125 | async fn start_common_with_runtime(self, discovery: DiscoveryProcess) -> Connection { 126 | let mut client = Connection::make(self.settings, discovery); 127 | 128 | client.start().await; 129 | 130 | client 131 | } 132 | } 133 | 134 | const DEFAULT_BOX_SIZE: usize = 500; 135 | 136 | async fn connection_state_machine( 137 | mut sender: Sender, 138 | mut recv: Receiver, 139 | mut driver: Driver, 140 | ) { 141 | async fn closing(sender: &mut Sender, driver: &mut Driver) { 142 | driver.close_connection(); 143 | let _ = sender.send(Msg::Marker).await; 144 | 145 | info!("Closing the connection..."); 146 | info!("Start clearing uncomplete operations..."); 147 | } 148 | 149 | // Live state 150 | while let Some(msg) = recv.next().await { 151 | match msg { 152 | Msg::Start => driver.start().await, 153 | Msg::Establish(endpoint) => driver.on_establish(endpoint), 154 | Msg::Established(id) => driver.on_established(id).await, 155 | Msg::ConnectionClosed(conn_id, error) => driver.on_connection_closed(conn_id, &error), 156 | Msg::Arrived(pkg) => driver.on_package_arrived(pkg).await, 157 | Msg::Transmit(pkg, mailbox) => driver.on_transmit(mailbox, pkg).await, 158 | Msg::Send(pkg) => driver.on_send_pkg(pkg).await, 159 | 160 | Msg::Tick => { 161 | if let Report::Quit = driver.on_tick().await { 162 | closing(&mut sender, &mut driver).await; 163 | break; 164 | } 165 | } 166 | 167 | // It's impossible to receive `Msg::Marker` at `State::Live` state. 168 | // However we can hit two birds with one stone with pattern-matching 169 | // coverage checker. 
170 | Msg::Shutdown | Msg::Marker => { 171 | info!("User-shutdown request received."); 172 | closing(&mut sender, &mut driver).await; 173 | break; 174 | } 175 | } 176 | } 177 | 178 | // Closing state 179 | while let Some(msg) = recv.next().await { 180 | match msg { 181 | Msg::Transmit(_, mut mailbox) => { 182 | let _ = mailbox.send(OpMsg::Failed(OperationError::Aborted)).await; 183 | } 184 | 185 | Msg::Arrived(pkg) => driver.on_package_arrived(pkg).await, 186 | Msg::Marker => { 187 | // We've reached the end of our checkpoint, we can properly 188 | // aborts uncompleted operations. 189 | driver.abort().await; 190 | info!("Connection closed properly."); 191 | 192 | break; 193 | } 194 | 195 | _ => {} 196 | } 197 | } 198 | } 199 | 200 | enum DiscoveryProcess { 201 | Static(SocketAddr), 202 | Cluster(Box), 203 | } 204 | 205 | impl Connection { 206 | /// Return a connection builder. 207 | pub fn builder() -> ConnectionBuilder { 208 | ConnectionBuilder { 209 | settings: Default::default(), 210 | } 211 | } 212 | 213 | fn make(settings: Settings, discovery: DiscoveryProcess) -> Connection { 214 | let sender = Self::initialize(settings, discovery); 215 | 216 | Connection { sender } 217 | } 218 | 219 | fn initialize(settings: Settings, discovery: DiscoveryProcess) -> Sender { 220 | let (sender, recv) = channel(DEFAULT_BOX_SIZE); 221 | let (start_discovery, run_discovery) = futures::channel::mpsc::channel(DEFAULT_BOX_SIZE); 222 | let cloned_sender = sender.clone(); 223 | 224 | match discovery { 225 | DiscoveryProcess::Static(addr) => { 226 | let endpoint = types::Endpoint::from_addr(addr); 227 | let action = discovery::constant::discover(run_discovery, sender.clone(), endpoint); 228 | 229 | tokio::spawn(action); 230 | } 231 | 232 | DiscoveryProcess::Cluster(setts) => { 233 | #[cfg(feature = "tls")] 234 | { 235 | let secure_mode = settings.tls_client_config.is_some(); 236 | let action = discovery::cluster::discover( 237 | run_discovery, 238 | sender.clone(), 239 | *setts, 240 
| secure_mode, 241 | ); 242 | 243 | tokio::spawn(action); 244 | } 245 | #[cfg(not(feature = "tls"))] 246 | { 247 | let action = 248 | discovery::cluster::discover(run_discovery, sender.clone(), *setts, false); 249 | 250 | tokio::spawn(action); 251 | } 252 | } 253 | }; 254 | 255 | let driver = Driver::new(settings, start_discovery, sender.clone()); 256 | 257 | tokio::spawn(connection_state_machine(cloned_sender, recv, driver)); 258 | sender 259 | } 260 | 261 | async fn start(&mut self) { 262 | let _ = self.sender.send(Msg::Start).await; 263 | } 264 | 265 | /// Sends events to a given stream. 266 | pub fn write_events(&self, stream: S) -> commands::WriteEvents 267 | where 268 | S: AsRef, 269 | { 270 | commands::WriteEvents::new(self.sender.clone(), stream) 271 | } 272 | 273 | /// Sets the metadata for a stream. 274 | pub fn write_stream_metadata( 275 | &self, 276 | stream: S, 277 | metadata: StreamMetadata, 278 | ) -> commands::WriteStreamMetadata 279 | where 280 | S: AsRef, 281 | { 282 | commands::WriteStreamMetadata::new(self.sender.clone(), stream, metadata) 283 | } 284 | 285 | /// Reads a single event from a given stream. 286 | pub fn read_event(&self, stream: S, event_number: i64) -> commands::ReadEvent 287 | where 288 | S: AsRef, 289 | { 290 | commands::ReadEvent::new(self.sender.clone(), stream, event_number) 291 | } 292 | 293 | /// Gets the metadata of a stream. 294 | pub fn read_stream_metadata(&self, stream: S) -> commands::ReadStreamMetadata 295 | where 296 | S: AsRef, 297 | { 298 | commands::ReadStreamMetadata::new(self.sender.clone(), stream) 299 | } 300 | 301 | /// Starts a transaction on a given stream. 302 | pub fn start_transaction(&self, stream: S) -> commands::TransactionStart 303 | where 304 | S: AsRef, 305 | { 306 | commands::TransactionStart::new(self.sender.clone(), stream) 307 | } 308 | 309 | /// Reads events from a given stream. The reading can be done forward and 310 | /// backward. 
311 | pub fn read_stream(&self, stream: S) -> commands::ReadStreamEvents 312 | where 313 | S: AsRef, 314 | { 315 | commands::ReadStreamEvents::new(self.sender.clone(), stream) 316 | } 317 | 318 | /// Reads events for the system stream `$all`. The reading can be done 319 | /// forward and backward. 320 | pub fn read_all(&self) -> commands::ReadAllEvents { 321 | commands::ReadAllEvents::new(self.sender.clone()) 322 | } 323 | 324 | /// Deletes a given stream. By default, the server performs a soft delete, 325 | /// More information can be found on the [Deleting streams and events] 326 | /// page. 327 | /// 328 | /// [Deleting stream and events]: https://eventstore.org/docs/server/deleting-streams-and-events/index.html 329 | pub fn delete_stream(&self, stream: S) -> commands::DeleteStream 330 | where 331 | S: AsRef, 332 | { 333 | commands::DeleteStream::new(self.sender.clone(), stream) 334 | } 335 | 336 | /// Subscribes to a given stream. You will get notified of each new events 337 | /// written to this stream. 338 | pub fn subcribe_to_stream(&self, stream_id: S) -> commands::SubscribeToStream 339 | where 340 | S: AsRef, 341 | { 342 | commands::SubscribeToStream::new(self.sender.clone(), stream_id) 343 | } 344 | 345 | /// Subscribes to a given stream. This kind of subscription specifies a 346 | /// starting point (by default, the beginning of a stream). For a regular 347 | /// stream, that starting point will be an event number. For the system 348 | /// stream `$all`, it will be a position in the transaction file 349 | /// (see [`subscribe_to_all_from`]). This subscription will fetch every event 350 | /// until the end of the stream, then will dispatch subsequently written 351 | /// events. 
352 | /// 353 | /// For example, if a starting point of 50 is specified when a stream has 354 | /// 100 events in it, the subscriber can expect to see events 51 through 355 | /// 100, and then any events subsequenttly written events until such time 356 | /// as the subscription is dropped or closed. 357 | /// 358 | /// [`subscribe_to_all_from`]: #method.subscribe_to_all_from 359 | pub fn subscribe_to_stream_from(&self, stream: S) -> commands::RegularCatchupSubscribe 360 | where 361 | S: AsRef, 362 | { 363 | commands::RegularCatchupSubscribe::new(self.sender.clone(), stream) 364 | } 365 | 366 | /// Like [`subscribe_to_stream_from`] but specific to system `$all` stream. 367 | /// 368 | /// [`subscribe_to_stream_from`]: #method.subscribe_to_stream_from 369 | pub fn subscribe_to_all_from(&self) -> commands::AllCatchupSubscribe { 370 | commands::AllCatchupSubscribe::new(self.sender.clone()) 371 | } 372 | 373 | /// Creates a persistent subscription group on a stream. 374 | /// 375 | /// Persistent subscriptions are special kind of subscription where the 376 | /// server remembers the state of the subscription. This allows for many 377 | /// different modes of operations compared to a regular or catchup 378 | /// subscription where the client holds the subscription state. 379 | pub fn create_persistent_subscription( 380 | &self, 381 | stream_id: S, 382 | group_name: S, 383 | ) -> commands::CreatePersistentSubscription 384 | where 385 | S: AsRef, 386 | { 387 | commands::CreatePersistentSubscription::new(stream_id, group_name, self.sender.clone()) 388 | } 389 | 390 | /// Updates a persistent subscription group on a stream. 391 | pub fn update_persistent_subscription( 392 | &self, 393 | stream_id: S, 394 | group_name: S, 395 | ) -> commands::UpdatePersistentSubscription 396 | where 397 | S: AsRef, 398 | { 399 | commands::UpdatePersistentSubscription::new(stream_id, group_name, self.sender.clone()) 400 | } 401 | 402 | /// Deletes a persistent subscription group on a stream. 
403 | pub fn delete_persistent_subscription( 404 | &self, 405 | stream_id: S, 406 | group_name: S, 407 | ) -> commands::DeletePersistentSubscription 408 | where 409 | S: AsRef, 410 | { 411 | commands::DeletePersistentSubscription::new(stream_id, group_name, self.sender.clone()) 412 | } 413 | 414 | /// Connects to a persistent subscription group on a stream. 415 | pub fn connect_persistent_subscription( 416 | &self, 417 | stream_id: S, 418 | group_name: S, 419 | ) -> commands::ConnectToPersistentSubscription 420 | where 421 | S: AsRef, 422 | { 423 | commands::ConnectToPersistentSubscription::new(stream_id, group_name, self.sender.clone()) 424 | } 425 | 426 | /// Closes the connection to the server. 427 | /// 428 | /// When closing a connection, a `Connection` might have ongoing operations 429 | /// running. `shutdown` makes sure the `Connection` has handled 430 | /// everything properly when returning. 431 | /// 432 | /// `shutdown` blocks the current thread. 433 | pub async fn shutdown(mut self) { 434 | let _ = self.sender.send(Msg::Shutdown).await; 435 | } 436 | } 437 | -------------------------------------------------------------------------------- /src/discovery.rs: -------------------------------------------------------------------------------- 1 | pub mod cluster; 2 | pub mod constant; 3 | -------------------------------------------------------------------------------- /src/discovery/cluster.rs: -------------------------------------------------------------------------------- 1 | use crate::internal::messaging::Msg; 2 | use crate::types::{ClusterSettings, Either, Endpoint, GossipSeed, NodePreference}; 3 | use futures::channel::mpsc; 4 | use futures::sink::SinkExt; 5 | use futures::stream::StreamExt; 6 | use rand::rngs::SmallRng; 7 | use rand::seq::SliceRandom; 8 | use rand::SeedableRng; 9 | use std::iter::FromIterator; 10 | use std::net::{AddrParseError, SocketAddr}; 11 | use std::time::Duration; 12 | use uuid::Uuid; 13 | 14 | pub(crate) async fn discover( 15 | 
mut consumer: mpsc::Receiver>, 16 | sender: mpsc::Sender, 17 | settings: ClusterSettings, 18 | secure_mode: bool, 19 | ) { 20 | let preference = NodePreference::Random; 21 | let client = reqwest::Client::new(); 22 | let mut previous_candidates = None; 23 | let mut rng = SmallRng::from_entropy(); 24 | 25 | async fn discover( 26 | rng: &mut SmallRng, 27 | client: &reqwest::Client, 28 | settings: &ClusterSettings, 29 | previous_candidates: &mut Option>, 30 | preference: NodePreference, 31 | failed_endpoint: Option, 32 | ) -> Option { 33 | let candidates = match previous_candidates.take() { 34 | Some(old_candidates) => candidates_from_old_gossip(failed_endpoint, old_candidates), 35 | 36 | None => match candidates_from_dns(rng, &settings).await { 37 | Ok(seeds) => seeds, 38 | Err(e) => { 39 | error!("Error when performing DNS resolution: {}", e); 40 | Vec::new() 41 | } 42 | }, 43 | }; 44 | 45 | let mut outcome = None; 46 | 47 | for candidate in candidates { 48 | let result = get_gossip_from(client, candidate).await; 49 | let result: std::io::Result> = result.and_then(|member_info| { 50 | let members: Vec> = member_info 51 | .into_iter() 52 | .map(Member::from_member_info) 53 | .collect(); 54 | 55 | Result::from_iter(members) 56 | }); 57 | 58 | match result { 59 | Err(error) => { 60 | info!("candidate [{}] resolution error: {}", candidate, error); 61 | 62 | continue; 63 | } 64 | 65 | Ok(members) => { 66 | if members.is_empty() { 67 | continue; 68 | } else { 69 | outcome = determine_best_node(rng, preference, members.as_slice()); 70 | 71 | if outcome.is_some() { 72 | *previous_candidates = Some(members); 73 | break; 74 | } 75 | 76 | warn!("determine_best_node found no candidate!"); 77 | } 78 | } 79 | } 80 | } 81 | 82 | outcome 83 | } 84 | 85 | while let Some(failed_endpoint) = consumer.next().await { 86 | let mut att = 1usize; 87 | 88 | loop { 89 | if att > settings.max_discover_attempts { 90 | let err_msg = format!( 91 | "Failed to discover candidate in {} attempts", 92 
| settings.max_discover_attempts 93 | ); 94 | 95 | let err = std::io::Error::new(std::io::ErrorKind::NotFound, err_msg); 96 | let _ = sender 97 | .clone() 98 | .send(Msg::ConnectionClosed(Uuid::nil(), err)) 99 | .await; 100 | 101 | break; 102 | } 103 | 104 | let result_opt = discover( 105 | &mut rng, 106 | &client, 107 | &settings, 108 | &mut previous_candidates, 109 | preference, 110 | failed_endpoint, 111 | ) 112 | .await; 113 | 114 | if let Some(node) = result_opt { 115 | let _ = if secure_mode { 116 | sender 117 | .clone() 118 | .send(Msg::Establish( 119 | node.secure_tcp_endpoint 120 | .expect("We expect secure_tcp_endpoint to be defined"), 121 | )) 122 | .await 123 | } else { 124 | sender.clone().send(Msg::Establish(node.tcp_endpoint)).await 125 | }; 126 | 127 | break; 128 | } 129 | 130 | tokio::time::delay_for(Duration::from_millis(500)).await; 131 | warn!("Timeout when trying to discover candidate, retrying..."); 132 | att += 1; 133 | } 134 | } 135 | } 136 | 137 | async fn candidates_from_dns( 138 | rng: &mut SmallRng, 139 | settings: &ClusterSettings, 140 | ) -> Result, trust_dns_resolver::error::ResolveError> { 141 | let mut src = match settings.kind.as_ref() { 142 | Either::Left(seeds) => { 143 | Ok::, trust_dns_resolver::error::ResolveError>(seeds.clone().into_vec()) 144 | } 145 | Either::Right(dns) => { 146 | let lookup = dns.resolver.srv_lookup(dns.domain_name.clone()).await?; 147 | let mut seeds = Vec::new(); 148 | 149 | for ip in lookup.ip_iter() { 150 | let seed = GossipSeed::from_socket_addr(SocketAddr::new(ip, settings.gossip_port)); 151 | seeds.push(seed); 152 | } 153 | 154 | Ok(seeds) 155 | } 156 | }?; 157 | 158 | src.shuffle(rng); 159 | Ok(src) 160 | } 161 | 162 | fn candidates_from_old_gossip( 163 | failed_endpoint: Option, 164 | old_candidates: Vec, 165 | ) -> Vec { 166 | let candidates = match failed_endpoint { 167 | Some(endpoint) => old_candidates 168 | .into_iter() 169 | .filter(|member| member.external_tcp != endpoint.addr) 170 | 
.collect(), 171 | 172 | None => old_candidates, 173 | }; 174 | 175 | arrange_gossip_candidates(candidates) 176 | } 177 | 178 | fn arrange_gossip_candidates(candidates: Vec) -> Vec { 179 | let mut arranged_candidates = Candidates::new(); 180 | 181 | for member in candidates { 182 | arranged_candidates.push(member); 183 | } 184 | 185 | arranged_candidates.shuffle(); 186 | arranged_candidates.gossip_seeds() 187 | } 188 | 189 | #[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Copy, Clone)] 190 | #[serde(rename_all = "PascalCase")] 191 | enum VNodeState { 192 | Initializing, 193 | Unknown, 194 | PreReplica, 195 | CatchingUp, 196 | Clone, 197 | Slave, 198 | PreMaster, 199 | Master, 200 | Manager, 201 | ShuttingDown, 202 | Shutdown, 203 | } 204 | 205 | impl std::fmt::Display for VNodeState { 206 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 207 | use self::VNodeState::*; 208 | 209 | match self { 210 | Initializing => write!(f, "Initializing"), 211 | Unknown => write!(f, "Unknown"), 212 | PreReplica => write!(f, "PreReplica"), 213 | CatchingUp => write!(f, "CatchingUp"), 214 | Clone => write!(f, "Clone"), 215 | Slave => write!(f, "Slave"), 216 | PreMaster => write!(f, "PreMaster"), 217 | Master => write!(f, "Master"), 218 | Manager => write!(f, "Manager"), 219 | ShuttingDown => write!(f, "ShuttingDown"), 220 | Shutdown => write!(f, "Shutdown"), 221 | } 222 | } 223 | } 224 | 225 | #[derive(Serialize, Deserialize, Debug)] 226 | #[serde(rename_all = "camelCase")] 227 | struct Gossip { 228 | members: Vec, 229 | } 230 | 231 | #[derive(Serialize, Deserialize, Debug)] 232 | #[serde(rename_all = "camelCase")] 233 | struct MemberInfo { 234 | instance_id: Uuid, 235 | state: VNodeState, 236 | is_alive: bool, 237 | internal_tcp_ip: String, 238 | internal_tcp_port: u16, 239 | internal_secure_tcp_port: u16, 240 | external_tcp_ip: String, 241 | external_tcp_port: u16, 242 | external_secure_tcp_port: u16, 243 | internal_http_ip: String, 244 | 
internal_http_port: u16, 245 | external_http_ip: String, 246 | external_http_port: u16, 247 | last_commit_position: i64, 248 | writer_checkpoint: i64, 249 | chaser_checkpoint: i64, 250 | epoch_position: i64, 251 | epoch_number: i64, 252 | epoch_id: Uuid, 253 | node_priority: i64, 254 | } 255 | 256 | #[derive(Debug, Clone)] 257 | struct Member { 258 | external_tcp: SocketAddr, 259 | external_secure_tcp: Option, 260 | external_http: SocketAddr, 261 | internal_tcp: SocketAddr, 262 | internal_secure_tcp: Option, 263 | internal_http: SocketAddr, 264 | state: VNodeState, 265 | } 266 | 267 | fn addr_parse_error_to_io_error(error: AddrParseError) -> std::io::Error { 268 | std::io::Error::new(std::io::ErrorKind::InvalidData, format!("{}", error)) 269 | } 270 | 271 | impl Member { 272 | fn from_member_info(info: MemberInfo) -> std::io::Result { 273 | let external_tcp = parse_socket_addr(format!( 274 | "{}:{}", 275 | info.external_tcp_ip, info.external_tcp_port 276 | ))?; 277 | 278 | let external_secure_tcp = { 279 | if info.external_secure_tcp_port < 1 { 280 | Ok(None) 281 | } else { 282 | parse_socket_addr(format!( 283 | "{}:{}", 284 | info.external_tcp_ip, info.external_secure_tcp_port 285 | )) 286 | .map(Some) 287 | } 288 | }?; 289 | 290 | let external_http = parse_socket_addr(format!( 291 | "{}:{}", 292 | info.external_http_ip, info.external_http_port 293 | ))?; 294 | 295 | let internal_tcp = parse_socket_addr(format!( 296 | "{}:{}", 297 | info.internal_tcp_ip, info.internal_tcp_port 298 | ))?; 299 | 300 | let internal_secure_tcp = { 301 | if info.internal_secure_tcp_port < 1 { 302 | Ok(None) 303 | } else { 304 | parse_socket_addr(format!( 305 | "{}:{}", 306 | info.internal_tcp_ip, info.internal_secure_tcp_port 307 | )) 308 | .map(Some) 309 | } 310 | }?; 311 | 312 | let internal_http = parse_socket_addr(format!( 313 | "{}:{}", 314 | info.internal_http_ip, info.internal_http_port 315 | ))?; 316 | 317 | let member = Member { 318 | external_tcp, 319 | external_secure_tcp, 
320 | external_http, 321 | internal_tcp, 322 | internal_secure_tcp, 323 | internal_http, 324 | state: info.state, 325 | }; 326 | 327 | Ok(member) 328 | } 329 | } 330 | 331 | fn parse_socket_addr(str_repr: String) -> std::io::Result { 332 | str_repr.parse().map_err(addr_parse_error_to_io_error) 333 | } 334 | 335 | struct Candidates { 336 | nodes: Vec, 337 | managers: Vec, 338 | } 339 | 340 | impl Candidates { 341 | fn new() -> Candidates { 342 | Candidates { 343 | nodes: vec![], 344 | managers: vec![], 345 | } 346 | } 347 | 348 | fn push(&mut self, member: Member) { 349 | if let VNodeState::Manager = member.state { 350 | self.managers.push(member); 351 | } else { 352 | self.nodes.push(member); 353 | } 354 | } 355 | 356 | fn shuffle(&mut self) { 357 | let mut rng = rand::thread_rng(); 358 | 359 | self.nodes.shuffle(&mut rng); 360 | self.managers.shuffle(&mut rng); 361 | } 362 | 363 | fn gossip_seeds(mut self) -> Vec { 364 | self.nodes.extend(self.managers); 365 | 366 | self.nodes 367 | .into_iter() 368 | .map(|member| GossipSeed::from_socket_addr(member.external_http)) 369 | .collect() 370 | } 371 | } 372 | 373 | pub(crate) struct NodeEndpoints { 374 | pub tcp_endpoint: Endpoint, 375 | pub secure_tcp_endpoint: Option, 376 | } 377 | 378 | async fn get_gossip_from( 379 | client: &reqwest::Client, 380 | gossip: GossipSeed, 381 | ) -> std::io::Result> { 382 | let url = gossip.url()?; 383 | 384 | let result = client.get(url).send().await; 385 | 386 | let resp = result.map_err(|error| { 387 | let msg = format!("[{}] responded with [{}]", gossip, error); 388 | std::io::Error::new(std::io::ErrorKind::Other, msg) 389 | })?; 390 | 391 | match resp.json::().await { 392 | Ok(gossip) => Ok(gossip.members), 393 | Err(error) => { 394 | let msg = format!("[{}] responded with [{}]", gossip, error); 395 | Err(std::io::Error::new(std::io::ErrorKind::Other, msg)) 396 | } 397 | } 398 | } 399 | 400 | fn determine_best_node( 401 | rng: &mut SmallRng, 402 | preference: NodePreference, 403 | 
members: &[Member], 404 | ) -> Option { 405 | fn allowed_states(state: VNodeState) -> bool { 406 | match state { 407 | VNodeState::Manager | VNodeState::ShuttingDown | VNodeState::Shutdown => false, 408 | _ => true, 409 | } 410 | } 411 | 412 | let mut members: Vec<&Member> = members 413 | .iter() 414 | .filter(|member| allowed_states(member.state)) 415 | .collect(); 416 | 417 | members.as_mut_slice().sort_by(|a, b| a.state.cmp(&b.state)); 418 | 419 | //TODO - Implement other node preferences. 420 | if let NodePreference::Random = preference { 421 | members.shuffle(rng); 422 | } 423 | 424 | let member_opt = members.into_iter().next(); 425 | 426 | member_opt.map(|member| { 427 | info!( 428 | "Discovering: found best choice [{},{:?}] ({})", 429 | member.external_tcp, member.external_secure_tcp, member.state 430 | ); 431 | 432 | NodeEndpoints { 433 | tcp_endpoint: Endpoint::from_addr(member.external_tcp), 434 | secure_tcp_endpoint: member.external_secure_tcp.map(Endpoint::from_addr), 435 | } 436 | }) 437 | } 438 | -------------------------------------------------------------------------------- /src/discovery/constant.rs: -------------------------------------------------------------------------------- 1 | use crate::internal::messaging::Msg; 2 | use crate::types::Endpoint; 3 | use futures::channel::mpsc; 4 | use futures::sink::SinkExt; 5 | use futures::stream::StreamExt; 6 | 7 | pub(crate) async fn discover( 8 | mut consumer: mpsc::Receiver>, 9 | mut sender: mpsc::Sender, 10 | endpoint: Endpoint, 11 | ) { 12 | while consumer.next().await.is_some() { 13 | let _ = sender.send(Msg::Establish(endpoint)).await; 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /src/es6/connection.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use crate::es6::commands; 4 | use crate::es6::grpc::event_store::client::{persistent, streams}; 5 | use crate::types::{self, 
Settings}; 6 | 7 | use http::uri::Uri; 8 | use tonic::transport::Channel; 9 | 10 | struct NoVerification; 11 | 12 | impl rustls::ServerCertVerifier for NoVerification { 13 | fn verify_server_cert( 14 | &self, 15 | _roots: &rustls::RootCertStore, 16 | _presented_certs: &[rustls::Certificate], 17 | _dns_name: webpki::DNSNameRef, 18 | _ocsp_response: &[u8], 19 | ) -> Result { 20 | Ok(rustls::ServerCertVerified::assertion()) 21 | } 22 | } 23 | 24 | /// Represents a connection to a single node. `Client` maintains a full duplex 25 | /// connection to the EventStore server. An EventStore connection operates 26 | /// quite differently than say a SQL connection. Normally when you use an 27 | /// EventStore connection you want to keep the connection open for a much 28 | /// longer of time than when you use a SQL connection. 29 | /// 30 | /// Another difference is that with the EventStore connection, all operations 31 | /// are handled in a full async manner (even if you call the synchronous 32 | /// behaviors). Many threads can use an EventStore connection at the same time 33 | /// or a single thread can make many asynchronous requests. To get the most 34 | /// performance out of the connection, it is generally recommended to use it 35 | /// in this way. 36 | #[derive(Clone)] 37 | pub struct Connection { 38 | settings: Settings, 39 | streams: streams::streams_client::StreamsClient, 40 | persistent: persistent::persistent_subscriptions_client::PersistentSubscriptionsClient, 41 | } 42 | 43 | /// Helps constructing a connection to the server. 44 | pub struct ConnectionBuilder { 45 | pub settings: Settings, 46 | disable_certs_validation: bool, 47 | } 48 | 49 | impl ConnectionBuilder { 50 | /// Maximum delay of inactivity before the client sends a heartbeat request. 51 | pub fn heartbeat_delay(mut self, delay: Duration) -> Self { 52 | self.settings.heartbeat_delay = delay; 53 | self 54 | } 55 | 56 | /// Maximum delay the server has to issue a heartbeat response. 
57 | pub fn heartbeat_timeout(mut self, timeout: Duration) -> Self { 58 | self.settings.heartbeat_timeout = timeout; 59 | self 60 | } 61 | 62 | /// Delay in which an operation will be retried if no response arrived. 63 | pub fn operation_timeout(mut self, timeout: Duration) -> Self { 64 | self.settings.operation_timeout = timeout; 65 | self 66 | } 67 | 68 | /// Retry strategy when an operation has timeout. 69 | pub fn operation_retry(mut self, strategy: types::Retry) -> Self { 70 | self.settings.operation_retry = strategy; 71 | self 72 | } 73 | 74 | /// Retry strategy when failing to connect. 75 | pub fn connection_retry(mut self, strategy: types::Retry) -> Self { 76 | self.settings.connection_retry = strategy; 77 | self 78 | } 79 | 80 | /// 'Credentials' to use if other `Credentials` are not explicitly supplied 81 | /// when issuing commands. 82 | pub fn with_default_user(mut self, user: types::Credentials) -> Self { 83 | self.settings.default_user = Some(user); 84 | self 85 | } 86 | 87 | /// Default connection name. 88 | pub fn with_connection_name(mut self, name: S) -> Self 89 | where 90 | S: AsRef, 91 | { 92 | self.settings.connection_name = Some(name.as_ref().to_owned()); 93 | self 94 | } 95 | 96 | /// The period used to check pending command. Those checks include if the 97 | /// the connection has timeout or if the command was issued with a 98 | /// different connection. 99 | pub fn operation_check_period(mut self, period: Duration) -> Self { 100 | self.settings.operation_check_period = period; 101 | self 102 | } 103 | 104 | /// Disable the use of certificate validation. 105 | /// 106 | /// # Warning 107 | /// 108 | /// You should think very carefully before using this method. If 109 | /// invalid certificates are trusted, *any* certificate for *any* site 110 | /// will be trusted for use. This includes expired certificates. This 111 | /// introduces significant vulnerabilities, and should only be used 112 | /// as a last resort. 
113 | pub fn disable_server_certificate_validation(mut self) -> Self { 114 | self.disable_certs_validation = true; 115 | self 116 | } 117 | 118 | /// Creates a connection to a single EventStore node. The connection will 119 | /// start right away. 120 | pub async fn single_node_connection( 121 | self, 122 | uri: Uri, 123 | ) -> Result> { 124 | Connection::initialize(self.settings, self.disable_certs_validation, uri).await 125 | } 126 | } 127 | 128 | impl Connection { 129 | /// Return a connection builder. 130 | pub fn builder() -> ConnectionBuilder { 131 | ConnectionBuilder { 132 | settings: Default::default(), 133 | disable_certs_validation: false, 134 | } 135 | } 136 | 137 | async fn initialize( 138 | settings: Settings, 139 | disable_certs_validation: bool, 140 | uri: http::uri::Uri, 141 | ) -> Result> { 142 | let mut channel = Channel::builder(uri); 143 | 144 | if disable_certs_validation { 145 | let mut rustls_config = rustls::ClientConfig::new(); 146 | let protocols = vec![(b"h2".to_vec())]; 147 | 148 | rustls_config.set_protocols(protocols.as_slice()); 149 | 150 | rustls_config 151 | .dangerous() 152 | .set_certificate_verifier(std::sync::Arc::new(NoVerification)); 153 | 154 | let client_config = 155 | tonic::transport::ClientTlsConfig::new().rustls_client_config(rustls_config); 156 | 157 | channel = channel.tls_config(client_config)?; 158 | } 159 | 160 | let channel = channel.connect().await?; 161 | 162 | let conn = Connection { 163 | settings, 164 | streams: streams::streams_client::StreamsClient::new(channel.clone()), 165 | persistent: 166 | persistent::persistent_subscriptions_client::PersistentSubscriptionsClient::new( 167 | channel, 168 | ), 169 | }; 170 | 171 | Ok(conn) 172 | } 173 | 174 | /// Sends events to a given stream. 
175 | pub fn write_events(&self, stream: String) -> commands::WriteEvents { 176 | commands::WriteEvents::new( 177 | self.streams.clone(), 178 | stream, 179 | self.settings.default_user.clone(), 180 | ) 181 | } 182 | 183 | /// Reads events from a given stream. The reading can be done forward and 184 | /// backward. 185 | pub fn read_stream(&self, stream: String) -> commands::ReadStreamEvents { 186 | commands::ReadStreamEvents::new( 187 | self.streams.clone(), 188 | stream, 189 | self.settings.default_user.clone(), 190 | ) 191 | } 192 | 193 | /// Reads events for the system stream `$all`. The reading can be done 194 | /// forward and backward. 195 | pub fn read_all(&self) -> commands::ReadAllEvents { 196 | commands::ReadAllEvents::new(self.streams.clone(), self.settings.default_user.clone()) 197 | } 198 | 199 | /// Deletes a given stream. By default, the server performs a soft delete, 200 | /// More information can be found on the [Deleting streams and events] 201 | /// page. 202 | /// 203 | /// [Deleting stream and events]: https://eventstore.org/docs/server/deleting-streams-and-events/index.html 204 | pub fn delete_stream(&self, stream: String) -> commands::DeleteStream { 205 | commands::DeleteStream::new( 206 | self.streams.clone(), 207 | stream, 208 | self.settings.default_user.clone(), 209 | ) 210 | } 211 | 212 | /// Subscribes to a given stream. This kind of subscription specifies a 213 | /// starting point (by default, the beginning of a stream). For a regular 214 | /// stream, that starting point will be an event number. For the system 215 | /// stream `$all`, it will be a position in the transaction file 216 | /// (see [`subscribe_to_all_from`]). This subscription will fetch every event 217 | /// until the end of the stream, then will dispatch subsequently written 218 | /// events. 
219 | /// 220 | /// For example, if a starting point of 50 is specified when a stream has 221 | /// 100 events in it, the subscriber can expect to see events 51 through 222 | /// 100, and then any events subsequenttly written events until such time 223 | /// as the subscription is dropped or closed. 224 | /// 225 | /// [`subscribe_to_all_from`]: #method.subscribe_to_all_from 226 | pub fn subscribe_to_stream_from(&self, stream: String) -> commands::RegularCatchupSubscribe { 227 | commands::RegularCatchupSubscribe::new( 228 | self.streams.clone(), 229 | stream, 230 | self.settings.default_user.clone(), 231 | ) 232 | } 233 | 234 | /// Like [`subscribe_to_stream_from`] but specific to system `$all` stream. 235 | /// 236 | /// [`subscribe_to_stream_from`]: #method.subscribe_to_stream_from 237 | pub fn subscribe_to_all_from(&self) -> commands::AllCatchupSubscribe { 238 | commands::AllCatchupSubscribe::new(self.streams.clone(), self.settings.default_user.clone()) 239 | } 240 | 241 | /// Creates a persistent subscription group on a stream. 242 | /// 243 | /// Persistent subscriptions are special kind of subscription where the 244 | /// server remembers the state of the subscription. This allows for many 245 | /// different modes of operations compared to a regular or catchup 246 | /// subscription where the client holds the subscription state. 247 | pub fn create_persistent_subscription( 248 | &self, 249 | stream_id: String, 250 | group_name: String, 251 | ) -> commands::CreatePersistentSubscription { 252 | commands::CreatePersistentSubscription::new( 253 | self.persistent.clone(), 254 | stream_id, 255 | group_name, 256 | self.settings.default_user.clone(), 257 | ) 258 | } 259 | 260 | /// Updates a persistent subscription group on a stream. 
261 | pub fn update_persistent_subscription( 262 | &self, 263 | stream_id: String, 264 | group_name: String, 265 | ) -> commands::UpdatePersistentSubscription { 266 | commands::UpdatePersistentSubscription::new( 267 | self.persistent.clone(), 268 | stream_id, 269 | group_name, 270 | self.settings.default_user.clone(), 271 | ) 272 | } 273 | 274 | /// Deletes a persistent subscription group on a stream. 275 | pub fn delete_persistent_subscription( 276 | &self, 277 | stream_id: String, 278 | group_name: String, 279 | ) -> commands::DeletePersistentSubscription { 280 | commands::DeletePersistentSubscription::new( 281 | self.persistent.clone(), 282 | stream_id, 283 | group_name, 284 | self.settings.default_user.clone(), 285 | ) 286 | } 287 | 288 | /// Connects to a persistent subscription group on a stream. 289 | pub fn connect_persistent_subscription( 290 | &self, 291 | stream_id: String, 292 | group_name: String, 293 | ) -> commands::ConnectToPersistentSubscription { 294 | commands::ConnectToPersistentSubscription::new( 295 | self.persistent.clone(), 296 | stream_id, 297 | group_name, 298 | self.settings.default_user.clone(), 299 | ) 300 | } 301 | 302 | /// Closes the connection to the server. 303 | /// 304 | /// When closing a connection, a `Connection` might have ongoing operations 305 | /// running. `shutdown` makes sure the `Connection` has handled 306 | /// everything properly when returning. 307 | /// 308 | /// `shutdown` blocks the current thread. 
309 | pub fn shutdown(self) {} 310 | } 311 | -------------------------------------------------------------------------------- /src/es6/grpc/event_store/client/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::large_enum_variant)] 2 | pub mod persistent; 3 | pub mod shared; 4 | pub mod streams; 5 | -------------------------------------------------------------------------------- /src/es6/grpc/event_store/client/persistent.rs: -------------------------------------------------------------------------------- 1 | #[derive(Clone, PartialEq, ::prost::Message)] 2 | pub struct ReadReq { 3 | #[prost(oneof = "read_req::Content", tags = "1, 2, 3")] 4 | pub content: ::std::option::Option, 5 | } 6 | pub mod read_req { 7 | #[derive(Clone, PartialEq, ::prost::Message)] 8 | pub struct Options { 9 | #[prost(message, optional, tag = "1")] 10 | pub stream_identifier: ::std::option::Option, 11 | #[prost(string, tag = "2")] 12 | pub group_name: std::string::String, 13 | #[prost(int32, tag = "3")] 14 | pub buffer_size: i32, 15 | #[prost(message, optional, tag = "4")] 16 | pub uuid_option: ::std::option::Option, 17 | } 18 | pub mod options { 19 | #[derive(Clone, PartialEq, ::prost::Message)] 20 | pub struct UuidOption { 21 | #[prost(oneof = "uuid_option::Content", tags = "1, 2")] 22 | pub content: ::std::option::Option, 23 | } 24 | pub mod uuid_option { 25 | #[derive(Clone, PartialEq, ::prost::Oneof)] 26 | pub enum Content { 27 | #[prost(message, tag = "1")] 28 | Structured(super::super::super::super::shared::Empty), 29 | #[prost(message, tag = "2")] 30 | String(super::super::super::super::shared::Empty), 31 | } 32 | } 33 | } 34 | #[derive(Clone, PartialEq, ::prost::Message)] 35 | pub struct Ack { 36 | #[prost(bytes, tag = "1")] 37 | pub id: std::vec::Vec, 38 | #[prost(message, repeated, tag = "2")] 39 | pub ids: ::std::vec::Vec, 40 | } 41 | #[derive(Clone, PartialEq, ::prost::Message)] 42 | pub struct Nack { 43 | #[prost(bytes, 
tag = "1")] 44 | pub id: std::vec::Vec, 45 | #[prost(message, repeated, tag = "2")] 46 | pub ids: ::std::vec::Vec, 47 | #[prost(enumeration = "nack::Action", tag = "3")] 48 | pub action: i32, 49 | #[prost(string, tag = "4")] 50 | pub reason: std::string::String, 51 | } 52 | pub mod nack { 53 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] 54 | #[repr(i32)] 55 | pub enum Action { 56 | Unknown = 0, 57 | Park = 1, 58 | Retry = 2, 59 | Skip = 3, 60 | Stop = 4, 61 | } 62 | } 63 | #[derive(Clone, PartialEq, ::prost::Oneof)] 64 | pub enum Content { 65 | #[prost(message, tag = "1")] 66 | Options(Options), 67 | #[prost(message, tag = "2")] 68 | Ack(Ack), 69 | #[prost(message, tag = "3")] 70 | Nack(Nack), 71 | } 72 | } 73 | #[derive(Clone, PartialEq, ::prost::Message)] 74 | pub struct ReadResp { 75 | #[prost(oneof = "read_resp::Content", tags = "1, 2")] 76 | pub content: ::std::option::Option, 77 | } 78 | pub mod read_resp { 79 | #[derive(Clone, PartialEq, ::prost::Message)] 80 | pub struct ReadEvent { 81 | #[prost(message, optional, tag = "1")] 82 | pub event: ::std::option::Option, 83 | #[prost(message, optional, tag = "2")] 84 | pub link: ::std::option::Option, 85 | #[prost(oneof = "read_event::Position", tags = "3, 4")] 86 | pub position: ::std::option::Option, 87 | #[prost(oneof = "read_event::Count", tags = "5, 6")] 88 | pub count: ::std::option::Option, 89 | } 90 | pub mod read_event { 91 | #[derive(Clone, PartialEq, ::prost::Message)] 92 | pub struct RecordedEvent { 93 | #[prost(message, optional, tag = "1")] 94 | pub id: ::std::option::Option, 95 | #[prost(message, optional, tag = "2")] 96 | pub stream_identifier: 97 | ::std::option::Option, 98 | #[prost(uint64, tag = "3")] 99 | pub stream_revision: u64, 100 | #[prost(uint64, tag = "4")] 101 | pub prepare_position: u64, 102 | #[prost(uint64, tag = "5")] 103 | pub commit_position: u64, 104 | #[prost(map = "string, string", tag = "6")] 105 | pub metadata: 
::std::collections::HashMap, 106 | #[prost(bytes, tag = "7")] 107 | pub custom_metadata: std::vec::Vec, 108 | #[prost(bytes, tag = "8")] 109 | pub data: std::vec::Vec, 110 | } 111 | #[derive(Clone, PartialEq, ::prost::Oneof)] 112 | pub enum Position { 113 | #[prost(uint64, tag = "3")] 114 | CommitPosition(u64), 115 | #[prost(message, tag = "4")] 116 | NoPosition(super::super::super::shared::Empty), 117 | } 118 | #[derive(Clone, PartialEq, ::prost::Oneof)] 119 | pub enum Count { 120 | #[prost(int32, tag = "5")] 121 | RetryCount(i32), 122 | #[prost(message, tag = "6")] 123 | NoRetryCount(super::super::super::shared::Empty), 124 | } 125 | } 126 | #[derive(Clone, PartialEq, ::prost::Message)] 127 | pub struct SubscriptionConfirmation { 128 | #[prost(string, tag = "1")] 129 | pub subscription_id: std::string::String, 130 | } 131 | #[derive(Clone, PartialEq, ::prost::Oneof)] 132 | pub enum Content { 133 | #[prost(message, tag = "1")] 134 | Event(ReadEvent), 135 | #[prost(message, tag = "2")] 136 | SubscriptionConfirmation(SubscriptionConfirmation), 137 | } 138 | } 139 | #[derive(Clone, PartialEq, ::prost::Message)] 140 | pub struct CreateReq { 141 | #[prost(message, optional, tag = "1")] 142 | pub options: ::std::option::Option, 143 | } 144 | pub mod create_req { 145 | #[derive(Clone, PartialEq, ::prost::Message)] 146 | pub struct Options { 147 | #[prost(message, optional, tag = "1")] 148 | pub stream_identifier: ::std::option::Option, 149 | #[prost(string, tag = "2")] 150 | pub group_name: std::string::String, 151 | #[prost(message, optional, tag = "3")] 152 | pub settings: ::std::option::Option, 153 | } 154 | #[derive(Clone, PartialEq, ::prost::Message)] 155 | pub struct Settings { 156 | #[prost(bool, tag = "1")] 157 | pub resolve_links: bool, 158 | #[prost(uint64, tag = "2")] 159 | pub revision: u64, 160 | #[prost(bool, tag = "3")] 161 | pub extra_statistics: bool, 162 | #[prost(int32, tag = "5")] 163 | pub max_retry_count: i32, 164 | #[prost(int32, tag = "7")] 165 | 
pub min_checkpoint_count: i32, 166 | #[prost(int32, tag = "8")] 167 | pub max_checkpoint_count: i32, 168 | #[prost(int32, tag = "9")] 169 | pub max_subscriber_count: i32, 170 | #[prost(int32, tag = "10")] 171 | pub live_buffer_size: i32, 172 | #[prost(int32, tag = "11")] 173 | pub read_batch_size: i32, 174 | #[prost(int32, tag = "12")] 175 | pub history_buffer_size: i32, 176 | #[prost(enumeration = "ConsumerStrategy", tag = "13")] 177 | pub named_consumer_strategy: i32, 178 | #[prost(oneof = "settings::MessageTimeout", tags = "4, 14")] 179 | pub message_timeout: ::std::option::Option, 180 | #[prost(oneof = "settings::CheckpointAfter", tags = "6, 15")] 181 | pub checkpoint_after: ::std::option::Option, 182 | } 183 | pub mod settings { 184 | #[derive(Clone, PartialEq, ::prost::Oneof)] 185 | pub enum MessageTimeout { 186 | #[prost(int64, tag = "4")] 187 | MessageTimeoutTicks(i64), 188 | #[prost(int32, tag = "14")] 189 | MessageTimeoutMs(i32), 190 | } 191 | #[derive(Clone, PartialEq, ::prost::Oneof)] 192 | pub enum CheckpointAfter { 193 | #[prost(int64, tag = "6")] 194 | CheckpointAfterTicks(i64), 195 | #[prost(int32, tag = "15")] 196 | CheckpointAfterMs(i32), 197 | } 198 | } 199 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] 200 | #[repr(i32)] 201 | pub enum ConsumerStrategy { 202 | DispatchToSingle = 0, 203 | RoundRobin = 1, 204 | Pinned = 2, 205 | } 206 | } 207 | #[derive(Clone, PartialEq, ::prost::Message)] 208 | pub struct CreateResp {} 209 | #[derive(Clone, PartialEq, ::prost::Message)] 210 | pub struct UpdateReq { 211 | #[prost(message, optional, tag = "1")] 212 | pub options: ::std::option::Option, 213 | } 214 | pub mod update_req { 215 | #[derive(Clone, PartialEq, ::prost::Message)] 216 | pub struct Options { 217 | #[prost(message, optional, tag = "1")] 218 | pub stream_identifier: ::std::option::Option, 219 | #[prost(string, tag = "2")] 220 | pub group_name: std::string::String, 221 | #[prost(message, optional, tag 
= "3")]
        pub settings: ::std::option::Option<Settings>,
    }
    // NOTE(review): the items below live inside a `pub mod` (presumably
    // `update_req`) whose opening lies before this chunk — confirm against the
    // full generated file. Generic parameters were stripped by extraction and
    // have been restored to match standard prost/tonic codegen.

    /// Persistent-subscription tuning knobs, mirroring `persistent.proto`.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Settings {
        #[prost(bool, tag = "1")]
        pub resolve_links: bool,
        #[prost(uint64, tag = "2")]
        pub revision: u64,
        #[prost(bool, tag = "3")]
        pub extra_statistics: bool,
        #[prost(int32, tag = "5")]
        pub max_retry_count: i32,
        #[prost(int32, tag = "7")]
        pub min_checkpoint_count: i32,
        #[prost(int32, tag = "8")]
        pub max_checkpoint_count: i32,
        #[prost(int32, tag = "9")]
        pub max_subscriber_count: i32,
        #[prost(int32, tag = "10")]
        pub live_buffer_size: i32,
        #[prost(int32, tag = "11")]
        pub read_batch_size: i32,
        #[prost(int32, tag = "12")]
        pub history_buffer_size: i32,
        #[prost(enumeration = "ConsumerStrategy", tag = "13")]
        pub named_consumer_strategy: i32,
        // Oneof fields: the timeout/checkpoint durations can be expressed
        // either in ticks (legacy) or in milliseconds.
        #[prost(oneof = "settings::MessageTimeout", tags = "4, 14")]
        pub message_timeout: ::std::option::Option<settings::MessageTimeout>,
        #[prost(oneof = "settings::CheckpointAfter", tags = "6, 15")]
        pub checkpoint_after: ::std::option::Option<settings::CheckpointAfter>,
    }
    pub mod settings {
        #[derive(Clone, PartialEq, ::prost::Oneof)]
        pub enum MessageTimeout {
            #[prost(int64, tag = "4")]
            MessageTimeoutTicks(i64),
            #[prost(int32, tag = "14")]
            MessageTimeoutMs(i32),
        }
        #[derive(Clone, PartialEq, ::prost::Oneof)]
        pub enum CheckpointAfter {
            #[prost(int64, tag = "6")]
            CheckpointAfterTicks(i64),
            #[prost(int32, tag = "15")]
            CheckpointAfterMs(i32),
        }
    }
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
    #[repr(i32)]
    pub enum ConsumerStrategy {
        DispatchToSingle = 0,
        RoundRobin = 1,
        Pinned = 2,
    }
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateResp {}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteReq {
    #[prost(message, optional, tag = "1")]
    pub options: ::std::option::Option<delete_req::Options>,
}
pub mod delete_req {
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Options {
        // NOTE(review): message type was stripped by extraction; presumably
        // the shared `StreamIdentifier` — confirm against shared.proto.
        #[prost(message, optional, tag = "1")]
        pub stream_identifier: ::std::option::Option<super::super::shared::StreamIdentifier>,
        #[prost(string, tag = "2")]
        pub group_name: std::string::String,
    }
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteResp {}
#[doc = r" Generated client implementations."]
pub mod persistent_subscriptions_client {
    #![allow(unused_variables, dead_code, missing_docs)]
    use tonic::codegen::*;
    pub struct PersistentSubscriptionsClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl PersistentSubscriptionsClient<tonic::transport::Channel> {
        #[doc = r" Attempt to create a new client by connecting to a given endpoint."]
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: std::convert::TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> PersistentSubscriptionsClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::ResponseBody: Body + HttpBody + Send + 'static,
        T::Error: Into<StdError>,
        <T::ResponseBody as HttpBody>::Error: Into<StdError> + Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_interceptor(inner: T, interceptor: impl Into<tonic::Interceptor>) -> Self {
            let inner = tonic::client::Grpc::with_interceptor(inner, interceptor);
            Self { inner }
        }
        pub async fn create(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateReq>,
        ) -> Result<tonic::Response<super::CreateResp>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(
                    tonic::Code::Unknown,
                    format!("Service was not ready: {}", e.into()),
                )
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/event_store.client.persistent_subscriptions.PersistentSubscriptions/Create",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn update(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateReq>,
        ) -> Result<tonic::Response<super::UpdateResp>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(
                    tonic::Code::Unknown,
                    format!("Service was not ready: {}", e.into()),
                )
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/event_store.client.persistent_subscriptions.PersistentSubscriptions/Update",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteReq>,
        ) -> Result<tonic::Response<super::DeleteResp>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(
                    tonic::Code::Unknown,
                    format!("Service was not ready: {}", e.into()),
                )
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/event_store.client.persistent_subscriptions.PersistentSubscriptions/Delete",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
        // Bidirectional streaming call: the client streams acks/naks, the
        // server streams subscription events.
        pub async fn read(
            &mut self,
            request: impl tonic::IntoStreamingRequest<Message = super::ReadReq>,
        ) -> Result<tonic::Response<tonic::codec::Streaming<super::ReadResp>>, tonic::Status>
        {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(
                    tonic::Code::Unknown,
                    format!("Service was not ready: {}", e.into()),
                )
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/event_store.client.persistent_subscriptions.PersistentSubscriptions/Read",
            );
            self.inner
                .streaming(request.into_streaming_request(), path, codec)
                .await
        }
    }
    impl<T: Clone> Clone for PersistentSubscriptionsClient<T> {
        fn clone(&self) -> Self {
            Self {
                inner: self.inner.clone(),
            }
        }
    }
    impl<T> std::fmt::Debug for PersistentSubscriptionsClient<T> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "PersistentSubscriptionsClient {{ ... }}")
        }
    }
}
// ---- src/es6/grpc/event_store/client/shared.rs ----
/// Protocol-level UUID, either structured (two 64-bit halves) or a string.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Uuid {
    #[prost(oneof = "uuid::Value", tags = "1, 2")]
    pub value: ::std::option::Option<uuid::Value>,
}
pub mod uuid {
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Structured {
        #[prost(int64, tag = "1")]
        pub most_significant_bits: i64,
        #[prost(int64, tag = "2")]
        pub least_significant_bits: i64,
    }
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Value {
        #[prost(message, tag = "1")]
        Structured(Structured),
        #[prost(string, tag = "2")]
        String(std::string::String),
    }
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Empty {}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamIdentifier {
    #[prost(bytes, tag = "3")]
    pub stream_name: std::vec::Vec<u8>,
}
// ---- src/es6/grpc/event_store/mod.rs ----
pub mod client;
// ---- src/es6/grpc/mod.rs ----
pub mod event_store;
// ---- src/es6/mod.rs ----
-------------------------------------------------------------------------------- 1 | //! [PREVIEW] Exposes API to communicate with ES 6 server. ES 6 Server is still in development so 2 | //! expect that interface to change. Enable `es6` feature flag to use it. 3 | pub mod commands; 4 | pub mod connection; 5 | pub mod grpc; 6 | pub mod types; 7 | -------------------------------------------------------------------------------- /src/es6/types.rs: -------------------------------------------------------------------------------- 1 | use crate::types; 2 | use bytes::Bytes; 3 | use serde::de::Deserialize; 4 | use serde::ser::Serialize; 5 | use std::cmp::Ordering; 6 | use std::time::Duration; 7 | use uuid::Uuid; 8 | 9 | /// Constants used for expected version control. 10 | /// The use of expected version can be a bit tricky especially when discussing 11 | /// assurances given by the GetEventStore server. 12 | /// 13 | /// The GetEventStore server will assure idempotency for all operations using 14 | /// any value in `ExpectedVersion` except `ExpectedVersion::Any`. When using 15 | /// `ExpectedVersion::Any`, the GetEventStore server will do its best to assure 16 | /// idempotency but will not guarantee idempotency. 17 | #[derive(Copy, Clone, Debug)] 18 | pub enum ExpectedVersion { 19 | /// This write should not conflict with anything and should always succeed. 20 | Any, 21 | 22 | /// The stream should exist. If it or a metadata stream does not exist, 23 | /// treats that as a concurrency problem. 24 | StreamExists, 25 | 26 | /// The stream being written to should not yet exist. If it does exist, 27 | /// treats that as a concurrency problem. 28 | NoStream, 29 | 30 | /// States that the last event written to the stream should have an event 31 | /// number matching your expected value. 32 | Exact(u64), 33 | } 34 | 35 | #[derive(Copy, Clone, Eq, PartialEq, Debug)] 36 | /// Actual revision of a stream. 37 | pub enum CurrentRevision { 38 | /// The last event's number. 
39 | Current(u64), 40 | 41 | /// The stream doesn't exist. 42 | NoStream, 43 | } 44 | 45 | #[derive(Copy, Clone, Eq, PartialEq, Debug)] 46 | /// Expected revision before a write occurs. 47 | pub enum ExpectedRevision { 48 | /// States that the last event written to the stream should have an event number matching your 49 | /// expected value. 50 | Expected(u64), 51 | 52 | /// You expected that write should not conflict with anything and should always succeed. 53 | Any, 54 | 55 | /// You expected the stream should exist. 56 | StreamExists, 57 | } 58 | 59 | /// Holds data of event about to be sent to the server. 60 | pub struct EventData { 61 | pub(crate) event_type: String, 62 | pub(crate) payload: types::Payload, 63 | pub(crate) id_opt: Option, 64 | pub(crate) custom_metadata: Option, 65 | } 66 | 67 | impl EventData { 68 | /// Creates an event with a JSON payload. 69 | pub fn json

(event_type: String, payload: P) -> serde_json::Result 70 | where 71 | P: Serialize, 72 | { 73 | let payload = serde_json::to_vec(&payload)?; 74 | let payload = Bytes::from(payload); 75 | let payload = types::Payload::Json(payload); 76 | 77 | Ok(EventData { 78 | event_type, 79 | payload, 80 | id_opt: None, 81 | custom_metadata: None, 82 | }) 83 | } 84 | 85 | /// Creates an event with a raw binary payload. 86 | pub fn binary(event_type: String, payload: Bytes) -> Self { 87 | EventData { 88 | event_type, 89 | payload: types::Payload::Binary(payload), 90 | id_opt: None, 91 | custom_metadata: None, 92 | } 93 | } 94 | 95 | /// Set an id to this event. By default, the id will be generated by the 96 | /// server. 97 | pub fn id(self, value: Uuid) -> Self { 98 | EventData { 99 | id_opt: Some(value), 100 | ..self 101 | } 102 | } 103 | 104 | /// Assignes a JSON metadata to this event. 105 | pub fn metadata_as_json

(self, payload: P) -> EventData 106 | where 107 | P: Serialize, 108 | { 109 | let bytes = Bytes::from(serde_json::to_vec(&payload).unwrap()); 110 | let json_bin = Some(types::Payload::Json(bytes)); 111 | 112 | EventData { 113 | custom_metadata: json_bin, 114 | ..self 115 | } 116 | } 117 | 118 | /// Assignes a raw binary metadata to this event. 119 | pub fn metadata_as_binary(self, payload: Bytes) -> EventData { 120 | let content_bin = Some(types::Payload::Binary(payload)); 121 | 122 | EventData { 123 | custom_metadata: content_bin, 124 | ..self 125 | } 126 | } 127 | } 128 | 129 | /// A structure referring to a potential logical record position in the 130 | /// GetEventStore transaction file. 131 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 132 | pub struct Position { 133 | /// Commit position of the record. 134 | pub commit: u64, 135 | 136 | /// Prepare position of the record. 137 | pub prepare: u64, 138 | } 139 | 140 | impl Position { 141 | /// Points to the begin of the transaction file. 142 | pub fn start() -> Position { 143 | Position { 144 | commit: 0, 145 | prepare: 0, 146 | } 147 | } 148 | } 149 | 150 | impl PartialOrd for Position { 151 | fn partial_cmp(&self, other: &Position) -> Option { 152 | Some(self.cmp(other)) 153 | } 154 | } 155 | 156 | impl Ord for Position { 157 | fn cmp(&self, other: &Position) -> Ordering { 158 | self.commit 159 | .cmp(&other.commit) 160 | .then(self.prepare.cmp(&other.prepare)) 161 | } 162 | } 163 | 164 | /// Returned after writing to a stream. 165 | #[derive(Debug)] 166 | pub struct WriteResult { 167 | /// Next expected version of the stream. 168 | pub next_expected_version: u64, 169 | 170 | /// `Position` of the write. 171 | pub position: Position, 172 | } 173 | 174 | #[derive(Debug)] 175 | pub enum Revision { 176 | Start, 177 | End, 178 | Exact(A), 179 | } 180 | 181 | /// A structure representing a single event or an resolved link event. 
182 | #[derive(Debug)] 183 | pub struct ResolvedEvent { 184 | /// The event, or the resolved link event if this `ResolvedEvent` is a link 185 | /// event. 186 | pub event: Option, 187 | 188 | /// The link event if this `ResolvedEvent` is a link event. 189 | pub link: Option, 190 | 191 | pub commit_position: Option, 192 | } 193 | 194 | impl ResolvedEvent { 195 | /// If it's a link event with its associated resolved event. 196 | pub fn is_resolved(&self) -> bool { 197 | self.event.is_some() && self.link.is_some() 198 | } 199 | 200 | /// Returns the event that was read or which triggered the subscription. 201 | /// If this `ResolvedEvent` represents a link event, the link will be the 202 | /// orginal event, otherwise it will be the event. 203 | /// 204 | pub fn get_original_event(&self) -> &RecordedEvent { 205 | self.link.as_ref().unwrap_or_else(|| { 206 | self.event 207 | .as_ref() 208 | .expect("[get_original_event] Not supposed to happen!") 209 | }) 210 | } 211 | 212 | /// Returns the stream id of the original event. 213 | pub fn get_original_stream_id(&self) -> &str { 214 | let event = self.get_original_event(); 215 | 216 | &event.stream_id 217 | } 218 | } 219 | 220 | /// Represents a previously written event. 221 | #[derive(Debug)] 222 | pub struct RecordedEvent { 223 | /// The event stream that events belongs to. 224 | pub stream_id: String, 225 | 226 | /// Unique identifier representing this event. 227 | pub id: Uuid, 228 | 229 | /// Number of this event in the stream. 230 | pub revision: u64, 231 | 232 | /// Type of this event. 233 | pub event_type: String, 234 | 235 | /// Payload of this event. 236 | pub data: Bytes, 237 | 238 | /// Representing the metadata associated with this event. 239 | pub metadata: Bytes, 240 | 241 | /// Indicates wheter the content is internally marked as JSON. 242 | pub is_json: bool, 243 | 244 | /// An event position in the $all stream. 
245 | pub position: Position, 246 | } 247 | 248 | impl RecordedEvent { 249 | /// Tries to decode this event payload as a JSON object. 250 | pub fn as_json<'a, T>(&'a self) -> serde_json::Result 251 | where 252 | T: Deserialize<'a>, 253 | { 254 | serde_json::from_slice(&self.data[..]) 255 | } 256 | } 257 | 258 | /// Gathers every persistent subscription property. 259 | #[derive(Debug, Clone, Copy)] 260 | pub struct PersistentSubscriptionSettings { 261 | /// Whether or not the persistent subscription shoud resolve 'linkTo' 262 | /// events to their linked events. 263 | pub resolve_links: bool, 264 | 265 | /// Where the subscription should start from (event number). 266 | pub revision: u64, 267 | 268 | /// Whether or not in depth latency statistics should be tracked on this 269 | /// subscription. 270 | pub extra_stats: bool, 271 | 272 | /// The amount of time after which a message should be considered to be 273 | /// timeout and retried. 274 | pub message_timeout: Duration, 275 | 276 | /// The maximum number of retries (due to timeout) before a message get 277 | /// considered to be parked. 278 | pub max_retry_count: i32, 279 | 280 | /// The size of the buffer listenning to live messages as they happen. 281 | pub live_buffer_size: i32, 282 | 283 | /// The number of events read at a time when paging in history. 284 | pub read_batch_size: i32, 285 | 286 | /// The number of events to cache when paging through history. 287 | pub history_buffer_size: i32, 288 | 289 | /// The amount of time to try checkpoint after. 290 | pub checkpoint_after: Duration, 291 | 292 | /// The minimum number of messages to checkpoint. 293 | pub min_checkpoint_count: i32, 294 | 295 | /// The maximum number of messages to checkpoint. If this number is reached 296 | /// , a checkpoint will be forced. 297 | pub max_checkpoint_count: i32, 298 | 299 | /// The maximum number of subscribers allowed. 
300 | pub max_subscriber_count: i32, 301 | 302 | /// The strategy to use for distributing events to client consumers. 303 | pub named_consumer_strategy: types::SystemConsumerStrategy, 304 | } 305 | 306 | impl PersistentSubscriptionSettings { 307 | pub fn default() -> PersistentSubscriptionSettings { 308 | PersistentSubscriptionSettings { 309 | resolve_links: false, 310 | revision: 0, 311 | extra_stats: false, 312 | message_timeout: Duration::from_secs(30), 313 | max_retry_count: 10, 314 | live_buffer_size: 500, 315 | read_batch_size: 20, 316 | history_buffer_size: 500, 317 | checkpoint_after: Duration::from_secs(2), 318 | min_checkpoint_count: 10, 319 | max_checkpoint_count: 1_000, 320 | max_subscriber_count: 0, // Means their is no limit. 321 | named_consumer_strategy: types::SystemConsumerStrategy::RoundRobin, 322 | } 323 | } 324 | } 325 | 326 | impl Default for PersistentSubscriptionSettings { 327 | fn default() -> PersistentSubscriptionSettings { 328 | PersistentSubscriptionSettings::default() 329 | } 330 | } 331 | 332 | #[derive(Clone, Debug, Copy, Eq, PartialEq)] 333 | pub struct WrongExpectedVersion { 334 | pub current: CurrentRevision, 335 | pub expected: ExpectedRevision, 336 | } 337 | 338 | impl std::fmt::Display for WrongExpectedVersion { 339 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 340 | write!( 341 | f, 342 | "WrongExpectedVersion: expected: {:?}, got: {:?}", 343 | self.expected, self.current 344 | ) 345 | } 346 | } 347 | 348 | impl std::error::Error for WrongExpectedVersion {} 349 | -------------------------------------------------------------------------------- /src/internal.rs: -------------------------------------------------------------------------------- 1 | pub mod command; 2 | pub mod commands; 3 | pub mod connection; 4 | pub mod driver; 5 | pub mod messages; 6 | pub mod messaging; 7 | pub mod operations; 8 | pub mod package; 9 | pub mod registry; 10 | pub mod timespan; 11 | 
// ---- src/internal/command.rs ----
/// Opcode of an EventStore TCP-protocol package.
///
/// Every variant corresponds to exactly one wire byte; `Unknown` carries any
/// byte the client does not recognize so it can round-trip untouched.
#[derive(Copy, Clone, Debug)]
pub enum Cmd {
    HeartbeatRequest,
    HeartbeatResponse,
    IdentifyClient,
    ClientIdentified,
    Authenticate,
    Authenticated,
    NotAuthenticated,
    WriteEvents,
    WriteEventsCompleted,
    ReadEvent,
    ReadEventCompleted,
    TransactionStart,
    TransactionStartCompleted,
    TransactionWrite,
    TransactionWriteCompleted,
    TransactionCommit,
    TransactionCommitCompleted,
    ReadStreamEventsForward,
    ReadStreamEventsForwardCompleted,
    ReadStreamEventsBackward,
    ReadStreamEventsBackwardCompleted,
    ReadAllEventsForward,
    ReadAllEventsForwardCompleted,
    ReadAllEventsBackward,
    ReadAllEventsBackwardCompleted,
    DeleteStream,
    DeleteStreamCompleted,
    SubscribeToStream,
    SubscriptionConfirmed,
    StreamEventAppeared,
    UnsubscribeFromStream,
    SubscriptionDropped,
    CreatePersistentSubscription,
    CreatePersistentSubscriptionCompleted,
    UpdatePersistentSubscription,
    UpdatePersistentSubscriptionCompleted,
    DeletePersistentSubscription,
    DeletePersistentSubscriptionCompleted,
    ConnectToPersistentSubscription,
    PersistentSubscriptionConfirmation,
    PersistentSubscriptionStreamEventAppeared,
    PersistentSubscriptionAckEvents,
    PersistentSubscriptionNakEvents,
    BadRequest,
    NotHandled,
    Unknown(u8),
}

/// Equality is defined over the wire byte, so e.g. `Unknown(0x01)` compares
/// equal to `HeartbeatRequest`.
impl PartialEq for Cmd {
    fn eq(&self, other: &Cmd) -> bool {
        u8::eq(&self.to_u8(), &other.to_u8())
    }
}

impl Eq for Cmd {}

impl Cmd {
    /// Encodes this command to its wire opcode byte.
    /// Arms are sorted by opcode value; `from_u8` mirrors this table.
    pub fn to_u8(self) -> u8 {
        match self {
            Cmd::HeartbeatRequest => 0x01,
            Cmd::HeartbeatResponse => 0x02,
            Cmd::WriteEvents => 0x82,
            Cmd::WriteEventsCompleted => 0x83,
            Cmd::TransactionStart => 0x84,
            Cmd::TransactionStartCompleted => 0x85,
            Cmd::TransactionWrite => 0x86,
            Cmd::TransactionWriteCompleted => 0x87,
            Cmd::TransactionCommit => 0x88,
            Cmd::TransactionCommitCompleted => 0x89,
            Cmd::DeleteStream => 0x8A,
            Cmd::DeleteStreamCompleted => 0x8B,
            Cmd::ReadEvent => 0xB0,
            Cmd::ReadEventCompleted => 0xB1,
            Cmd::ReadStreamEventsForward => 0xB2,
            Cmd::ReadStreamEventsForwardCompleted => 0xB3,
            Cmd::ReadStreamEventsBackward => 0xB4,
            Cmd::ReadStreamEventsBackwardCompleted => 0xB5,
            Cmd::ReadAllEventsForward => 0xB6,
            Cmd::ReadAllEventsForwardCompleted => 0xB7,
            Cmd::ReadAllEventsBackward => 0xB8,
            Cmd::ReadAllEventsBackwardCompleted => 0xB9,
            Cmd::SubscribeToStream => 0xC0,
            Cmd::SubscriptionConfirmed => 0xC1,
            Cmd::StreamEventAppeared => 0xC2,
            Cmd::UnsubscribeFromStream => 0xC3,
            Cmd::SubscriptionDropped => 0xC4,
            Cmd::ConnectToPersistentSubscription => 0xC5,
            Cmd::PersistentSubscriptionConfirmation => 0xC6,
            Cmd::PersistentSubscriptionStreamEventAppeared => 0xC7,
            Cmd::CreatePersistentSubscription => 0xC8,
            Cmd::CreatePersistentSubscriptionCompleted => 0xC9,
            Cmd::DeletePersistentSubscription => 0xCA,
            Cmd::DeletePersistentSubscriptionCompleted => 0xCB,
            Cmd::PersistentSubscriptionAckEvents => 0xCC,
            Cmd::PersistentSubscriptionNakEvents => 0xCD,
            Cmd::UpdatePersistentSubscription => 0xCE,
            Cmd::UpdatePersistentSubscriptionCompleted => 0xCF,
            Cmd::BadRequest => 0xF0,
            Cmd::NotHandled => 0xF1,
            Cmd::Authenticate => 0xF2,
            Cmd::Authenticated => 0xF3,
            Cmd::NotAuthenticated => 0xF4,
            Cmd::IdentifyClient => 0xF5,
            Cmd::ClientIdentified => 0xF6,
            Cmd::Unknown(opcode) => opcode,
        }
    }

    /// Decodes a wire opcode byte; unrecognized bytes become `Cmd::Unknown`
    /// so the mapping is total and `from_u8(b).to_u8() == b` for all bytes.
    pub fn from_u8(byte: u8) -> Cmd {
        match byte {
            0x01 => Cmd::HeartbeatRequest,
            0x02 => Cmd::HeartbeatResponse,
            0x82 => Cmd::WriteEvents,
            0x83 => Cmd::WriteEventsCompleted,
            0x84 => Cmd::TransactionStart,
            0x85 => Cmd::TransactionStartCompleted,
            0x86 => Cmd::TransactionWrite,
            0x87 => Cmd::TransactionWriteCompleted,
            0x88 => Cmd::TransactionCommit,
            0x89 => Cmd::TransactionCommitCompleted,
            0x8A => Cmd::DeleteStream,
            0x8B => Cmd::DeleteStreamCompleted,
            0xB0 => Cmd::ReadEvent,
            0xB1 => Cmd::ReadEventCompleted,
            0xB2 => Cmd::ReadStreamEventsForward,
            0xB3 => Cmd::ReadStreamEventsForwardCompleted,
            0xB4 => Cmd::ReadStreamEventsBackward,
            0xB5 => Cmd::ReadStreamEventsBackwardCompleted,
            0xB6 => Cmd::ReadAllEventsForward,
            0xB7 => Cmd::ReadAllEventsForwardCompleted,
            0xB8 => Cmd::ReadAllEventsBackward,
            0xB9 => Cmd::ReadAllEventsBackwardCompleted,
            0xC0 => Cmd::SubscribeToStream,
            0xC1 => Cmd::SubscriptionConfirmed,
            0xC2 => Cmd::StreamEventAppeared,
            0xC3 => Cmd::UnsubscribeFromStream,
            0xC4 => Cmd::SubscriptionDropped,
            0xC5 => Cmd::ConnectToPersistentSubscription,
            0xC6 => Cmd::PersistentSubscriptionConfirmation,
            0xC7 => Cmd::PersistentSubscriptionStreamEventAppeared,
            0xC8 => Cmd::CreatePersistentSubscription,
            0xC9 => Cmd::CreatePersistentSubscriptionCompleted,
            0xCA => Cmd::DeletePersistentSubscription,
            0xCB => Cmd::DeletePersistentSubscriptionCompleted,
            0xCC => Cmd::PersistentSubscriptionAckEvents,
            0xCD => Cmd::PersistentSubscriptionNakEvents,
            0xCE => Cmd::UpdatePersistentSubscription,
            0xCF => Cmd::UpdatePersistentSubscriptionCompleted,
            0xF0 => Cmd::BadRequest,
            0xF1 => Cmd::NotHandled,
            0xF2 => Cmd::Authenticate,
            0xF3 => Cmd::Authenticated,
            0xF4 => Cmd::NotAuthenticated,
            0xF5 => Cmd::IdentifyClient,
            0xF6 => Cmd::ClientIdentified,
            unknown => Cmd::Unknown(unknown),
        }
    }
}
// ---- src/internal/connection.rs ----
// NOTE(review): generic parameters below were stripped by extraction and have
// been restored (`Sender<Pkg>`, `Result<Pkg>`, `read_u32::<LittleEndian>`, …).
use futures::channel::mpsc::{channel, Receiver, Sender};
use futures::sink::SinkExt;
use futures::stream::iter;
use futures::stream::StreamExt;
use std::net::SocketAddr;
use tokio::io::{split, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio_byteorder::{AsyncWriteBytesExt, LittleEndian};
use uuid::Uuid;

use crate::internal::command::Cmd;
use crate::internal::messaging::Msg;
use crate::internal::package::Pkg;
use crate::Settings;

/// Handle to a live TCP connection. Packages pushed through `sender` are
/// encoded and written by a dedicated writer task spawned in `Connection::new`.
pub struct Connection {
    pub id: Uuid,
    pub desc: String,
    sender: Sender<Pkg>,
}

fn decode_bytes_error(err: uuid::Error) -> std::io::Error {
    std::io::Error::new(std::io::ErrorKind::Other, format!("BytesError {}", err))
}

/// Reads one length-prefixed frame from the socket and decodes it into a `Pkg`.
/// Frame layout: u32 LE size, u8 command, u8 auth flag, 16-byte correlation
/// UUID, then the payload.
async fn decode_pkg<A>(reading: &mut A) -> std::io::Result<Pkg>
where
    A: AsyncRead + Unpin,
{
    let frame_size =
        tokio_byteorder::AsyncReadBytesExt::read_u32::<LittleEndian>(reading).await?;
    // Bound all subsequent reads to this frame.
    let mut src = AsyncReadExt::take(reading, frame_size.into());
    let cmd = AsyncReadExt::read_u8(&mut src).await?;
    let cmd = Cmd::from_u8(cmd);
    // Parses the authentication flag. The server always sends 0 on responses.
    let _ = AsyncReadExt::read_u8(&mut src).await?;
    let mut uuid = [0; 16];

    src.read_exact(&mut uuid).await?;

    let correlation = Uuid::from_slice(&uuid).map_err(decode_bytes_error)?;
    let mut payload: Vec<u8> = Vec::with_capacity(src.limit() as usize);

    src.read_to_end(&mut payload).await?;

    let pkg = Pkg {
        cmd,
        creds_opt: None,
        correlation,
        payload: payload.into(),
    };

    Ok(pkg)
}

/// Encodes a `Pkg` into the wire framing (mirror of `decode_pkg`) and flushes
/// it to the socket. Credentials, when present, are written right after the
/// correlation id.
async fn encode_pkg<A>(dest: &mut A, pkg: Pkg) -> std::io::Result<()>
where
    A: AsyncWrite + Unpin,
{
    let size = pkg.size();
    let auth_flag = if pkg.creds_opt.is_some() { 0x01 } else { 0x00 };

    AsyncWriteBytesExt::write_u32::<LittleEndian>(dest, size as u32).await?;
    AsyncWriteExt::write_u8(dest, pkg.cmd.to_u8()).await?;
    AsyncWriteExt::write_u8(dest, auth_flag).await?;
    dest.write_all(pkg.correlation.as_bytes()).await?;

    if let Some(creds) = pkg.creds_opt.as_ref() {
        AsyncWriteExt::write_u8(dest, creds.login.len() as u8).await?;
        dest.write_all(creds.login.as_ref()).await?;
        AsyncWriteExt::write_u8(dest, creds.password.len() as u8).await?;
        dest.write_all(creds.password.as_ref()).await?;
    }

    dest.write_all(pkg.payload.as_ref()).await?;
    dest.flush().await?;

    Ok(())
}

#[inline]
fn timeout_error() -> std::io::Error {
    std::io::Error::new(std::io::ErrorKind::Interrupted, "Connection timeout")
}

/// Spawns the reader and writer tasks for an established socket. Either task
/// reports `Msg::ConnectionClosed` on its first I/O error and stops.
async fn start_read_write_threads<S>(
    conn_id: Uuid,
    bus: Sender<Msg>,
    mut recv: Receiver<Pkg>,
    stream: S,
) where
    S: AsyncRead + AsyncWrite + Send + 'static,
{
    let (mut reading, mut writing) = split(stream);
    let mut reading_bus = bus.clone();

    tokio::spawn(async move {
        loop {
            let result = decode_pkg(&mut reading).await;

            match result {
                Ok(pkg) => {
                    let _ = reading_bus.send(Msg::Arrived(pkg)).await;
                }

                Err(e) => {
                    let _ = reading_bus.send(Msg::ConnectionClosed(conn_id, e)).await;

                    break;
                }
            }
        }
    });

    let mut writing_bus = bus;

    tokio::spawn(async move {
        while let Some(pkg) = recv.next().await {
            if let Err(e) = encode_pkg(&mut writing, pkg).await {
                let _ = writing_bus.send(Msg::ConnectionClosed(conn_id, e)).await;

                break;
            }
        }
    });
}

/// Connects (with timeout), optionally wraps the socket in TLS, then hands the
/// stream to the reader/writer tasks. All outcomes are reported on `bus`.
async fn process(
    setts: Settings,
    mut bus: Sender<Msg>,
    recv: Receiver<Pkg>,
    conn_id: Uuid,
    addr: SocketAddr,
) {
    let result =
        tokio::time::timeout(setts.socket_connection_timeout, TcpStream::connect(&addr)).await;
    match result {
        Ok(result) => match result {
            Ok(stream) => {
                let _ = bus.send(Msg::Established(conn_id)).await;
                #[cfg(feature = "tls")]
                {
                    if let Some(config) = setts.tls_client_config {
                        let dnsname = config.domain;
                        let connector: tokio_rustls::TlsConnector =
                            std::sync::Arc::new(config.rustls_config).into();

                        match connector.connect(dnsname.as_ref(), stream).await {
                            Ok(stream) => {
                                start_read_write_threads(conn_id, bus, recv, stream).await
                            }

                            Err(e) => {
                                error!("TLS error on connection: {}", e);
                                return;
                            }
                        }
                    }
                }
                #[cfg(not(feature = "tls"))]
                {
                    start_read_write_threads(conn_id, bus, recv, stream).await
                }
            }

            Err(err) => {
                let _ = bus.send(Msg::ConnectionClosed(conn_id, err)).await;
            }
        },

        Err(_) => {
            let _ = bus
                .send(Msg::ConnectionClosed(conn_id, timeout_error()))
                .await;
        }
    }
}

impl Connection {
    /// Starts the connection state machine in the background and returns a
    /// handle immediately; connection failures surface on `bus`.
    pub fn new(setts: Settings, bus: Sender<Msg>, addr: SocketAddr) -> Connection {
        let (sender, recv) = channel(500);
        let id = Uuid::new_v4();
        let desc = format!("{:?}", addr);

        tokio::spawn(process(setts, bus, recv, id, addr));

        Connection { id, desc, sender }
    }

    /// Queues a single package for writing; errors are intentionally ignored
    /// (a closed connection is reported separately on the bus).
    pub async fn enqueue(&mut self, pkg: Pkg) {
        let _ = self.sender.send(pkg).await;
    }

    /// Queues a batch of packages for writing.
    pub async fn enqueue_all(&mut self, pkgs: Vec<Pkg>) {
        let stream = pkgs.into_iter().map(Ok);
        let _ = self.sender.send_all(&mut iter(stream)).await;
    }
}

// ---- src/internal/endpoint.rs ----
use std::net::SocketAddrV4;

/// A node endpoint the driver can connect to.
pub struct Endpoint {
    pub addr: SocketAddrV4,
}

impl Endpoint {
    pub fn from_addr(addr: SocketAddrV4) -> Endpoint {
        // Field-init shorthand (was the redundant `addr: addr`).
        Endpoint { addr }
    }
}

// ---- src/internal/messaging.rs ----
use std::fmt;
use std::io::Error;

use uuid::Uuid;

use crate::internal::package::Pkg;
use crate::types::{Endpoint, OperationError};
use futures::channel::mpsc;

/// Outcome delivered to the party that initiated an operation.
#[derive(Debug)]
pub enum OpMsg {
    Recv(Pkg),
    Failed(OperationError),
}

#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum Lifetime<A> {
    OneTime(A),
    /// `super::command::Cmd` is used to know on which command we decide to stop. For example, in
    /// case of subscription, `super::command::Cmd::SubscriptionDropped` will stop a subscription
    /// transmission.
22 | KeepAlive(super::command::Cmd, A), 23 | } 24 | 25 | impl Lifetime { 26 | pub fn inner(self) -> A { 27 | match self { 28 | Lifetime::KeepAlive(_, a) => a, 29 | Lifetime::OneTime(a) => a, 30 | } 31 | } 32 | 33 | pub fn keep_alive_until(&self) -> Option { 34 | match self { 35 | Lifetime::KeepAlive(cmd, _) => Some(*cmd), 36 | Lifetime::OneTime(_) => None, 37 | } 38 | } 39 | } 40 | 41 | pub type Mailbox = mpsc::Sender; 42 | 43 | pub enum Msg { 44 | Start, 45 | Shutdown, 46 | Tick, 47 | Establish(Endpoint), 48 | Established(Uuid), 49 | Arrived(Pkg), 50 | ConnectionClosed(Uuid, Error), 51 | Transmit(Lifetime, Mailbox), 52 | Send(Pkg), 53 | Marker, // Use as checkpoint detection. 54 | } 55 | 56 | impl fmt::Debug for Msg { 57 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 58 | use Msg::*; 59 | 60 | match self { 61 | Start => writeln!(f, "Start"), 62 | Shutdown => writeln!(f, "Shutdown"), 63 | Tick => writeln!(f, "Tick"), 64 | Establish(ept) => writeln!(f, "Establish({:?})", ept), 65 | Established(id) => writeln!(f, "Established({:?})", id), 66 | Arrived(pkg) => writeln!(f, "Arrived({:?})", pkg), 67 | ConnectionClosed(id, e) => writeln!(f, "ConnectionClosed({:?}, {:?})", id, e), 68 | Transmit(pkg, _) => writeln!(f, "Transmit({:?})", pkg), 69 | Send(pkg) => writeln!(f, "Send({:?})", pkg), 70 | Marker => writeln!(f, "Marker"), 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/internal/package.rs: -------------------------------------------------------------------------------- 1 | use bytes::{buf::BufMutExt, Bytes, BytesMut}; 2 | use protobuf::{parse_from_carllerche_bytes, Message}; 3 | use uuid::Uuid; 4 | 5 | use crate::internal::command::Cmd; 6 | use crate::internal::messages; 7 | use crate::types::Credentials; 8 | 9 | #[derive(Debug, Clone)] 10 | pub struct Pkg { 11 | pub cmd: Cmd, 12 | pub creds_opt: Option, 13 | pub correlation: Uuid, 14 | pub payload: Bytes, 15 | } 16 | 17 | static 
CLIENT_VERSION: i32 = 1; 18 | pub static PKG_MANDATORY_SIZE: usize = 18; 19 | 20 | impl Pkg { 21 | pub fn new(cmd: Cmd, correlation: Uuid) -> Pkg { 22 | Pkg { 23 | cmd, 24 | correlation, 25 | creds_opt: None, 26 | payload: Default::default(), 27 | } 28 | } 29 | 30 | pub fn from_message( 31 | cmd: Cmd, 32 | creds_opt: Option, 33 | msg: &M, 34 | ) -> ::std::io::Result 35 | where 36 | M: Message, 37 | { 38 | let mut writer = BytesMut::with_capacity(msg.compute_size() as usize).writer(); 39 | 40 | msg.write_to_writer(&mut writer)?; 41 | 42 | let pkg = Pkg { 43 | cmd, 44 | creds_opt, 45 | correlation: Uuid::new_v4(), 46 | payload: writer.into_inner().freeze(), 47 | }; 48 | 49 | Ok(pkg) 50 | } 51 | 52 | pub fn size(&self) -> usize { 53 | let creds_size = { 54 | match self.creds_opt { 55 | Some(ref creds) => creds.network_size(), 56 | None => 0, 57 | } 58 | }; 59 | 60 | PKG_MANDATORY_SIZE + self.payload.len() + creds_size 61 | } 62 | 63 | pub fn heartbeat_request() -> Pkg { 64 | Pkg::new(Cmd::HeartbeatRequest, Uuid::new_v4()) 65 | } 66 | 67 | pub fn authenticate(creds: Credentials) -> Pkg { 68 | Pkg { 69 | cmd: Cmd::Authenticate, 70 | correlation: Uuid::new_v4(), 71 | creds_opt: Some(creds), 72 | payload: Default::default(), 73 | } 74 | } 75 | 76 | pub fn identify_client(name_opt: &Option) -> Pkg { 77 | let mut msg = messages::IdentifyClient::new(); 78 | let name = match *name_opt { 79 | Some(ref name) => name.clone(), 80 | None => format!("ES-{}", Uuid::new_v4()).into(), 81 | }; 82 | 83 | msg.set_connection_name(name); 84 | msg.set_version(CLIENT_VERSION); 85 | 86 | let mut writer = BytesMut::with_capacity(msg.compute_size() as usize).writer(); 87 | 88 | msg.write_to_writer(&mut writer).unwrap(); 89 | 90 | Pkg { 91 | cmd: Cmd::IdentifyClient, 92 | correlation: Uuid::new_v4(), 93 | creds_opt: None, 94 | payload: writer.into_inner().freeze(), 95 | } 96 | } 97 | 98 | // Copies the Pkg except its payload. 
99 | pub fn copy_headers_only(&self) -> Pkg { 100 | Pkg { 101 | cmd: self.cmd, 102 | correlation: self.correlation, 103 | payload: Default::default(), 104 | creds_opt: None, 105 | } 106 | } 107 | 108 | pub fn to_message(&self) -> ::std::io::Result 109 | where 110 | M: Message, 111 | { 112 | parse_from_carllerche_bytes(&self.payload).map_err(|e| e.into()) 113 | } 114 | 115 | pub fn build_text(self) -> String { 116 | unsafe { String::from_utf8_unchecked(self.payload.to_vec()) } 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/internal/registry.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::time::{Duration, Instant}; 3 | 4 | use futures::sink::SinkExt; 5 | use uuid::Uuid; 6 | 7 | use crate::internal::command::Cmd; 8 | use crate::internal::messages; 9 | use crate::internal::messaging::{self, Lifetime, OpMsg}; 10 | use crate::internal::package::Pkg; 11 | use crate::types::{Endpoint, OperationError, Retry}; 12 | 13 | #[derive(Debug)] 14 | struct Request { 15 | original: Pkg, 16 | conn_id: Uuid, 17 | retries: usize, 18 | started: Instant, 19 | mailbox: messaging::Mailbox, 20 | keep_alive_until: Option, 21 | } 22 | 23 | impl Request { 24 | fn keep_alive(&self) -> bool { 25 | self.keep_alive_until.is_some() 26 | } 27 | 28 | fn inner_lifetime(self) -> (messaging::Mailbox, Lifetime) { 29 | let lifetime = if let Some(cmd) = self.keep_alive_until { 30 | Lifetime::KeepAlive(cmd, self.original) 31 | } else { 32 | Lifetime::OneTime(self.original) 33 | }; 34 | 35 | (self.mailbox, lifetime) 36 | } 37 | } 38 | 39 | #[derive(Debug)] 40 | struct Waiting { 41 | pkg: Lifetime, 42 | mailbox: messaging::Mailbox, 43 | } 44 | 45 | pub struct Registry { 46 | requests: HashMap, 47 | waitings: Vec, 48 | timeout: Duration, 49 | max_retries: Retry, 50 | } 51 | 52 | impl Registry { 53 | pub fn new(timeout: Duration, max_retries: Retry) -> Self { 54 | 
Registry { 55 | requests: HashMap::new(), 56 | waitings: Vec::new(), 57 | timeout, 58 | max_retries, 59 | } 60 | } 61 | 62 | pub fn register(&mut self, conn_id: Uuid, mailbox: messaging::Mailbox, pkg: Lifetime) { 63 | let keep_alive_until = pkg.keep_alive_until(); 64 | let req = Request { 65 | mailbox, 66 | conn_id, 67 | original: pkg.inner(), 68 | retries: 1, 69 | started: Instant::now(), 70 | keep_alive_until, 71 | }; 72 | 73 | self.requests.insert(req.original.correlation, req); 74 | } 75 | 76 | pub fn postpone(&mut self, mailbox: messaging::Mailbox, pkg: Lifetime) { 77 | let req = Waiting { pkg, mailbox }; 78 | 79 | self.waitings.push(req); 80 | } 81 | 82 | pub async fn handle(&mut self, pkg: Pkg) -> Option { 83 | if let Some(mut req) = self.requests.remove(&pkg.correlation) { 84 | debug!( 85 | "Package [{}]: command {:?} received {:?}.", 86 | req.original.correlation, req.original.cmd, pkg.cmd, 87 | ); 88 | 89 | match pkg.cmd { 90 | Cmd::BadRequest => { 91 | let msg = pkg.build_text(); 92 | 93 | error!("Bad request for command {:?}: {}.", req.original.cmd, msg,); 94 | 95 | let error = OperationError::ServerError(Some(msg)); 96 | let _ = req.mailbox.send(OpMsg::Failed(error)).await; 97 | } 98 | 99 | Cmd::NotAuthenticated => { 100 | error!( 101 | "Not authenticated for command {:?} [{:?}].", 102 | req.original.cmd, req.original.correlation, 103 | ); 104 | 105 | let error = OperationError::AuthenticationRequired; 106 | let _ = req.mailbox.send(OpMsg::Failed(error)).await; 107 | } 108 | 109 | Cmd::NotHandled => { 110 | warn!( 111 | "Not handled request {:?} id {}.", 112 | req.original.cmd, req.original.correlation, 113 | ); 114 | 115 | let result = pkg.to_message::().and_then(|msg| { 116 | if let messages::NotHandled_NotHandledReason::NotMaster = msg.get_reason() { 117 | let master_info = 118 | protobuf::parse_from_bytes::( 119 | msg.get_additional_info(), 120 | )?; 121 | 122 | // TODO - Support reconnection on the secure port when we are going to 123 | // 
implement SSL connection. 124 | let addr_str = format!( 125 | "{}:{}", 126 | master_info.get_external_tcp_address(), 127 | master_info.get_external_tcp_port(), 128 | ); 129 | let addr = addr_str.parse().map_err(|e| { 130 | std::io::Error::new( 131 | std::io::ErrorKind::InvalidInput, 132 | format!("Failed parsing ip address: {}", e), 133 | ) 134 | })?; 135 | 136 | let external_tcp_port = Endpoint::from_addr(addr); 137 | 138 | Ok(Some(external_tcp_port)) 139 | } else { 140 | Ok(None) 141 | } 142 | }); 143 | 144 | match result { 145 | Ok(endpoint_opt) => { 146 | let orig_cmd = req.original.cmd; 147 | let (mailbox, orig_pkg) = req.inner_lifetime(); 148 | 149 | self.postpone(mailbox, orig_pkg); 150 | 151 | if let Some(endpoint) = endpoint_opt { 152 | warn!( 153 | "Received a non master error on command {:?} id {} retrying on [{:?}]", 154 | orig_cmd, 155 | pkg.correlation, 156 | endpoint, 157 | ); 158 | 159 | return Some(endpoint); 160 | } else { 161 | warn!( 162 | "The server has either not started or is too busy. 
163 | Retrying command {:?} id {}.", 164 | orig_cmd, pkg.correlation, 165 | ); 166 | } 167 | } 168 | 169 | Err(error) => { 170 | error!( 171 | "Decoding error: can't decode NotHandled message: {}.", 172 | error, 173 | ); 174 | 175 | let msg = format!( 176 | "Decoding error: can't decode NotHandled message: {}.", 177 | error, 178 | ); 179 | 180 | let error = OperationError::ProtobufDecodingError(msg); 181 | let _ = req.mailbox.send(OpMsg::Failed(error)).await; 182 | } 183 | } 184 | } 185 | 186 | _ => { 187 | let resp_cmd = pkg.cmd; 188 | let _ = req.mailbox.send(OpMsg::Recv(pkg)).await; 189 | 190 | if let Some(cmd) = req.keep_alive_until { 191 | if cmd != resp_cmd { 192 | self.requests.insert(req.original.correlation, req); 193 | } 194 | } 195 | } 196 | } 197 | 198 | None 199 | } else { 200 | warn!("No operation associated to package {:?}", pkg); 201 | 202 | None 203 | } 204 | } 205 | 206 | pub async fn check_and_retry(&mut self, conn_id: Uuid) -> Vec { 207 | debug!("Enter check_and_retry process…"); 208 | 209 | let mut pkgs: Vec = vec![]; 210 | let max_retries = self.max_retries.to_usize(); 211 | 212 | for key in self.requests.keys().copied().collect::>() { 213 | let mut req = self.requests.remove(&key).expect("impossible situation"); 214 | 215 | if req.conn_id != conn_id { 216 | let _ = req 217 | .mailbox 218 | .send(OpMsg::Failed(OperationError::ConnectionHasDropped)) 219 | .await; 220 | 221 | continue; 222 | } 223 | 224 | if !req.keep_alive() && req.started.elapsed() >= self.timeout { 225 | if req.retries + 1 > max_retries { 226 | error!( 227 | "Command {:?} [{:?}]: maximum retries threshold reached [{}], aborted!", 228 | req.original.cmd, req.original.correlation, max_retries, 229 | ); 230 | 231 | let _ = req 232 | .mailbox 233 | .send(OpMsg::Failed(OperationError::Aborted)) 234 | .await; 235 | 236 | continue; 237 | } else { 238 | req.retries += 1; 239 | req.started = Instant::now(); 240 | 241 | warn!( 242 | "Command {:?} [{:?}] has timeout. 
Retrying (attempt {}/{})", 243 | req.original.cmd, req.original.correlation, req.retries, max_retries, 244 | ); 245 | 246 | pkgs.push(req.original.clone()); 247 | } 248 | } 249 | 250 | self.requests.insert(key, req); 251 | } 252 | 253 | while let Some(req) = self.waitings.pop() { 254 | pkgs.push(req.pkg.clone().inner()); 255 | self.register(conn_id, req.mailbox, req.pkg); 256 | } 257 | 258 | debug!("check_and_retry process completed."); 259 | 260 | pkgs 261 | } 262 | 263 | pub async fn abort(&mut self) { 264 | for (_, mut req) in self.requests.drain() { 265 | let _ = req 266 | .mailbox 267 | .send(OpMsg::Failed(OperationError::Aborted)) 268 | .await; 269 | } 270 | 271 | for mut req in self.waitings.drain(..) { 272 | let _ = req 273 | .mailbox 274 | .send(OpMsg::Failed(OperationError::Aborted)) 275 | .await; 276 | } 277 | } 278 | } 279 | -------------------------------------------------------------------------------- /src/internal/timespan.rs: -------------------------------------------------------------------------------- 1 | use serde::de::{self, Deserialize, Deserializer, Visitor}; 2 | use serde::ser::{Serialize, Serializer}; 3 | use std::borrow::Borrow; 4 | use std::time::Duration; 5 | 6 | #[derive(Debug, PartialEq, Eq, Copy, Clone)] 7 | pub struct Timespan { 8 | pub ticks: u64, 9 | } 10 | 11 | pub struct Builder { 12 | days: u64, 13 | hours: u64, 14 | minutes: u64, 15 | seconds: u64, 16 | milliseconds: u64, 17 | } 18 | 19 | impl Builder { 20 | fn new() -> Builder { 21 | Builder { 22 | days: 0, 23 | hours: 0, 24 | minutes: 0, 25 | seconds: 0, 26 | milliseconds: 0, 27 | } 28 | } 29 | 30 | pub fn days(&mut self, days: u64) -> &mut Builder { 31 | self.days = days; 32 | 33 | self 34 | } 35 | 36 | pub fn hours(&mut self, hours: u64) -> &mut Builder { 37 | self.hours = hours; 38 | 39 | self 40 | } 41 | 42 | pub fn minutes(&mut self, minutes: u64) -> &mut Builder { 43 | self.minutes = minutes; 44 | 45 | self 46 | } 47 | 48 | pub fn seconds(&mut self, seconds: u64) -> 
&mut Builder { 49 | self.seconds = seconds; 50 | 51 | self 52 | } 53 | 54 | pub fn milliseconds(&mut self, milliseconds: u64) -> &mut Builder { 55 | self.milliseconds = milliseconds; 56 | 57 | self 58 | } 59 | 60 | pub fn build(&self) -> Timespan { 61 | let total_millis = 62 | self.days * 24 * 3600 + self.hours * 3600 + self.minutes * 60 + self.seconds; 63 | let total_millis = total_millis * 1000; 64 | let total_millis = total_millis + self.milliseconds; 65 | let ticks = total_millis * TICKS_PER_MILLIS; 66 | 67 | Timespan::from_ticks(ticks) 68 | } 69 | } 70 | 71 | impl Timespan { 72 | fn from_ticks(ticks: u64) -> Timespan { 73 | Timespan { ticks } 74 | } 75 | 76 | pub fn from_duration(duration: Duration) -> Timespan { 77 | let mut builder = Timespan::builder(); 78 | 79 | builder 80 | .seconds(duration.as_secs()) 81 | .milliseconds(u64::from(duration.subsec_millis())) 82 | .build() 83 | } 84 | 85 | pub fn builder() -> Builder { 86 | Builder::new() 87 | } 88 | 89 | pub fn days(self) -> u64 { 90 | self.ticks / TICKS_PER_DAY 91 | } 92 | 93 | pub fn hours(self) -> u64 { 94 | (self.ticks / TICKS_PER_HOUR) % 24 95 | } 96 | 97 | pub fn minutes(self) -> u64 { 98 | (self.ticks / TICKS_PER_MINUTE) % 60 99 | } 100 | 101 | pub fn seconds(self) -> u64 { 102 | (self.ticks / TICKS_PER_SECONDS) % 60 103 | } 104 | 105 | pub fn total_milliseconds(self) -> u64 { 106 | let millis = ((self.ticks as f64) * MILLIS_PER_TICK) as u64; 107 | 108 | if millis > MAX_MILLIS { 109 | MAX_MILLIS 110 | } else { 111 | millis 112 | } 113 | } 114 | 115 | pub fn build_duration(self) -> Duration { 116 | Duration::from_millis(self.total_milliseconds()) 117 | } 118 | 119 | fn str_repr(self) -> String { 120 | let mut builder = String::new(); 121 | let days = self.days(); 122 | let hours = self.hours(); 123 | let minutes = self.minutes(); 124 | let seconds = self.seconds(); 125 | let fractions = self.ticks % TICKS_PER_SECONDS; 126 | 127 | if days > 0 { 128 | builder.push_str(format!("{}.", days).borrow()); 
129 | } 130 | 131 | builder.push_str(format!("{:02}:", hours).borrow()); 132 | builder.push_str(format!("{:02}:", minutes).borrow()); 133 | builder.push_str(format!("{:02}", seconds).borrow()); 134 | 135 | if fractions > 0 { 136 | builder.push_str(format!(".{:07}", fractions).borrow()); 137 | } 138 | 139 | builder 140 | } 141 | } 142 | 143 | impl Serialize for Timespan { 144 | fn serialize(&self, serializer: S) -> Result 145 | where 146 | S: Serializer, 147 | { 148 | serializer.serialize_str(self.str_repr().borrow()) 149 | } 150 | } 151 | 152 | struct ForTimespan; 153 | 154 | enum Parse { 155 | Days, 156 | Hours, 157 | Minutes, 158 | Seconds, 159 | Fractions, 160 | } 161 | 162 | impl<'de> Visitor<'de> for ForTimespan { 163 | type Value = Timespan; 164 | 165 | fn expecting(&self, formatter: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { 166 | write!(formatter, "a string representing a Timespan") 167 | } 168 | 169 | fn visit_str(self, value: &str) -> Result 170 | where 171 | E: de::Error, 172 | { 173 | fn to_u32(vec: &[u32]) -> u32 { 174 | if vec.is_empty() { 175 | return 0; 176 | } 177 | 178 | let mut exp = vec.len() as u32; 179 | let mut result = 0; 180 | 181 | for value in vec { 182 | exp -= 1; 183 | result += value * 10_u32.pow(exp); 184 | } 185 | 186 | result 187 | } 188 | 189 | let mut state = Parse::Days; 190 | let mut buffer = Vec::new(); 191 | let mut builder = Timespan::builder(); 192 | 193 | for c in value.chars() { 194 | match state { 195 | Parse::Days => { 196 | if c == '.' 
{ 197 | let num = to_u32(&buffer); 198 | 199 | builder.days(u64::from(num)); 200 | buffer.clear(); 201 | state = Parse::Hours; 202 | } else if c == ':' { 203 | let num = to_u32(&buffer); 204 | 205 | builder.hours(u64::from(num)); 206 | buffer.clear(); 207 | state = Parse::Minutes; 208 | } else { 209 | buffer.push(c.to_digit(BASE_10_RDX).unwrap()); 210 | } 211 | } 212 | 213 | Parse::Hours => { 214 | if c == ':' { 215 | let num = to_u32(&buffer); 216 | 217 | builder.hours(u64::from(num)); 218 | buffer.clear(); 219 | state = Parse::Minutes; 220 | } else { 221 | buffer.push(c.to_digit(BASE_10_RDX).unwrap()); 222 | } 223 | } 224 | 225 | Parse::Minutes => { 226 | if c == ':' { 227 | let num = to_u32(&buffer); 228 | 229 | builder.minutes(u64::from(num)); 230 | buffer.clear(); 231 | state = Parse::Seconds; 232 | } else { 233 | buffer.push(c.to_digit(BASE_10_RDX).unwrap()); 234 | } 235 | } 236 | 237 | Parse::Seconds => { 238 | if c == '.' { 239 | let num = to_u32(&buffer); 240 | 241 | builder.seconds(u64::from(num)); 242 | buffer.clear(); 243 | state = Parse::Fractions; 244 | } else { 245 | buffer.push(c.to_digit(BASE_10_RDX).unwrap()); 246 | } 247 | } 248 | 249 | Parse::Fractions => { 250 | buffer.push(c.to_digit(BASE_10_RDX).unwrap()); 251 | } 252 | } 253 | } 254 | 255 | let num = to_u32(&buffer); 256 | let mut residual_ticks = 0; 257 | 258 | // In case the Timespan string representation didn't contain fractions. 
259 | if let Parse::Seconds = state { 260 | builder.seconds(u64::from(num)); 261 | } else { 262 | residual_ticks = u64::from(num); 263 | } 264 | 265 | let mut timespan = builder.build(); 266 | 267 | timespan.ticks += residual_ticks; 268 | 269 | Ok(timespan) 270 | } 271 | } 272 | 273 | impl<'de> Deserialize<'de> for Timespan { 274 | fn deserialize(deserializer: D) -> Result 275 | where 276 | D: Deserializer<'de>, 277 | { 278 | deserializer.deserialize_str(ForTimespan) 279 | } 280 | } 281 | 282 | const BASE_10_RDX: u32 = 10; 283 | 284 | const TICKS_PER_MILLIS: u64 = 10_000; 285 | const TICKS_PER_SECONDS: u64 = TICKS_PER_MILLIS * 1_000; 286 | const TICKS_PER_MINUTE: u64 = TICKS_PER_SECONDS * 60; 287 | const TICKS_PER_HOUR: u64 = TICKS_PER_MINUTE * 60; 288 | const TICKS_PER_DAY: u64 = TICKS_PER_HOUR * 24; 289 | 290 | const MILLIS_PER_TICK: f64 = 1.0 / (TICKS_PER_MILLIS as f64); 291 | 292 | const MAX_MILLIS: u64 = ::std::u64::MAX / TICKS_PER_MILLIS; 293 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | /// Provides a TCP client for [GetEventStore] datatbase. 2 | 3 | #[macro_use] 4 | extern crate serde_derive; 5 | 6 | #[macro_use] 7 | extern crate log; 8 | 9 | mod connection; 10 | mod discovery; 11 | #[cfg(feature = "es6")] 12 | pub mod es6; 13 | mod internal; 14 | pub mod types; 15 | 16 | pub use connection::{Connection, ConnectionBuilder}; 17 | 18 | pub use internal::commands; 19 | 20 | pub use types::*; 21 | --------------------------------------------------------------------------------