├── .github ├── CODEOWNERS └── workflows │ ├── lint.yml │ ├── release.yml │ └── security.yml ├── .gitignore ├── .rustfmt.toml ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE ├── README.md ├── _rabbit ├── enabled_plugins └── plugins │ └── rabbitmq_delayed_message_exchange-3.11.1.ez ├── assets ├── banner_dark.png └── banner_light.png ├── deny.toml ├── omniqueue ├── Cargo.toml ├── src │ ├── backends │ │ ├── azure_queue_storage.rs │ │ ├── gcp_pubsub.rs │ │ ├── in_memory.rs │ │ ├── mod.rs │ │ ├── rabbitmq.rs │ │ ├── redis │ │ │ ├── cluster.rs │ │ │ ├── fallback.rs │ │ │ ├── mod.rs │ │ │ ├── sentinel.rs │ │ │ └── streams.rs │ │ └── sqs.rs │ ├── builder.rs │ ├── lib.rs │ ├── macros.rs │ ├── queue │ │ ├── acker.rs │ │ ├── consumer.rs │ │ ├── mod.rs │ │ └── producer.rs │ └── scheduled │ │ └── mod.rs └── tests │ └── it │ ├── azure_queue_storage.rs │ ├── gcp_pubsub.rs │ ├── main.rs │ ├── rabbitmq.rs │ ├── redis.rs │ ├── redis_cluster.rs │ ├── redis_fallback.rs │ └── sqs.rs └── testing-docker-compose.yml /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @svix/Engineering 2 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | env: 4 | CARGO_TERM_COLOR: always 5 | 6 | on: 7 | push: 8 | branches: 9 | - main 10 | pull_request: 11 | 12 | jobs: 13 | check-fmt: 14 | name: Check formatting 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - uses: dtolnay/rust-toolchain@nightly 20 | with: 21 | components: rustfmt 22 | 23 | - name: rustfmt 24 | run: cargo fmt -- --check 25 | 26 | test-versions: 27 | name: Library Lint 28 | runs-on: ubuntu-latest 29 | strategy: 30 | matrix: 31 | rust: [stable, beta] 32 | steps: 33 | - uses: actions/checkout@master 34 | 35 | - uses: dtolnay/rust-toolchain@master 36 | with: 37 | toolchain: ${{ matrix.rust }} 38 
| components: clippy 39 | 40 | - uses: taiki-e/install-action@cargo-hack 41 | 42 | - uses: Swatinem/rust-cache@v2 43 | 44 | - name: Clippy 45 | run: cargo hack --each-feature clippy --all-targets -- -D warnings 46 | 47 | - name: Start test dependencies 48 | run: docker compose -f "./testing-docker-compose.yml" up -d 49 | 50 | - name: Run tests 51 | env: 52 | AWS_DEFAULT_REGION: localhost 53 | AWS_ACCESS_KEY_ID: x 54 | AWS_SECRET_ACCESS_KEY: x 55 | run: cargo test --all-features 56 | 57 | - name: Stop test dependencies 58 | run: docker compose -f "./testing-docker-compose.yml" down 59 | 60 | typos: 61 | name: Check for typos 62 | runs-on: ubuntu-latest 63 | 64 | steps: 65 | - uses: actions/checkout@v4 66 | - uses: crate-ci/typos@v1.25.0 67 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | env: 4 | CARGO_TERM_COLOR: always 5 | 6 | on: 7 | push: 8 | tags: 9 | - 'v*' 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | environment: Release 15 | steps: 16 | - uses: actions/checkout@v4 17 | 18 | - uses: dtolnay/rust-toolchain@stable 19 | with: 20 | toolchain: stable 21 | 22 | - uses: Swatinem/rust-cache@v2 23 | # only restore cache for faster publishing, don't save back results 24 | with: { save-if: false } 25 | 26 | - name: Publish 27 | env: 28 | CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} 29 | run: cargo publish --package omniqueue 30 | -------------------------------------------------------------------------------- /.github/workflows/security.yml: -------------------------------------------------------------------------------- 1 | name: Rust Lib Security 2 | 3 | env: 4 | CARGO_TERM_COLOR: always 5 | 6 | on: 7 | push: 8 | branches: 9 | - main 10 | paths: 11 | - '**/Cargo.toml' 12 | - 'deny.toml' 13 | - '.github/workflows/security.yml' 14 | pull_request: 15 | paths: 16 | - 
'**/Cargo.toml' 17 | - 'deny.toml' 18 | - '.github/workflows/security.yml' 19 | 20 | jobs: 21 | security_audit: 22 | runs-on: ubuntu-latest 23 | steps: 24 | - uses: actions/checkout@v4 25 | - uses: EmbarkStudios/cargo-deny-action@v2 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | Cargo.lock 2 | target 3 | .vscode 4 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | wrap_comments = true 2 | imports_granularity = "Crate" 3 | group_imports = "StdExternalCrate" 4 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # unreleased 2 | 3 | ## Breaking changes 4 | 5 | - Remove `QueueError::Unsupported` 6 | - This variant was never constructed inside `omniqueue` 7 | - Rename `aws_config` to `sqs_config` and use `aws_sdk_sqs::Config` 8 | 9 | ## Additions 10 | 11 | - Add `QueueError::PayloadTooLarge` 12 | 13 | # 0.2.0 14 | 15 | This release is a big one, and we are considering omniqueue out of early development now. 16 | You can expect the API to change much less in the coming releases compared to this one. 
17 | 18 | ## Breaking changes 19 | 20 | - **redis: Some implementation changes mean that this backend is runtime-incompatible with the same backend in omniqueue 0.1** 21 | - Revise the public module structure to shorten import paths and make the docs easier to navigate 22 | - Revise the public API to require fewer trait imports for common usage 23 | - Rename a few types and traits 24 | - Most notably, `MemoryQueueBackend` is now named `InMemoryBackend` 25 | - Everything else should be easily found by searching for the old names 26 | - Remove custom encoders / decoders 27 | - Custom encoding can be handled more efficiently by wrapping omniqueue's 28 | `raw` send / receive interfaces into a custom higher-level interface 29 | - Update and prune dependency tree 30 | - Switch omniqueue's public traits from `async_trait` to [native async-in-traits][] 31 | - Simplify generic bounds (only matters if you were using omniqueue in generic code) 32 | 33 | ## Additions 34 | 35 | - Add a backend for Google Cloud's Pub/Sub queue (`gcp_pubsub` feature / module) 36 | - Add some documentation 37 | - Introduce an `omniqueue::Result` type alias 38 | 39 | [native async-in-traits]: https://blog.rust-lang.org/2023/12/21/async-fn-rpit-in-traits.html 40 | 41 | # 0.1.0 42 | 43 | Initial release. 
44 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | 4 | members = [ 5 | "omniqueue", 6 | ] 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Svix 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | # Omniqueue 8 | 9 | Omniqueue is an abstraction layer over queue backends for Rust. It includes support for RabbitMQ, 10 | Redis streams, and SQS. 
11 | 12 | Omniqueue provides a high level interface which allows sending and receiving raw byte arrays, any 13 | `serde` `Deserialize` and `Serialize` implementors via JSON encoded byte arrays, or any arbitrary 14 | types for which you have provided an encoding and/or decoding function. 15 | 16 | It is designed to be flexible and to be able to adapt to fit your existing queue configurations, but 17 | with a set of defaults that makes it simple to start sending and receiving quickly. 18 | 19 | ## How to use Omniqueue 20 | 21 | While the exact configuration will depend on the backend used, usage is roughly as follows. 22 | 23 | 1. Add `omniqueue` to your `Cargo.toml`. All backends are enabled by default including RabbitMQ, 24 | Redis (via their stream type), SQS, and an in-memory queue based off of `tokio`'s mpsc 25 | channel which is perfect for testing. 26 | 27 | If you only need some backends, then simply disable the default features, and enable any backends 28 | that you require. 29 | 30 | 2. Construct and use your queue. 31 | 32 | The exact configuration type used will depend on your backend, but it's as simple as: 33 | 34 | ```rust 35 | let cfg = SqsConfig { 36 | queue_dsn: "http://localhost:9324/queue/queue_name".to_owned(), 37 | override_endpoint: true, 38 | }; 39 | let (producer, mut consumer) = SqsBackend::builder(cfg).build_pair().await?; 40 | 41 | producer.send_serde_json(&ExampleType::default()).await?; 42 | 43 | let delivery = consumer.receive().await?; 44 | assert_eq!( 45 | delivery.payload_serde_json::<ExampleType>().await?, 46 | Some(ExampleType::default()) 47 | ); 48 | 49 | delivery.ack().await?; 50 | ``` 51 | 52 | The producer and consumers returned implement the `QueueProducer` and `QueueConsumer` traits 53 | respectively. This means you can make functions generic over any queue backend. 
Alternatively, if 54 | you need dynamic dispatch, it's as simple as one extra line in the builder: 55 | 56 | ```rust 57 | let cfg = SqsConfig { 58 | queue_dsn: "http://localhost:9324/queue/queue_name".to_owned(), 59 | override_endpoint: true, 60 | }; 61 | let (producer, mut consumer) = SqsBackend::builder(cfg) 62 | .make_dynamic() 63 | .build_pair() 64 | .await?; 65 | ``` 66 | 67 | ## Encoders and Decoders 68 | 69 | Part of the design of this crate was a clear separation of responsibility. The users of the queue 70 | generically should never have to concern themselves with how any given item is represented within 71 | the queue backend. Instead, they should be allowed to think only in Rust types. 72 | 73 | On the other hand, the users who define which backend to use should be the only ones concerned with 74 | getting the queue's internal representations to the Rust types. 75 | 76 | Enter `CustomEncoder`s and `CustomDecoder`s: these are as simple as closures or function pointers 77 | that convert from regular Rust types to the type expected by the queue backend's input or output. 78 | 79 | They are defined and used as follows: 80 | 81 | ```rust 82 | #[derive(Debug, PartialEq)] 83 | struct ExampleType { 84 | field: u8, 85 | } 86 | 87 | 88 | let (p, mut c) = RabbitMqBackend::builder(cfg) 89 | // RabbitMQ's internal representation is an arbitrary byte array. 90 | .with_encoder(|et: &ExampleType| -> omniqueue::Result<Vec<u8>> { 91 | Ok(vec![et.field]) 92 | }) 93 | .with_decoder(|v: &Vec<u8>| -> omniqueue::Result<ExampleType> { 94 | Ok(ExampleType { 95 | field: *v.first().unwrap_or(&0), 96 | }) 97 | }) 98 | .build_pair() 99 | .await?; 100 | 101 | let payload = ExampleType { field: 2 }; 102 | 103 | p.send_custom(&payload).await?; 104 | 105 | let delivery = c.receive().await?; 106 | assert_eq!(delivery.payload_custom::<ExampleType>()?, Some(payload)); 107 | 108 | delivery.ack().await?; 109 | ``` 110 | 111 | These functions are called automatically assuming you have an encoder and/or decoder for the right 112 | type. 
This makes adapting the crate to an existing queue whose internal data layout doesn't match 113 | the defaults to a T as simple as possible. 114 | -------------------------------------------------------------------------------- /_rabbit/enabled_plugins: -------------------------------------------------------------------------------- 1 | [rabbitmq_management, rabbitmq_delayed_message_exchange]. -------------------------------------------------------------------------------- /_rabbit/plugins/rabbitmq_delayed_message_exchange-3.11.1.ez: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/svix/omniqueue-rs/6e879b00701e4162491b59db649bde81f5935660/_rabbit/plugins/rabbitmq_delayed_message_exchange-3.11.1.ez -------------------------------------------------------------------------------- /assets/banner_dark.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/svix/omniqueue-rs/6e879b00701e4162491b59db649bde81f5935660/assets/banner_dark.png -------------------------------------------------------------------------------- /assets/banner_light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/svix/omniqueue-rs/6e879b00701e4162491b59db649bde81f5935660/assets/banner_light.png -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | [graph] 2 | targets = [ 3 | { triple = "x86_64-pc-windows-gnu" }, 4 | { triple = "x86_64-unknown-linux-musl" }, 5 | { triple = "x86_64-apple-darwin" }, 6 | { triple = "aarch64-apple-darwin" }, 7 | ] 8 | 9 | [advisories] 10 | db-path = "~/.cargo/advisory-db" 11 | db-urls = ["https://github.com/rustsec/advisory-db"] 12 | yanked = "deny" 13 | ignore = [ 14 | # TODO: Wait for dependencies to upgrade off of paste 15 | "RUSTSEC-2024-0436", 
16 | ] 17 | 18 | [licenses] 19 | allow = [ 20 | "Apache-2.0", 21 | "BSD-2-Clause", 22 | "BSD-3-Clause", 23 | "CDLA-Permissive-2.0", 24 | "ISC", 25 | "MIT", 26 | "MPL-2.0", 27 | "Unicode-3.0", 28 | ] 29 | confidence-threshold = 0.8 30 | exceptions = [ 31 | #{ allow = ["Zlib"], name = "adler32", version = "*" }, 32 | ] 33 | 34 | [[licenses.clarify]] 35 | name = "ring" 36 | version = "*" 37 | expression = "MIT AND ISC AND OpenSSL" 38 | license-files = [ 39 | { path = "LICENSE", hash = 0xbd0eed23 } 40 | ] 41 | 42 | [[licenses.clarify]] 43 | name = "encoding_rs" 44 | version = "0.8.30" 45 | expression = "MIT OR Apache-2.0" 46 | license-files = [ 47 | { path = "COPYRIGHT", hash = 0x39f8ad31 } 48 | ] 49 | 50 | [licenses.private] 51 | ignore = false 52 | registries = [] 53 | 54 | [bans] 55 | multiple-versions = "warn" 56 | wildcards = "allow" 57 | highlight = "all" 58 | allow = [] 59 | deny = [] 60 | skip = [] 61 | skip-tree = [] 62 | 63 | [sources] 64 | unknown-registry = "warn" 65 | unknown-git = "warn" 66 | allow-registry = ["https://github.com/rust-lang/crates.io-index"] 67 | allow-git = [] 68 | 69 | -------------------------------------------------------------------------------- /omniqueue/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "omniqueue" 3 | version = "0.2.1" 4 | license = "MIT" 5 | description = "An abstraction layer over various queue backends" 6 | authors = ["Svix Inc. 
"] 7 | repository = "https://github.com/svix/omniqueue-rs/" 8 | readme = "../README.md" 9 | rust-version = "1.79" 10 | edition = "2021" 11 | 12 | [dependencies] 13 | aws-config = { version = "1.1.5", default-features = false, features = ["behavior-version-latest"], optional = true } 14 | aws-sdk-sqs = { version = "1.13.0", optional = true } 15 | azure_storage = { version = "0.21.0", optional = true } 16 | azure_storage_queues = { version = "0.21.0", optional = true } 17 | bb8 = { version = "0.9.0", optional = true } 18 | bb8-redis = { version = "0.23.0", optional = true } 19 | bytesize = "2.0.1" 20 | futures-util = { version = "0.3.28", default-features = false, features = ["async-await", "std"], optional = true } 21 | gcloud-googleapis = { version = "1.2.0", optional = true } 22 | gcloud-pubsub = { version = "1.3.0", optional = true } 23 | lapin = { version = "2", optional = true } 24 | redis = { version = "0.31.0", features = ["tokio-comp", "tokio-native-tls-comp", "streams"], optional = true } 25 | serde = "1.0.196" 26 | serde_json = "1" 27 | svix-ksuid = { version = "0.8.0", optional = true } 28 | sync_wrapper = "1.0.1" 29 | thiserror = "2.0" 30 | time = "0.3.34" 31 | tokio = { version = "1", features = ["rt", "sync", "time"] } 32 | tracing = "0.1" 33 | 34 | [dev-dependencies] 35 | anyhow = "1.0.79" 36 | fastrand = "2.0.1" 37 | rstest = "0.25.0" 38 | serde = { version = "1.0.196", features = ["derive"] } 39 | tokio = { version = "1", features = ["macros"] } 40 | tokio-executor-trait = "2.1" 41 | tokio-reactor-trait = "1.1.0" 42 | 43 | [features] 44 | default = ["in_memory", "gcp_pubsub", "rabbitmq", "redis", "redis_cluster", "sqs"] 45 | in_memory = [] 46 | gcp_pubsub = ["dep:futures-util", "dep:gcloud-googleapis", "dep:gcloud-pubsub"] 47 | rabbitmq = ["dep:futures-util", "dep:lapin"] 48 | # Generate message IDs for queue items. Likely not needed outside of Svix. 
49 | rabbitmq-with-message-ids = ["rabbitmq", "dep:svix-ksuid"] 50 | redis = ["dep:bb8", "dep:bb8-redis", "dep:redis", "dep:svix-ksuid"] 51 | redis_cluster = ["redis", "redis/cluster-async"] 52 | redis_sentinel = ["redis", "redis/sentinel"] 53 | sqs = ["dep:aws-config", "dep:aws-sdk-sqs", "dep:futures-util"] 54 | azure_queue_storage = ["dep:azure_storage", "dep:azure_storage_queues"] 55 | beta = [] 56 | -------------------------------------------------------------------------------- /omniqueue/src/backends/azure_queue_storage.rs: -------------------------------------------------------------------------------- 1 | use std::{num::NonZeroUsize, time::Duration}; 2 | 3 | use azure_storage::StorageCredentials; 4 | use azure_storage_queues::{ 5 | operations::Message, PopReceipt, QueueClient, QueueServiceClientBuilder, 6 | }; 7 | use serde::Serialize; 8 | 9 | #[allow(deprecated)] 10 | use crate::{ 11 | builder::Static, queue::Acker, Delivery, QueueBackend, QueueBuilder, QueueError, Result, 12 | }; 13 | 14 | fn get_client(cfg: &AqsConfig) -> QueueClient { 15 | let AqsConfig { 16 | queue_name, 17 | storage_account, 18 | credentials, 19 | cloud_uri, 20 | .. 21 | } = cfg; 22 | let mut builder = QueueServiceClientBuilder::new(storage_account, credentials.clone()); 23 | if let Some(cloud_uri) = cloud_uri { 24 | builder = builder.cloud_location(azure_storage::CloudLocation::Custom { 25 | account: cfg.storage_account.clone(), 26 | uri: cloud_uri.clone(), 27 | }); 28 | } 29 | builder.build().queue_client(queue_name) 30 | } 31 | 32 | /// Note that blocking receives are not supported by Azure Queue Storage and 33 | /// that message order is not guaranteed. 34 | #[non_exhaustive] 35 | pub struct AqsBackend; 36 | 37 | impl AqsBackend { 38 | /// Creates a new Azure Queue Storage builder with the given 39 | /// configuration. 
40 | pub fn builder(cfg: impl Into) -> QueueBuilder { 41 | #[allow(deprecated)] 42 | QueueBuilder::new(cfg.into()) 43 | } 44 | } 45 | 46 | const DEFAULT_RECV_TIMEOUT: Duration = Duration::from_secs(180); 47 | const DEFAULT_EMPTY_RECV_DELAY: Duration = Duration::from_millis(200); 48 | 49 | #[derive(Clone)] 50 | pub struct AqsConfig { 51 | pub queue_name: String, 52 | pub empty_receive_delay: Option, 53 | pub message_ttl: Duration, 54 | pub storage_account: String, 55 | pub credentials: StorageCredentials, 56 | pub cloud_uri: Option, 57 | pub receive_timeout: Option, 58 | } 59 | 60 | #[allow(deprecated)] 61 | impl QueueBackend for AqsBackend { 62 | type Config = AqsConfig; 63 | 64 | type PayloadIn = String; 65 | type PayloadOut = String; 66 | 67 | type Producer = AqsProducer; 68 | type Consumer = AqsConsumer; 69 | 70 | async fn new_pair(config: Self::Config) -> Result<(AqsProducer, AqsConsumer)> { 71 | let client = get_client(&config); 72 | Ok(( 73 | AqsProducer { 74 | client: client.clone(), 75 | config: config.clone(), 76 | }, 77 | AqsConsumer { 78 | client: client.clone(), 79 | config: config.clone(), 80 | }, 81 | )) 82 | } 83 | 84 | async fn producing_half(config: Self::Config) -> Result { 85 | let client = get_client(&config); 86 | Ok(AqsProducer { client, config }) 87 | } 88 | 89 | async fn consuming_half(config: Self::Config) -> Result { 90 | let client = get_client(&config); 91 | Ok(AqsConsumer { client, config }) 92 | } 93 | } 94 | 95 | pub struct AqsProducer { 96 | client: QueueClient, 97 | config: AqsConfig, 98 | } 99 | 100 | impl AqsProducer { 101 | pub async fn send_raw(&self, payload: &str) -> Result<()> { 102 | self.send_raw_scheduled(payload, Duration::ZERO).await 103 | } 104 | 105 | #[tracing::instrument( 106 | name = "send", 107 | skip_all, 108 | fields( 109 | payload_size = payload.len(), 110 | delay = (delay > Duration::ZERO).then(|| tracing::field::debug(delay)) 111 | ) 112 | )] 113 | pub async fn send_raw_scheduled(&self, payload: &str, delay: 
Duration) -> Result<()> { 114 | self.client 115 | .put_message(payload) 116 | .visibility_timeout(delay) 117 | .ttl(self.config.message_ttl) 118 | .await 119 | .map_err(QueueError::generic) 120 | .map(|_| ()) 121 | } 122 | 123 | pub async fn send_serde_json(&self, payload: &P) -> Result<()> { 124 | let payload = serde_json::to_string(payload)?; 125 | self.send_raw(&payload).await 126 | } 127 | 128 | pub async fn send_serde_json_scheduled( 129 | &self, 130 | payload: &P, 131 | delay: Duration, 132 | ) -> Result<()> { 133 | let payload = serde_json::to_string(payload)?; 134 | self.send_raw_scheduled(&payload, delay).await 135 | } 136 | 137 | pub async fn redrive_dlq(&self) -> Result<()> { 138 | Err(QueueError::Unsupported( 139 | "redrive_dlq is not supported by AqsBackend", 140 | )) 141 | } 142 | } 143 | 144 | impl crate::QueueProducer for AqsProducer { 145 | type Payload = String; 146 | omni_delegate!(send_raw, send_serde_json, redrive_dlq); 147 | } 148 | impl crate::ScheduledQueueProducer for AqsProducer { 149 | omni_delegate!(send_raw_scheduled, send_serde_json_scheduled); 150 | } 151 | 152 | /// Note that blocking receives are not supported by Azure Queue Storage and 153 | /// that message order is not guaranteed. 
154 | pub struct AqsConsumer { 155 | client: QueueClient, 156 | config: AqsConfig, 157 | } 158 | 159 | struct AqsAcker { 160 | client: QueueClient, 161 | already_acked_or_nacked: bool, 162 | pop_receipt: PopReceipt, 163 | } 164 | 165 | impl Acker for AqsAcker { 166 | async fn ack(&mut self) -> Result<()> { 167 | if self.already_acked_or_nacked { 168 | return Err(QueueError::CannotAckOrNackTwice); 169 | } 170 | 171 | self.client 172 | .pop_receipt_client(self.pop_receipt.clone()) 173 | .delete() 174 | .await 175 | .map_err(QueueError::generic)?; 176 | self.already_acked_or_nacked = true; 177 | Ok(()) 178 | } 179 | 180 | async fn nack(&mut self) -> Result<()> { 181 | Ok(()) 182 | } 183 | 184 | async fn set_ack_deadline(&mut self, _duration: Duration) -> Result<()> { 185 | Err(QueueError::Unsupported( 186 | "set_ack_deadline is not yet supported by AqsBackend", 187 | )) 188 | } 189 | } 190 | 191 | impl AqsConsumer { 192 | fn wrap_message(&self, message: &Message) -> Delivery { 193 | Delivery::new( 194 | message.message_text.as_bytes().to_owned(), 195 | AqsAcker { 196 | client: self.client.clone(), 197 | pop_receipt: message.pop_receipt(), 198 | already_acked_or_nacked: false, 199 | }, 200 | ) 201 | } 202 | 203 | /// Note that blocking receives are not supported by Azure Queue Storage. 204 | /// Calls to this method will return immediately if no messages are 205 | /// available for delivery in the queue. 
206 | pub async fn receive(&mut self) -> Result { 207 | self.client 208 | .get_messages() 209 | .visibility_timeout(self.config.receive_timeout.unwrap_or(DEFAULT_RECV_TIMEOUT)) 210 | .await 211 | .map_err(QueueError::generic) 212 | .and_then(|m| m.messages.into_iter().next().ok_or(QueueError::NoData)) 213 | .map(|m| self.wrap_message(&m)) 214 | } 215 | 216 | pub async fn receive_all( 217 | &mut self, 218 | max_messages: usize, 219 | deadline: Duration, 220 | ) -> Result> { 221 | let end = std::time::Instant::now() + deadline; 222 | let mut interval = tokio::time::interval( 223 | self.config 224 | .empty_receive_delay 225 | .unwrap_or(DEFAULT_EMPTY_RECV_DELAY), 226 | ); 227 | loop { 228 | interval.tick().await; 229 | let msgs = self 230 | .client 231 | .get_messages() 232 | .number_of_messages(max_messages.try_into().unwrap_or(u8::MAX)) 233 | .visibility_timeout(self.config.receive_timeout.unwrap_or(DEFAULT_RECV_TIMEOUT)) 234 | .await 235 | .map_err(QueueError::generic) 236 | .map(|m| { 237 | m.messages 238 | .iter() 239 | .map(|m| self.wrap_message(m)) 240 | .collect::>() 241 | })?; 242 | if !msgs.is_empty() { 243 | return Ok(msgs); 244 | } 245 | if std::time::Instant::now() > end { 246 | return Ok(vec![]); 247 | } 248 | } 249 | } 250 | } 251 | 252 | impl crate::QueueConsumer for AqsConsumer { 253 | type Payload = String; 254 | omni_delegate!(receive, receive_all); 255 | 256 | fn max_messages(&self) -> Option { 257 | // https://learn.microsoft.com/en-us/rest/api/storageservices/get-messages#uri-parameters 258 | NonZeroUsize::new(32) 259 | } 260 | } 261 | -------------------------------------------------------------------------------- /omniqueue/src/backends/gcp_pubsub.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | path::{Path, PathBuf}, 3 | sync::Arc, 4 | time::Duration, 5 | }; 6 | 7 | use futures_util::{future::try_join_all, StreamExt}; 8 | use gcloud_googleapis::pubsub::v1::PubsubMessage; 9 | use 
gcloud_pubsub::{ 10 | client::{google_cloud_auth::credentials::CredentialsFile, Client, ClientConfig}, 11 | publisher::Publisher, 12 | subscriber::ReceivedMessage, 13 | subscription::Subscription, 14 | }; 15 | use serde::Serialize; 16 | 17 | #[allow(deprecated)] 18 | use crate::{ 19 | builder::{QueueBuilder, Static}, 20 | queue::{Acker, Delivery, QueueBackend}, 21 | QueueError, Result, 22 | }; 23 | 24 | pub struct GcpPubSubBackend; 25 | 26 | impl GcpPubSubBackend { 27 | /// Creates a new Google Cloud Pub/Sub queue builder with the given 28 | /// configuration. 29 | pub fn builder(config: GcpPubSubConfig) -> QueueBuilder { 30 | #[allow(deprecated)] 31 | QueueBuilder::new(config) 32 | } 33 | } 34 | 35 | type Payload = Vec; 36 | 37 | // FIXME: topic/subscription are each for read/write. Split config up? 38 | #[derive(Clone, Debug, Eq, PartialEq)] 39 | pub struct GcpPubSubConfig { 40 | pub topic_id: String, 41 | pub subscription_id: String, 42 | pub credentials_file: Option, 43 | } 44 | 45 | /// Make a `ClientConfig` from a `CredentialsFile` on disk. 46 | async fn configure_client_from_file>(cred_file_path: P) -> Result { 47 | let bytes = std::fs::read(cred_file_path).map_err(QueueError::generic)?; 48 | let creds: CredentialsFile = serde_json::from_slice(&bytes).map_err(QueueError::generic)?; 49 | ClientConfig::default() 50 | .with_credentials(creds) 51 | .await 52 | .map_err(QueueError::generic) 53 | } 54 | 55 | /// Making a `ClientConfig` via env vars is possible in two ways: 56 | /// - setting `GOOGLE_APPLICATION_CREDENTIALS` to the file path to have it 57 | /// loaded automatically 58 | /// - setting `GOOGLE_APPLICATION_CREDENTIALS_JSON` to the file contents 59 | /// (avoiding the need for a file on disk). 
60 | async fn configure_client_from_env() -> Result { 61 | ClientConfig::default() 62 | .with_auth() 63 | .await 64 | .map_err(QueueError::generic) 65 | } 66 | 67 | async fn get_client(cfg: &GcpPubSubConfig) -> Result { 68 | let config = { 69 | if let Some(fp) = &cfg.credentials_file { 70 | tracing::trace!("reading gcp creds from file: {}", fp.display()); 71 | configure_client_from_file(&fp).await? 72 | } else { 73 | tracing::trace!("reading gcp creds from env"); 74 | configure_client_from_env().await? 75 | } 76 | }; 77 | Client::new(config).await.map_err(QueueError::generic) 78 | } 79 | 80 | #[allow(deprecated)] 81 | impl QueueBackend for GcpPubSubBackend { 82 | type Config = GcpPubSubConfig; 83 | 84 | type PayloadIn = Payload; 85 | type PayloadOut = Payload; 86 | 87 | type Producer = GcpPubSubProducer; 88 | type Consumer = GcpPubSubConsumer; 89 | 90 | async fn new_pair(config: Self::Config) -> Result<(GcpPubSubProducer, GcpPubSubConsumer)> { 91 | let client = get_client(&config).await?; 92 | Ok(( 93 | GcpPubSubProducer::new(client.clone(), config.topic_id).await?, 94 | GcpPubSubConsumer::new(client, config.subscription_id).await?, 95 | )) 96 | } 97 | 98 | async fn producing_half(config: Self::Config) -> Result { 99 | let client = get_client(&config).await?; 100 | GcpPubSubProducer::new(client, config.topic_id).await 101 | } 102 | 103 | async fn consuming_half(config: Self::Config) -> Result { 104 | let client = get_client(&config).await?; 105 | GcpPubSubConsumer::new(client, config.subscription_id).await 106 | } 107 | } 108 | 109 | pub struct GcpPubSubProducer { 110 | client: Client, 111 | topic_id: Arc, 112 | } 113 | 114 | impl GcpPubSubProducer { 115 | async fn new(client: Client, topic_id: String) -> Result { 116 | let topic = client.topic(&topic_id); 117 | // Only warn if the topic doesn't exist at this point. 
118 | // If it gets created after the fact, we should be able to still use it 119 | // when available, otherwise if it's still missing at that time, error. 120 | if !topic.exists(None).await.map_err(QueueError::generic)? { 121 | tracing::warn!("topic {} does not exist", &topic_id); 122 | } 123 | Ok(Self { 124 | client, 125 | topic_id: Arc::new(topic_id), 126 | }) 127 | } 128 | 129 | async fn publisher(&self) -> Result { 130 | // N.b. defer the creation of a publisher/topic until needed. Helps recover when 131 | // the topic does not yet exist, but will soon. 132 | // Might be more expensive to recreate each time, but overall more reliable. 133 | let topic = self.client.topic(&self.topic_id); 134 | 135 | // Publishing to a non-existent topic will cause the publisher to wait 136 | // (forever?) Giving this error will allow dependents to handle the 137 | // error case immediately when this happens, instead of holding the 138 | // connection open indefinitely. 139 | if !topic.exists(None).await.map_err(QueueError::generic)? 
{ 140 | return Err(QueueError::Generic( 141 | format!("topic {} does not exist", &self.topic_id).into(), 142 | )); 143 | } 144 | 145 | // FIXME: may need to expose `PublisherConfig` to caller so they can tweak this 146 | Ok(topic.new_publisher(None)) 147 | } 148 | 149 | #[tracing::instrument( 150 | name = "send", 151 | skip_all, 152 | fields(payload_size = payload.len()) 153 | )] 154 | pub async fn send_raw(&self, payload: &[u8]) -> Result<()> { 155 | let msg = PubsubMessage { 156 | data: payload.to_vec(), 157 | ..Default::default() 158 | }; 159 | 160 | let publisher = self.publisher().await?; 161 | let awaiter = publisher.publish(msg).await; 162 | awaiter.get().await.map_err(QueueError::generic)?; 163 | Ok(()) 164 | } 165 | 166 | pub async fn send_serde_json(&self, payload: &P) -> Result<()> { 167 | self.send_raw(&serde_json::to_vec(&payload)?).await 168 | } 169 | 170 | pub async fn redrive_dlq(&self) -> Result<()> { 171 | Err(QueueError::Unsupported( 172 | "redrive_dlq is not supported by GcpPubSubBackend", 173 | )) 174 | } 175 | } 176 | 177 | impl std::fmt::Debug for GcpPubSubProducer { 178 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 179 | f.debug_struct("GcpPubSubProducer") 180 | .field("topic_id", &self.topic_id) 181 | .finish() 182 | } 183 | } 184 | 185 | impl crate::QueueProducer for GcpPubSubProducer { 186 | type Payload = Payload; 187 | omni_delegate!(send_raw, send_serde_json, redrive_dlq); 188 | 189 | /// This method is overwritten for the Google Cloud Pub/Sub backend to be 190 | /// more efficient than the default of sequentially publishing `payloads`. 
191 | #[tracing::instrument(name = "send_batch", skip_all)] 192 | async fn send_raw_batch( 193 | &self, 194 | payloads: impl IntoIterator + Send, IntoIter: Send> + Send, 195 | ) -> Result<()> { 196 | let msgs = payloads 197 | .into_iter() 198 | .map(|payload| PubsubMessage { 199 | data: payload.as_ref().to_vec(), 200 | ..Default::default() 201 | }) 202 | .collect(); 203 | 204 | let publisher = self.publisher().await?; 205 | let awaiters = publisher.publish_bulk(msgs).await; 206 | try_join_all(awaiters.into_iter().map(|a| a.get())) 207 | .await 208 | .map_err(QueueError::generic)?; 209 | Ok(()) 210 | } 211 | 212 | /// This method is overwritten for the Google Cloud Pub/Sub backend to be 213 | /// more efficient than the default of sequentially publishing `payloads`. 214 | #[tracing::instrument(name = "send_batch", skip_all)] 215 | async fn send_serde_json_batch( 216 | &self, 217 | payloads: impl IntoIterator + Send, 218 | ) -> Result<()> { 219 | let msgs = payloads 220 | .into_iter() 221 | .map(|payload| { 222 | Ok(PubsubMessage { 223 | data: serde_json::to_vec(&payload)?, 224 | ..Default::default() 225 | }) 226 | }) 227 | .collect::>()?; 228 | 229 | let publisher = self.publisher().await?; 230 | let awaiters = publisher.publish_bulk(msgs).await; 231 | try_join_all(awaiters.into_iter().map(|a| a.get())) 232 | .await 233 | .map_err(QueueError::generic)?; 234 | Ok(()) 235 | } 236 | } 237 | 238 | pub struct GcpPubSubConsumer { 239 | client: Client, 240 | subscription_id: Arc, 241 | } 242 | 243 | impl GcpPubSubConsumer { 244 | async fn new(client: Client, subscription_id: String) -> Result { 245 | Ok(Self { 246 | client, 247 | subscription_id: Arc::new(subscription_id), 248 | }) 249 | } 250 | 251 | pub async fn receive(&mut self) -> Result { 252 | let subscription = subscription(&self.client, &self.subscription_id).await?; 253 | let mut stream = subscription 254 | .subscribe(None) 255 | .await 256 | .map_err(QueueError::generic)?; 257 | 258 | let recv_msg = 
stream.next().await.ok_or_else(|| QueueError::NoData)?; 259 | 260 | Ok(self.wrap_recv_msg(recv_msg)) 261 | } 262 | 263 | pub async fn receive_all( 264 | &mut self, 265 | max_messages: usize, 266 | deadline: Duration, 267 | ) -> Result> { 268 | let subscription = subscription(&self.client, &self.subscription_id).await?; 269 | match tokio::time::timeout(deadline, subscription.pull(max_messages as _, None)).await { 270 | Ok(messages) => Ok(messages 271 | .map_err(QueueError::generic)? 272 | .into_iter() 273 | .map(|m| self.wrap_recv_msg(m)) 274 | .collect()), 275 | // Timeout 276 | Err(_) => Ok(vec![]), 277 | } 278 | } 279 | 280 | fn wrap_recv_msg(&self, mut recv_msg: ReceivedMessage) -> Delivery { 281 | // FIXME: would be nice to avoid having to move the data out here. 282 | // While it's possible to ack via a subscription and an ack_id, nack 283 | // is only possible via a `ReceiveMessage`. This means we either need 284 | // to hold 2 copies of the payload, or move the bytes out so they can be 285 | // returned _outside of the Acker_. 286 | let payload = recv_msg.message.data.drain(..).collect(); 287 | 288 | Delivery::new( 289 | payload, 290 | GcpPubSubAcker { 291 | recv_msg, 292 | subscription_id: self.subscription_id.clone(), 293 | }, 294 | ) 295 | } 296 | } 297 | 298 | impl std::fmt::Debug for GcpPubSubConsumer { 299 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 300 | f.debug_struct("GcpPubSubConsumer") 301 | .field("subscription_id", &self.subscription_id) 302 | .finish() 303 | } 304 | } 305 | 306 | async fn subscription(client: &Client, subscription_id: &str) -> Result { 307 | let subscription = client.subscription(subscription_id); 308 | if !subscription 309 | .exists(None) 310 | .await 311 | .map_err(QueueError::generic)? 
312 | { 313 | return Err(QueueError::Generic( 314 | format!("subscription {} does not exist", &subscription_id).into(), 315 | )); 316 | } 317 | Ok(subscription) 318 | } 319 | 320 | impl crate::QueueConsumer for GcpPubSubConsumer { 321 | type Payload = Payload; 322 | omni_delegate!(receive, receive_all); 323 | } 324 | 325 | struct GcpPubSubAcker { 326 | recv_msg: ReceivedMessage, 327 | subscription_id: Arc, 328 | } 329 | 330 | impl std::fmt::Debug for GcpPubSubAcker { 331 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 332 | f.debug_struct("GcpPubSubAcker") 333 | .field("ack_id", &self.recv_msg.ack_id()) 334 | .field("message_id", &self.recv_msg.message.message_id) 335 | .field("subscription_id", &self.subscription_id) 336 | .finish() 337 | } 338 | } 339 | 340 | impl Acker for GcpPubSubAcker { 341 | async fn ack(&mut self) -> Result<()> { 342 | self.recv_msg.ack().await.map_err(QueueError::generic) 343 | } 344 | 345 | async fn nack(&mut self) -> Result<()> { 346 | self.recv_msg.nack().await.map_err(QueueError::generic) 347 | } 348 | 349 | async fn set_ack_deadline(&mut self, duration: Duration) -> Result<()> { 350 | let duration_secs = duration.as_secs().try_into().map_err(|e| { 351 | QueueError::Generic(Box::::from(format!( 352 | "set_ack_deadline duration {duration:?} is too large: {e:?}" 353 | ))) 354 | })?; 355 | 356 | self.recv_msg 357 | .modify_ack_deadline(duration_secs) 358 | .await 359 | .map_err(QueueError::generic) 360 | } 361 | } 362 | -------------------------------------------------------------------------------- /omniqueue/src/backends/in_memory.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, Instant}; 2 | 3 | use serde::Serialize; 4 | use tokio::sync::mpsc; 5 | 6 | #[allow(deprecated)] 7 | use crate::{ 8 | builder::{QueueBuilder, Static}, 9 | queue::{Acker, Delivery, QueueBackend}, 10 | QueueError, Result, 11 | }; 12 | 13 | pub struct InMemoryBackend; 14 | 15 | impl 
InMemoryBackend { 16 | /// Creates a new in-memory queue builder. 17 | pub fn builder() -> QueueBuilder { 18 | #[allow(deprecated)] 19 | QueueBuilder::new(()) 20 | } 21 | } 22 | 23 | #[allow(deprecated)] 24 | impl QueueBackend for InMemoryBackend { 25 | type PayloadIn = Vec; 26 | type PayloadOut = Vec; 27 | 28 | type Producer = InMemoryProducer; 29 | type Consumer = InMemoryConsumer; 30 | 31 | type Config = (); 32 | 33 | async fn new_pair(_config: ()) -> Result<(InMemoryProducer, InMemoryConsumer)> { 34 | let (tx, rx) = mpsc::unbounded_channel(); 35 | 36 | Ok(( 37 | InMemoryProducer { tx: tx.clone() }, 38 | InMemoryConsumer { tx, rx }, 39 | )) 40 | } 41 | 42 | async fn producing_half(_config: ()) -> Result { 43 | Err(QueueError::CannotCreateHalf) 44 | } 45 | 46 | async fn consuming_half(_config: ()) -> Result { 47 | Err(QueueError::CannotCreateHalf) 48 | } 49 | } 50 | 51 | pub struct InMemoryProducer { 52 | tx: mpsc::UnboundedSender>, 53 | } 54 | 55 | impl InMemoryProducer { 56 | pub async fn send_raw(&self, payload: &[u8]) -> Result<()> { 57 | self.tx.send(payload.to_vec()).map_err(QueueError::generic) 58 | } 59 | 60 | pub async fn send_serde_json(&self, payload: &P) -> Result<()> { 61 | let payload = serde_json::to_vec(payload)?; 62 | self.send_raw(&payload).await 63 | } 64 | 65 | pub async fn send_raw_scheduled(&self, payload: &[u8], delay: Duration) -> Result<()> { 66 | let tx = self.tx.clone(); 67 | let payload = payload.to_vec(); 68 | tokio::spawn(async move { 69 | tracing::trace!("MemoryQueue: event sent > (delay: {:?})", delay); 70 | tokio::time::sleep(delay).await; 71 | if tx.send(payload).is_err() { 72 | tracing::error!("Receiver dropped"); 73 | } 74 | }); 75 | Ok(()) 76 | } 77 | 78 | pub async fn send_serde_json_scheduled( 79 | &self, 80 | payload: &P, 81 | delay: Duration, 82 | ) -> Result<()> { 83 | let payload = serde_json::to_vec(payload)?; 84 | self.send_raw_scheduled(&payload, delay).await 85 | } 86 | 87 | pub async fn redrive_dlq(&self) -> 
Result<()> { 88 | Err(QueueError::Unsupported( 89 | "redrive_dlq is not supported by InMemoryBackend", 90 | )) 91 | } 92 | } 93 | 94 | impl crate::QueueProducer for InMemoryProducer { 95 | type Payload = Vec; 96 | omni_delegate!(send_raw, send_serde_json, redrive_dlq); 97 | } 98 | impl crate::ScheduledQueueProducer for InMemoryProducer { 99 | omni_delegate!(send_raw_scheduled, send_serde_json_scheduled); 100 | } 101 | 102 | pub struct InMemoryConsumer { 103 | rx: mpsc::UnboundedReceiver>, 104 | tx: mpsc::UnboundedSender>, 105 | } 106 | 107 | impl InMemoryConsumer { 108 | fn wrap_payload(&self, payload: Vec) -> Delivery { 109 | Delivery::new( 110 | payload.clone(), 111 | InMemoryAcker { 112 | tx: self.tx.clone(), 113 | payload_copy: Some(payload), 114 | already_acked_or_nacked: false, 115 | }, 116 | ) 117 | } 118 | 119 | pub async fn receive(&mut self) -> Result { 120 | let payload = self 121 | .rx 122 | .recv() 123 | .await 124 | .ok_or_else(|| QueueError::Generic("recv failed".into()))?; 125 | Ok(self.wrap_payload(payload)) 126 | } 127 | 128 | pub async fn receive_all( 129 | &mut self, 130 | max_messages: usize, 131 | deadline: Duration, 132 | ) -> Result> { 133 | let mut out = Vec::with_capacity(max_messages); 134 | let start = Instant::now(); 135 | match tokio::time::timeout(deadline, self.rx.recv()).await { 136 | Ok(Some(x)) => out.push(self.wrap_payload(x)), 137 | // Timeouts and stream termination 138 | Err(_) | Ok(None) => return Ok(out), 139 | } 140 | 141 | if max_messages > 1 { 142 | // `try_recv` will break the loop if no ready items are already 143 | // buffered in the channel. This should allow us to 144 | // opportunistically fill up the buffer in the remaining time. 
145 | while let Ok(x) = self.rx.try_recv() { 146 | out.push(self.wrap_payload(x)); 147 | if out.len() >= max_messages || start.elapsed() >= deadline { 148 | break; 149 | } 150 | } 151 | } 152 | Ok(out) 153 | } 154 | } 155 | 156 | impl crate::QueueConsumer for InMemoryConsumer { 157 | type Payload = Vec; 158 | omni_delegate!(receive, receive_all); 159 | } 160 | 161 | struct InMemoryAcker { 162 | tx: mpsc::UnboundedSender>, 163 | payload_copy: Option>, 164 | already_acked_or_nacked: bool, 165 | } 166 | 167 | impl Acker for InMemoryAcker { 168 | async fn ack(&mut self) -> Result<()> { 169 | if self.already_acked_or_nacked { 170 | Err(QueueError::CannotAckOrNackTwice) 171 | } else { 172 | self.already_acked_or_nacked = true; 173 | Ok(()) 174 | } 175 | } 176 | 177 | async fn nack(&mut self) -> Result<()> { 178 | if self.already_acked_or_nacked { 179 | Err(QueueError::CannotAckOrNackTwice) 180 | } else { 181 | self.already_acked_or_nacked = true; 182 | self.tx 183 | .send( 184 | self.payload_copy 185 | .take() 186 | .ok_or(QueueError::CannotAckOrNackTwice)?, 187 | ) 188 | .map_err(QueueError::generic) 189 | } 190 | } 191 | 192 | async fn set_ack_deadline(&mut self, _duration: Duration) -> Result<()> { 193 | Err(QueueError::Unsupported( 194 | "set_ack_deadline is not yet supported by InMemoryBackend", 195 | )) 196 | } 197 | } 198 | 199 | #[cfg(test)] 200 | mod tests { 201 | use std::time::{Duration, Instant}; 202 | 203 | use serde::{Deserialize, Serialize}; 204 | 205 | use super::InMemoryBackend; 206 | use crate::QueueProducer; 207 | 208 | #[derive(Clone, Copy, Debug, Eq, Deserialize, PartialEq, Serialize)] 209 | struct TypeA { 210 | a: i32, 211 | } 212 | 213 | #[tokio::test] 214 | async fn simple_queue_test() { 215 | let (p, mut c) = InMemoryBackend::builder().build_pair().await.unwrap(); 216 | 217 | p.send_serde_json(&TypeA { a: 13 }).await.unwrap(); 218 | assert_eq!( 219 | c.receive() 220 | .await 221 | .unwrap() 222 | .payload_serde_json::() 223 | .unwrap() 224 | 
.unwrap(), 225 | TypeA { a: 13 }, 226 | ); 227 | 228 | p.send_bytes(&serde_json::to_vec(&TypeA { a: 14 }).unwrap()) 229 | .await 230 | .unwrap(); 231 | assert_eq!( 232 | serde_json::from_slice::(c.receive().await.unwrap().borrow_payload().unwrap()) 233 | .unwrap(), 234 | TypeA { a: 14 }, 235 | ); 236 | } 237 | 238 | #[derive(Debug, Deserialize, Serialize, PartialEq)] 239 | struct ExType { 240 | a: u8, 241 | } 242 | 243 | /// Consumer will return immediately if there are fewer than max messages to 244 | /// start with. 245 | #[tokio::test] 246 | async fn test_send_recv_all_partial() { 247 | let payload = ExType { a: 2 }; 248 | 249 | let (p, mut c) = InMemoryBackend::builder().build_pair().await.unwrap(); 250 | 251 | p.send_serde_json(&payload).await.unwrap(); 252 | let deadline = Duration::from_secs(1); 253 | 254 | let now = Instant::now(); 255 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 256 | assert_eq!(xs.len(), 1); 257 | let d = xs.remove(0); 258 | assert_eq!(d.payload_serde_json::().unwrap().unwrap(), payload); 259 | d.ack().await.unwrap(); 260 | assert!(now.elapsed() <= deadline); 261 | } 262 | 263 | /// Consumer should yield items immediately if there's a full batch ready on 264 | /// the first poll. 
265 | #[tokio::test] 266 | async fn test_send_recv_all_full() { 267 | let payload1 = ExType { a: 1 }; 268 | let payload2 = ExType { a: 2 }; 269 | 270 | let (p, mut c) = InMemoryBackend::builder().build_pair().await.unwrap(); 271 | 272 | p.send_serde_json(&payload1).await.unwrap(); 273 | p.send_serde_json(&payload2).await.unwrap(); 274 | let deadline = Duration::from_secs(1); 275 | 276 | let now = Instant::now(); 277 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 278 | assert_eq!(xs.len(), 2); 279 | let d1 = xs.remove(0); 280 | assert_eq!( 281 | d1.payload_serde_json::().unwrap().unwrap(), 282 | payload1 283 | ); 284 | d1.ack().await.unwrap(); 285 | 286 | let d2 = xs.remove(0); 287 | assert_eq!( 288 | d2.payload_serde_json::().unwrap().unwrap(), 289 | payload2 290 | ); 291 | d2.ack().await.unwrap(); 292 | // N.b. it's still possible this could turn up false if the test runs 293 | // too slow. 294 | assert!(now.elapsed() < deadline); 295 | } 296 | 297 | /// Consumer will return the full batch immediately, but also return 298 | /// immediately if a partial batch is ready. 
299 | #[tokio::test] 300 | async fn test_send_recv_all_full_then_partial() { 301 | let payload1 = ExType { a: 1 }; 302 | let payload2 = ExType { a: 2 }; 303 | let payload3 = ExType { a: 3 }; 304 | 305 | let (p, mut c) = InMemoryBackend::builder().build_pair().await.unwrap(); 306 | 307 | p.send_serde_json(&payload1).await.unwrap(); 308 | p.send_serde_json(&payload2).await.unwrap(); 309 | p.send_serde_json(&payload3).await.unwrap(); 310 | 311 | let deadline = Duration::from_secs(1); 312 | let now1 = Instant::now(); 313 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 314 | assert_eq!(xs.len(), 2); 315 | let d1 = xs.remove(0); 316 | assert_eq!( 317 | d1.payload_serde_json::().unwrap().unwrap(), 318 | payload1 319 | ); 320 | d1.ack().await.unwrap(); 321 | 322 | let d2 = xs.remove(0); 323 | assert_eq!( 324 | d2.payload_serde_json::().unwrap().unwrap(), 325 | payload2 326 | ); 327 | d2.ack().await.unwrap(); 328 | assert!(now1.elapsed() < deadline); 329 | 330 | // 2nd call 331 | let now2 = Instant::now(); 332 | let mut ys = c.receive_all(2, deadline).await.unwrap(); 333 | assert_eq!(ys.len(), 1); 334 | let d3 = ys.remove(0); 335 | assert_eq!( 336 | d3.payload_serde_json::().unwrap().unwrap(), 337 | payload3 338 | ); 339 | d3.ack().await.unwrap(); 340 | assert!(now2.elapsed() <= deadline); 341 | } 342 | 343 | /// Consumer will NOT wait indefinitely for at least one item. 
344 | #[tokio::test] 345 | async fn test_send_recv_all_late_arriving_items() { 346 | let (_p, mut c) = InMemoryBackend::builder().build_pair().await.unwrap(); 347 | 348 | let deadline = Duration::from_secs(1); 349 | let now = Instant::now(); 350 | let xs = c.receive_all(2, deadline).await.unwrap(); 351 | let elapsed = now.elapsed(); 352 | 353 | assert_eq!(xs.len(), 0); 354 | // Elapsed should be around the deadline, ballpark 355 | assert!(elapsed >= deadline); 356 | assert!(elapsed <= deadline + Duration::from_millis(200)); 357 | } 358 | 359 | #[tokio::test] 360 | async fn test_scheduled() { 361 | let payload1 = ExType { a: 1 }; 362 | 363 | let (p, mut c) = InMemoryBackend::builder().build_pair().await.unwrap(); 364 | 365 | let delay = Duration::from_millis(100); 366 | let now = Instant::now(); 367 | p.send_serde_json_scheduled(&payload1, delay).await.unwrap(); 368 | let delivery = c 369 | .receive_all(1, delay * 2) 370 | .await 371 | .unwrap() 372 | .into_iter() 373 | .next() 374 | .unwrap(); 375 | assert!(now.elapsed() >= delay); 376 | assert!(now.elapsed() < delay * 2); 377 | assert_eq!(Some(payload1), delivery.payload_serde_json().unwrap()); 378 | } 379 | } 380 | -------------------------------------------------------------------------------- /omniqueue/src/backends/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "azure_queue_storage")] 2 | pub mod azure_queue_storage; 3 | #[cfg(feature = "gcp_pubsub")] 4 | pub mod gcp_pubsub; 5 | #[cfg(feature = "in_memory")] 6 | pub mod in_memory; 7 | #[cfg(feature = "rabbitmq")] 8 | pub mod rabbitmq; 9 | #[cfg(feature = "redis")] 10 | pub mod redis; 11 | #[cfg(feature = "sqs")] 12 | pub mod sqs; 13 | 14 | #[cfg(feature = "azure_queue_storage")] 15 | pub use azure_queue_storage::{AqsBackend, AqsConfig, AqsConsumer, AqsProducer}; 16 | #[cfg(feature = "gcp_pubsub")] 17 | pub use gcp_pubsub::{GcpPubSubBackend, GcpPubSubConfig, GcpPubSubConsumer, GcpPubSubProducer}; 18 
| #[cfg(feature = "in_memory")] 19 | pub use in_memory::{InMemoryBackend, InMemoryConsumer, InMemoryProducer}; 20 | #[cfg(feature = "rabbitmq")] 21 | pub use rabbitmq::{RabbitMqBackend, RabbitMqConfig, RabbitMqConsumer, RabbitMqProducer}; 22 | #[cfg(feature = "redis")] 23 | pub use redis::{RedisBackend, RedisBackendBuilder, RedisConfig, RedisConsumer, RedisProducer}; 24 | #[cfg(feature = "redis_cluster")] 25 | pub use redis::{RedisClusterBackend, RedisClusterBackendBuilder}; 26 | #[cfg(feature = "sqs")] 27 | pub use sqs::{SqsBackend, SqsConfig, SqsConsumer, SqsProducer}; 28 | -------------------------------------------------------------------------------- /omniqueue/src/backends/rabbitmq.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, Instant}; 2 | 3 | use futures_util::{FutureExt, StreamExt}; 4 | use lapin::types::AMQPValue; 5 | pub use lapin::{ 6 | acker::Acker as LapinAcker, 7 | options::{ 8 | BasicAckOptions, BasicConsumeOptions, BasicNackOptions, BasicPublishOptions, 9 | BasicQosOptions, 10 | }, 11 | types::FieldTable, 12 | BasicProperties, Channel, Connection, ConnectionProperties, Consumer, 13 | }; 14 | use serde::Serialize; 15 | 16 | #[allow(deprecated)] 17 | use crate::{ 18 | builder::{QueueBuilder, Static}, 19 | queue::{Acker, Delivery, QueueBackend}, 20 | QueueError, Result, 21 | }; 22 | 23 | #[derive(Clone)] 24 | pub struct RabbitMqConfig { 25 | pub uri: String, 26 | pub connection_properties: ConnectionProperties, 27 | 28 | pub publish_exchange: String, 29 | pub publish_routing_key: String, 30 | pub publish_options: BasicPublishOptions, 31 | pub publish_properties: BasicProperties, 32 | 33 | pub consume_queue: String, 34 | pub consumer_tag: String, 35 | pub consume_options: BasicConsumeOptions, 36 | pub consume_arguments: FieldTable, 37 | 38 | pub consume_prefetch_count: Option, 39 | pub requeue_on_nack: bool, 40 | } 41 | 42 | pub struct RabbitMqBackend; 43 | 44 | impl RabbitMqBackend { 
45 | /// Creates a new RabbitMQ queue builder with the given configuration. 46 | pub fn builder(config: RabbitMqConfig) -> QueueBuilder { 47 | #[allow(deprecated)] 48 | QueueBuilder::new(config) 49 | } 50 | } 51 | 52 | async fn consumer(conn: &Connection, cfg: RabbitMqConfig) -> Result { 53 | let channel_rx = conn.create_channel().await.map_err(QueueError::generic)?; 54 | 55 | if let Some(n) = cfg.consume_prefetch_count { 56 | channel_rx 57 | .basic_qos(n, BasicQosOptions::default()) 58 | .await 59 | .map_err(QueueError::generic)?; 60 | } 61 | 62 | Ok(RabbitMqConsumer { 63 | consumer: channel_rx 64 | .basic_consume( 65 | &cfg.consume_queue, 66 | &cfg.consumer_tag, 67 | cfg.consume_options, 68 | cfg.consume_arguments.clone(), 69 | ) 70 | .await 71 | .map_err(QueueError::generic)?, 72 | requeue_on_nack: cfg.requeue_on_nack, 73 | }) 74 | } 75 | 76 | async fn producer(conn: &Connection, cfg: RabbitMqConfig) -> Result { 77 | let channel_tx = conn.create_channel().await.map_err(QueueError::generic)?; 78 | Ok(RabbitMqProducer { 79 | channel: channel_tx, 80 | exchange: cfg.publish_exchange.clone(), 81 | routing_key: cfg.publish_routing_key.clone(), 82 | options: cfg.publish_options, 83 | properties: cfg.publish_properties.clone(), 84 | }) 85 | } 86 | 87 | #[allow(deprecated)] 88 | impl QueueBackend for RabbitMqBackend { 89 | type PayloadIn = Vec; 90 | type PayloadOut = Vec; 91 | 92 | type Producer = RabbitMqProducer; 93 | type Consumer = RabbitMqConsumer; 94 | 95 | type Config = RabbitMqConfig; 96 | 97 | async fn new_pair(cfg: RabbitMqConfig) -> Result<(RabbitMqProducer, RabbitMqConsumer)> { 98 | let conn = Connection::connect(&cfg.uri, cfg.connection_properties.clone()) 99 | .await 100 | .map_err(QueueError::generic)?; 101 | 102 | Ok(( 103 | producer(&conn, cfg.clone()).await?, 104 | consumer(&conn, cfg.clone()).await?, 105 | )) 106 | } 107 | 108 | async fn producing_half(cfg: RabbitMqConfig) -> Result { 109 | let conn = Connection::connect(&cfg.uri, 
cfg.connection_properties.clone()) 110 | .await 111 | .map_err(QueueError::generic)?; 112 | 113 | producer(&conn, cfg.clone()).await 114 | } 115 | 116 | async fn consuming_half(cfg: RabbitMqConfig) -> Result { 117 | let conn = Connection::connect(&cfg.uri, cfg.connection_properties.clone()) 118 | .await 119 | .map_err(QueueError::generic)?; 120 | 121 | consumer(&conn, cfg.clone()).await 122 | } 123 | } 124 | 125 | pub struct RabbitMqProducer { 126 | channel: Channel, 127 | exchange: String, 128 | routing_key: String, 129 | options: BasicPublishOptions, 130 | properties: BasicProperties, 131 | } 132 | 133 | impl RabbitMqProducer { 134 | async fn send_raw_with_headers( 135 | &self, 136 | payload: &[u8], 137 | headers: Option, 138 | ) -> Result<()> { 139 | let mut properties = self.properties.clone(); 140 | #[cfg(feature = "rabbitmq-with-message-ids")] 141 | { 142 | use svix_ksuid::{KsuidLike as _, KsuidMs}; 143 | use time::OffsetDateTime; 144 | 145 | let id = &KsuidMs::new(Some(OffsetDateTime::now_utc()), None); 146 | properties = properties.with_message_id(id.to_string().into()); 147 | } 148 | if let Some(headers) = headers { 149 | properties = properties.with_headers(headers); 150 | } 151 | 152 | self.channel 153 | .basic_publish( 154 | &self.exchange, 155 | &self.routing_key, 156 | self.options, 157 | payload, 158 | properties, 159 | ) 160 | .await 161 | .map_err(QueueError::generic)?; 162 | 163 | Ok(()) 164 | } 165 | 166 | #[tracing::instrument( 167 | name = "send", 168 | skip_all, 169 | fields(payload_size = payload.len()) 170 | )] 171 | pub async fn send_raw(&self, payload: &[u8]) -> Result<()> { 172 | self.send_raw_with_headers(payload, None).await 173 | } 174 | 175 | pub async fn send_serde_json(&self, payload: &P) -> Result<()> { 176 | let payload = serde_json::to_vec(payload)?; 177 | self.send_raw(&payload).await 178 | } 179 | 180 | #[tracing::instrument( 181 | name = "send", 182 | skip_all, 183 | fields(payload_size = payload.len(), delay) 184 | )] 185 | 
pub async fn send_raw_scheduled(&self, payload: &[u8], delay: Duration) -> Result<()> { 186 | let mut headers = FieldTable::default(); 187 | 188 | let delay_ms: u32 = delay 189 | .as_millis() 190 | .try_into() 191 | .map_err(|_| QueueError::Generic("delay is too large".into()))?; 192 | headers.insert("x-delay".into(), AMQPValue::LongUInt(delay_ms)); 193 | 194 | self.send_raw_with_headers(payload, Some(headers)).await 195 | } 196 | 197 | pub async fn send_serde_json_scheduled( 198 | &self, 199 | payload: &P, 200 | delay: Duration, 201 | ) -> Result<()> { 202 | let payload = serde_json::to_vec(payload)?; 203 | self.send_raw_scheduled(&payload, delay).await 204 | } 205 | 206 | pub async fn redrive_dlq(&self) -> Result<()> { 207 | Err(QueueError::Unsupported( 208 | "redrive_dlq is not supported by RabbitMqBackend", 209 | )) 210 | } 211 | } 212 | 213 | impl crate::QueueProducer for RabbitMqProducer { 214 | type Payload = Vec; 215 | omni_delegate!(send_raw, send_serde_json, redrive_dlq); 216 | } 217 | impl crate::ScheduledQueueProducer for RabbitMqProducer { 218 | omni_delegate!(send_raw_scheduled, send_serde_json_scheduled); 219 | } 220 | 221 | pub struct RabbitMqConsumer { 222 | consumer: Consumer, 223 | requeue_on_nack: bool, 224 | } 225 | 226 | impl RabbitMqConsumer { 227 | fn wrap_delivery(&self, delivery: lapin::message::Delivery) -> Delivery { 228 | Delivery::new( 229 | delivery.data, 230 | RabbitMqAcker { 231 | acker: Some(delivery.acker), 232 | requeue_on_nack: self.requeue_on_nack, 233 | }, 234 | ) 235 | } 236 | 237 | pub async fn receive(&mut self) -> Result { 238 | let mut stream = 239 | self.consumer 240 | .clone() 241 | .map(|l: Result| { 242 | let l = l.map_err(QueueError::generic)?; 243 | Ok(self.wrap_delivery(l)) 244 | }); 245 | 246 | stream.next().await.ok_or(QueueError::NoData)? 
247 | } 248 | 249 | pub async fn receive_all( 250 | &mut self, 251 | max_messages: usize, 252 | deadline: Duration, 253 | ) -> Result> { 254 | let mut stream = self.consumer.clone().map( 255 | |l: Result| -> Result { 256 | let l = l.map_err(QueueError::generic)?; 257 | Ok(self.wrap_delivery(l)) 258 | }, 259 | ); 260 | let start = Instant::now(); 261 | let mut out = Vec::with_capacity(max_messages); 262 | match tokio::time::timeout(deadline, stream.next()).await { 263 | Ok(Some(x)) => out.push(x?), 264 | // Timeouts and stream termination 265 | Err(_) | Ok(None) => return Ok(out), 266 | } 267 | 268 | if max_messages > 1 { 269 | // `now_or_never` will break the loop if no ready items are already 270 | // buffered in the stream. This should allow us to opportunistically 271 | // fill up the buffer in the remaining time. 272 | while let Some(Some(x)) = stream.next().now_or_never() { 273 | out.push(x?); 274 | if out.len() >= max_messages || start.elapsed() >= deadline { 275 | break; 276 | } 277 | } 278 | } 279 | Ok(out) 280 | } 281 | } 282 | 283 | impl crate::QueueConsumer for RabbitMqConsumer { 284 | type Payload = Vec; 285 | omni_delegate!(receive, receive_all); 286 | } 287 | 288 | struct RabbitMqAcker { 289 | acker: Option, 290 | requeue_on_nack: bool, 291 | } 292 | 293 | impl Acker for RabbitMqAcker { 294 | async fn ack(&mut self) -> Result<()> { 295 | self.acker 296 | .take() 297 | .ok_or(QueueError::CannotAckOrNackTwice)? 298 | .ack(BasicAckOptions { multiple: false }) 299 | .await 300 | .map(|_| ()) 301 | .map_err(QueueError::generic) 302 | } 303 | 304 | async fn nack(&mut self) -> Result<()> { 305 | self.acker 306 | .take() 307 | .ok_or(QueueError::CannotAckOrNackTwice)? 
308 | .nack(BasicNackOptions { 309 | requeue: self.requeue_on_nack, 310 | multiple: false, 311 | }) 312 | .await 313 | .map(|_| ()) 314 | .map_err(QueueError::generic) 315 | } 316 | 317 | async fn set_ack_deadline(&mut self, _duration: Duration) -> Result<()> { 318 | Err(QueueError::Unsupported( 319 | "set_ack_deadline is not supported by RabbitMQ", 320 | )) 321 | } 322 | } 323 | -------------------------------------------------------------------------------- /omniqueue/src/backends/redis/cluster.rs: -------------------------------------------------------------------------------- 1 | use redis::{ 2 | cluster::{ClusterClient, ClusterClientBuilder}, 3 | cluster_routing::{MultipleNodeRoutingInfo, ResponsePolicy, RoutingInfo}, 4 | ErrorKind, FromRedisValue, IntoConnectionInfo, RedisError, 5 | }; 6 | 7 | /// ConnectionManager that implements `bb8::ManageConnection` and supports 8 | /// asynchronous clustered connections via `redis::cluster::ClusterClient` 9 | #[derive(Clone)] 10 | pub struct RedisClusterConnectionManager { 11 | client: ClusterClient, 12 | } 13 | 14 | impl RedisClusterConnectionManager { 15 | pub fn new( 16 | info: T, 17 | ) -> Result { 18 | Ok(RedisClusterConnectionManager { 19 | client: ClusterClientBuilder::new(vec![info]).build()?, 20 | }) 21 | } 22 | } 23 | 24 | impl bb8::ManageConnection for RedisClusterConnectionManager { 25 | type Connection = redis::cluster_async::ClusterConnection; 26 | type Error = RedisError; 27 | 28 | async fn connect(&self) -> Result { 29 | self.client.get_async_connection().await 30 | } 31 | 32 | async fn is_valid(&self, conn: &mut Self::Connection) -> Result<(), Self::Error> { 33 | let pong = conn 34 | .route_command( 35 | &redis::cmd("PING"), 36 | RoutingInfo::MultiNode(( 37 | MultipleNodeRoutingInfo::AllMasters, 38 | Some(ResponsePolicy::OneSucceeded), 39 | )), 40 | ) 41 | .await 42 | .and_then(|v| String::from_redis_value(&v))?; 43 | match pong.as_str() { 44 | "PONG" => Ok(()), 45 | _ => Err((ErrorKind::ResponseError, 
"ping request").into()), 46 | } 47 | } 48 | 49 | fn has_broken(&self, _: &mut Self::Connection) -> bool { 50 | false 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /omniqueue/src/backends/redis/fallback.rs: -------------------------------------------------------------------------------- 1 | //! Implementation of the main queue using two lists instead of redis streams, 2 | //! for compatibility with redis versions older than 6.2.0. 3 | 4 | use std::time::Duration; 5 | 6 | use bb8::ManageConnection; 7 | use redis::AsyncCommands; 8 | use svix_ksuid::{KsuidLike as _, KsuidMs}; 9 | use time::OffsetDateTime; 10 | use tracing::{error, trace, warn}; 11 | 12 | use super::{ 13 | internal_from_list, internal_to_list_payload, DeadLetterQueueConfig, InternalPayload, 14 | InternalPayloadOwned, RawPayload, RedisConnection, RedisConsumer, RedisProducer, 15 | }; 16 | use crate::{queue::Acker, Delivery, QueueError, Result}; 17 | 18 | pub(super) async fn send_raw( 19 | producer: &RedisProducer, 20 | payload: &[u8], 21 | ) -> Result<()> { 22 | producer 23 | .redis 24 | .get() 25 | .await 26 | .map_err(QueueError::generic)? 27 | .lpush( 28 | &producer.queue_key, 29 | internal_to_list_payload(InternalPayload::new(payload)), 30 | ) 31 | .await 32 | .map_err(QueueError::generic) 33 | } 34 | 35 | pub(super) async fn receive(consumer: &RedisConsumer) -> Result { 36 | let res = receive_with_timeout(consumer, Duration::ZERO).await?; 37 | res.ok_or_else(|| QueueError::Generic("No data".into())) 38 | } 39 | 40 | pub(super) async fn receive_all( 41 | consumer: &RedisConsumer, 42 | deadline: Duration, 43 | _max_messages: usize, 44 | ) -> Result> { 45 | // FIXME: Run up to max_messages RPOPLPUSH'es until there is a null reply? 
46 | let delivery = receive_with_timeout(consumer, deadline).await?; 47 | Ok(delivery.into_iter().collect()) 48 | } 49 | 50 | async fn receive_with_timeout( 51 | consumer: &RedisConsumer, 52 | timeout: Duration, 53 | ) -> Result> { 54 | let payload: Option> = consumer 55 | .redis 56 | .get() 57 | .await 58 | .map_err(QueueError::generic)? 59 | .brpoplpush( 60 | &consumer.queue_key, 61 | &consumer.processing_queue_key, 62 | // The documentation at https://redis.io/docs/latest/commands/brpoplpush/ does not 63 | // state what unit the timeout is, but `BLPOP` and `BLMPOP` have similar timeout 64 | // parameters that are documented as being seconds. 65 | timeout.as_secs_f64(), 66 | ) 67 | .await 68 | .map_err(QueueError::generic)?; 69 | 70 | match payload { 71 | Some(old_payload) => Some(internal_to_delivery( 72 | internal_from_list(&old_payload)?.into(), 73 | consumer, 74 | old_payload, 75 | )) 76 | .transpose(), 77 | None => Ok(None), 78 | } 79 | } 80 | 81 | fn internal_to_delivery( 82 | InternalPayloadOwned { 83 | payload, 84 | num_receives, 85 | }: InternalPayloadOwned, 86 | consumer: &RedisConsumer, 87 | old_payload: Vec, 88 | ) -> Result { 89 | Ok(Delivery::new( 90 | payload, 91 | RedisFallbackAcker { 92 | redis: consumer.redis.clone(), 93 | processing_queue_key: consumer.processing_queue_key.clone(), 94 | old_payload, 95 | already_acked_or_nacked: false, 96 | num_receives, 97 | dlq_config: consumer.dlq_config.clone(), 98 | }, 99 | )) 100 | } 101 | 102 | struct RedisFallbackAcker { 103 | redis: bb8::Pool, 104 | processing_queue_key: String, 105 | // We delete based on the payload -- and since the 106 | // `num_receives` changes after receiving it's the 107 | // `old_payload`, since `num_receives` is part of the 108 | // payload. Make sense? 
109 | old_payload: RawPayload, 110 | 111 | already_acked_or_nacked: bool, 112 | 113 | num_receives: usize, 114 | dlq_config: Option, 115 | } 116 | 117 | impl Acker for RedisFallbackAcker { 118 | async fn ack(&mut self) -> Result<()> { 119 | if self.already_acked_or_nacked { 120 | return Err(QueueError::CannotAckOrNackTwice); 121 | } 122 | 123 | let _: () = self 124 | .redis 125 | .get() 126 | .await 127 | .map_err(QueueError::generic)? 128 | .lrem(&self.processing_queue_key, 1, &self.old_payload) 129 | .await 130 | .map_err(QueueError::generic)?; 131 | 132 | self.already_acked_or_nacked = true; 133 | 134 | Ok(()) 135 | } 136 | 137 | async fn nack(&mut self) -> Result<()> { 138 | if let Some(dlq_config) = &self.dlq_config { 139 | if dlq_config.max_retries_reached(self.num_receives) { 140 | trace!("Maximum attempts reached"); 141 | // Try to get the raw payload, but if that fails (which 142 | // seems possible given that we're already in a failure 143 | // scenario), just push the full `InternalPayload` onto the DLQ: 144 | let payload = match internal_from_list(&self.old_payload) { 145 | Ok(InternalPayload { payload, .. 
}) => payload, 146 | Err(e) => { 147 | warn!(error = ?e, "Failed to get original payload, sending to DLQ with internal payload"); 148 | &self.old_payload 149 | } 150 | }; 151 | send_to_dlq(&self.redis, dlq_config, payload).await?; 152 | return self.ack().await; 153 | } 154 | } 155 | 156 | if self.already_acked_or_nacked { 157 | return Err(QueueError::CannotAckOrNackTwice); 158 | } 159 | 160 | self.already_acked_or_nacked = true; 161 | 162 | Ok(()) 163 | } 164 | 165 | async fn set_ack_deadline(&mut self, _duration: Duration) -> Result<()> { 166 | Err(QueueError::Unsupported( 167 | "set_ack_deadline is not yet supported by redis fallback backend", 168 | )) 169 | } 170 | } 171 | 172 | pub(super) async fn add_to_main_queue( 173 | keys: Vec>, 174 | main_queue_name: &str, 175 | conn: &mut impl AsyncCommands, 176 | ) -> Result<()> { 177 | // We don't care about existing `num_receives` 178 | // since we're pushing onto a different queue. 179 | let new_keys = keys 180 | .into_iter() 181 | // So reset it to avoid carrying state over: 182 | .map(|x| InternalPayload::new(x.payload)) 183 | .map(internal_to_list_payload) 184 | .collect::>(); 185 | let _: () = conn 186 | .lpush(main_queue_name, new_keys) 187 | .await 188 | .map_err(QueueError::generic)?; 189 | Ok(()) 190 | } 191 | 192 | pub(super) async fn background_task_processing( 193 | pool: bb8::Pool, 194 | queue_key: String, 195 | processing_queue_key: String, 196 | ack_deadline_ms: i64, 197 | dlq_config: Option, 198 | ) -> Result<()> { 199 | // FIXME: ack_deadline_ms should be unsigned 200 | let ack_deadline = Duration::from_millis(ack_deadline_ms as _); 201 | loop { 202 | if let Err(err) = reenqueue_timed_out_messages( 203 | &pool, 204 | &queue_key, 205 | &processing_queue_key, 206 | ack_deadline, 207 | &dlq_config, 208 | ) 209 | .await 210 | { 211 | error!("{err}"); 212 | tokio::time::sleep(Duration::from_millis(500)).await; 213 | continue; 214 | } 215 | } 216 | } 217 | 218 | async fn send_to_dlq( 219 | redis: 
&bb8::Pool, 220 | dlq_config: &DeadLetterQueueConfig, 221 | payload: &[u8], 222 | ) -> Result<()> { 223 | let DeadLetterQueueConfig { queue_key: dlq, .. } = dlq_config; 224 | 225 | let _: () = redis 226 | .get() 227 | .await 228 | .map_err(QueueError::generic)? 229 | .rpush(dlq, payload) 230 | .await 231 | .map_err(QueueError::generic)?; 232 | 233 | Ok(()) 234 | } 235 | 236 | async fn reenqueue_timed_out_messages( 237 | pool: &bb8::Pool, 238 | queue_key: &str, 239 | processing_queue_key: &str, 240 | ack_deadline: Duration, 241 | dlq_config: &Option, 242 | ) -> Result<(), Box> { 243 | const BATCH_SIZE: isize = 50; 244 | 245 | let mut conn = pool.get().await?; 246 | 247 | let keys: Vec = conn.lrange(processing_queue_key, -1, -1).await?; 248 | 249 | // If the key is older than now, it means we should be processing keys 250 | let validity_limit = KsuidMs::new(Some(OffsetDateTime::now_utc() - ack_deadline), None) 251 | .to_string() 252 | .into_bytes(); 253 | 254 | if !keys.is_empty() && keys[0] <= validity_limit { 255 | let keys: Vec = conn.lrange(processing_queue_key, -BATCH_SIZE, -1).await?; 256 | for key in keys { 257 | if key <= validity_limit { 258 | let internal = internal_from_list(&key)?; 259 | let num_receives = internal.num_receives; 260 | 261 | match &dlq_config { 262 | Some(dlq_config) if dlq_config.max_retries_reached(num_receives) => { 263 | trace!( 264 | num_receives = num_receives, 265 | "Maximum attempts reached for message, moving item to DLQ", 266 | ); 267 | send_to_dlq(pool, dlq_config, internal.payload).await?; 268 | } 269 | _ => { 270 | trace!( 271 | num_receives = num_receives, 272 | "Pushing back overdue task to queue" 273 | ); 274 | let _: () = conn 275 | .rpush(queue_key, internal_to_list_payload(internal)) 276 | .await?; 277 | } 278 | } 279 | 280 | // We use LREM to be sure we only delete the keys we should be deleting 281 | let _: () = conn.lrem(processing_queue_key, 1, &key).await?; 282 | } 283 | } 284 | } else { 285 | // Sleep before 
attempting to fetch again if nothing was found 286 | tokio::time::sleep(Duration::from_millis(500)).await; 287 | } 288 | 289 | Ok(()) 290 | } 291 | -------------------------------------------------------------------------------- /omniqueue/src/backends/redis/sentinel.rs: -------------------------------------------------------------------------------- 1 | use redis::{ 2 | sentinel::{SentinelClient, SentinelNodeConnectionInfo, SentinelServerType}, 3 | ErrorKind, IntoConnectionInfo, RedisError, 4 | }; 5 | use tokio::sync::Mutex; 6 | 7 | // The mutex here is needed b/c there's currently 8 | // no way to get connections in the redis sentinel client 9 | // without a mutable reference to the underlying client. 10 | struct LockedSentinelClient(pub(crate) Mutex); 11 | 12 | /// ConnectionManager that implements `bb8::ManageConnection` and supports 13 | /// asynchronous Sentinel connections via `redis::sentinel::SentinelClient` 14 | pub struct RedisSentinelConnectionManager { 15 | client: LockedSentinelClient, 16 | } 17 | 18 | impl RedisSentinelConnectionManager { 19 | pub fn new( 20 | info: Vec, 21 | service_name: String, 22 | node_connection_info: Option, 23 | ) -> Result { 24 | Ok(RedisSentinelConnectionManager { 25 | client: LockedSentinelClient(Mutex::new(SentinelClient::build( 26 | info, 27 | service_name, 28 | node_connection_info, 29 | SentinelServerType::Master, 30 | )?)), 31 | }) 32 | } 33 | } 34 | 35 | impl bb8::ManageConnection for RedisSentinelConnectionManager { 36 | type Connection = redis::aio::MultiplexedConnection; 37 | type Error = RedisError; 38 | 39 | async fn connect(&self) -> Result { 40 | self.client.0.lock().await.get_async_connection().await 41 | } 42 | 43 | async fn is_valid(&self, conn: &mut Self::Connection) -> Result<(), Self::Error> { 44 | let pong: String = redis::cmd("PING").query_async(conn).await?; 45 | match pong.as_str() { 46 | "PONG" => Ok(()), 47 | _ => Err((ErrorKind::ResponseError, "ping request").into()), 48 | } 49 | } 50 | 51 | fn 
has_broken(&self, _: &mut Self::Connection) -> bool { 52 | false 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /omniqueue/src/backends/redis/streams.rs: -------------------------------------------------------------------------------- 1 | //! Implementation of the main queue using redis streams. 2 | 3 | use std::time::Duration; 4 | 5 | use bb8::ManageConnection; 6 | use redis::{ 7 | streams::{ 8 | StreamAutoClaimOptions, StreamClaimReply, StreamId, StreamRangeReply, StreamReadOptions, 9 | StreamReadReply, 10 | }, 11 | AsyncCommands as _, FromRedisValue, RedisResult, 12 | }; 13 | use tracing::{error, trace}; 14 | 15 | use super::{ 16 | DeadLetterQueueConfig, InternalPayload, InternalPayloadOwned, RedisConnection, RedisConsumer, 17 | RedisProducer, 18 | }; 19 | use crate::{queue::Acker, Delivery, QueueError, Result}; 20 | 21 | /// Special ID for XADD command's which generates a stream ID automatically 22 | const GENERATE_STREAM_ID: &str = "*"; 23 | /// Special ID for XREADGROUP commands which reads any new messages 24 | const LISTEN_STREAM_ID: &str = ">"; 25 | 26 | /// The maximum number of pending messages to reinsert into the queue after 27 | /// becoming stale per loop 28 | // FIXME(onelson): expose in config? 29 | const PENDING_BATCH_SIZE: usize = 1000; 30 | 31 | macro_rules! internal_to_stream_payload { 32 | ($internal_payload:expr, $payload_key:expr) => { 33 | &[ 34 | ($payload_key, $internal_payload.payload), 35 | ( 36 | NUM_RECEIVES, 37 | $internal_payload.num_receives.to_string().as_bytes(), 38 | ), 39 | ] 40 | }; 41 | } 42 | 43 | pub(super) async fn send_raw( 44 | producer: &RedisProducer, 45 | payload: &[u8], 46 | ) -> Result<()> { 47 | producer 48 | .redis 49 | .get() 50 | .await 51 | .map_err(QueueError::generic)? 
52 | .xadd( 53 | &producer.queue_key, 54 | GENERATE_STREAM_ID, 55 | internal_to_stream_payload!( 56 | InternalPayload::new(payload), 57 | producer.payload_key.as_str() 58 | ), 59 | ) 60 | .await 61 | .map_err(QueueError::generic) 62 | } 63 | 64 | pub(super) async fn receive(consumer: &RedisConsumer) -> Result { 65 | // Ensure an empty vec is never returned 66 | let read_out: StreamReadReply = consumer 67 | .redis 68 | .get() 69 | .await 70 | .map_err(QueueError::generic)? 71 | .xread_options( 72 | &[&consumer.queue_key], 73 | &[LISTEN_STREAM_ID], 74 | &StreamReadOptions::default() 75 | .group(&consumer.consumer_group, &consumer.consumer_name) 76 | .block(100_000) 77 | .count(1), 78 | ) 79 | .await 80 | .map_err(QueueError::generic)?; 81 | 82 | let queue = read_out.keys.into_iter().next().ok_or(QueueError::NoData)?; 83 | let entry = queue.ids.into_iter().next().ok_or(QueueError::NoData)?; 84 | 85 | let internal = internal_from_stream(&entry, &consumer.payload_key)?; 86 | Ok(internal_to_delivery(internal, consumer, entry.id)) 87 | } 88 | 89 | pub(super) async fn receive_all( 90 | consumer: &RedisConsumer, 91 | deadline: Duration, 92 | max_messages: usize, 93 | ) -> Result> { 94 | let read_out: StreamReadReply = consumer 95 | .redis 96 | .get() 97 | .await 98 | .map_err(QueueError::generic)? 
99 | .xread_options( 100 | &[&consumer.queue_key], 101 | &[LISTEN_STREAM_ID], 102 | &StreamReadOptions::default() 103 | .group(&consumer.consumer_group, &consumer.consumer_name) 104 | .block( 105 | deadline 106 | .as_millis() 107 | .try_into() 108 | .map_err(QueueError::generic)?, 109 | ) 110 | .count(max_messages), 111 | ) 112 | .await 113 | .map_err(QueueError::generic)?; 114 | 115 | let mut out = Vec::with_capacity(max_messages); 116 | 117 | if let Some(queue) = read_out.keys.into_iter().next() { 118 | for entry in queue.ids { 119 | let internal = internal_from_stream(&entry, &consumer.payload_key)?; 120 | let delivery = internal_to_delivery(internal, consumer, entry.id); 121 | out.push(delivery); 122 | } 123 | } 124 | Ok(out) 125 | } 126 | 127 | const NUM_RECEIVES: &str = "num_receives"; 128 | 129 | fn internal_from_stream(stream_id: &StreamId, payload_key: &str) -> Result { 130 | let StreamId { map, .. } = stream_id; 131 | 132 | let num_receives = if let Some(redis::Value::BulkString(data)) = map.get(NUM_RECEIVES) { 133 | let count = std::str::from_utf8(data) 134 | .map_err(|_| QueueError::Generic("Improper key format".into()))? 
135 | .parse::() 136 | .map_err(QueueError::generic)?; 137 | count + 1 138 | } else { 139 | 1 140 | }; 141 | 142 | let payload: Vec = map 143 | .get(payload_key) 144 | .ok_or(QueueError::NoData) 145 | .and_then(|x| redis::from_redis_value(x).map_err(QueueError::generic))?; 146 | 147 | Ok(InternalPayloadOwned { 148 | payload, 149 | num_receives, 150 | }) 151 | } 152 | 153 | fn internal_to_delivery( 154 | InternalPayloadOwned { 155 | payload, 156 | num_receives, 157 | }: InternalPayloadOwned, 158 | consumer: &RedisConsumer, 159 | entry_id: String, 160 | ) -> Delivery { 161 | Delivery::new( 162 | payload, 163 | RedisStreamsAcker { 164 | redis: consumer.redis.clone(), 165 | queue_key: consumer.queue_key.to_owned(), 166 | consumer_group: consumer.consumer_group.to_owned(), 167 | entry_id, 168 | already_acked_or_nacked: false, 169 | num_receives, 170 | dlq_config: consumer.dlq_config.clone(), 171 | payload_key: consumer.payload_key.clone(), 172 | }, 173 | ) 174 | } 175 | 176 | struct RedisStreamsAcker { 177 | redis: bb8::Pool, 178 | queue_key: String, 179 | consumer_group: String, 180 | entry_id: String, 181 | payload_key: String, 182 | 183 | already_acked_or_nacked: bool, 184 | num_receives: usize, 185 | dlq_config: Option, 186 | } 187 | 188 | impl RedisStreamsAcker {} 189 | 190 | impl Acker for RedisStreamsAcker { 191 | async fn ack(&mut self) -> Result<()> { 192 | if self.already_acked_or_nacked { 193 | return Err(QueueError::CannotAckOrNackTwice); 194 | } 195 | 196 | let mut pipeline = redis::pipe(); 197 | pipeline.xack(&self.queue_key, &self.consumer_group, &[&self.entry_id]); 198 | pipeline.xdel(&self.queue_key, &[&self.entry_id]); 199 | 200 | let mut conn = self.redis.get().await.map_err(QueueError::generic)?; 201 | let _: () = pipeline 202 | .query_async(&mut *conn) 203 | .await 204 | .map_err(QueueError::generic)?; 205 | 206 | self.already_acked_or_nacked = true; 207 | 208 | Ok(()) 209 | } 210 | 211 | async fn nack(&mut self) -> Result<()> { 212 | if let 
Some(dlq_config) = &self.dlq_config { 213 | if dlq_config.max_retries_reached(self.num_receives) { 214 | trace!(entry_id = self.entry_id, "Maximum attempts reached"); 215 | send_to_dlq( 216 | &self.redis, 217 | &self.queue_key, 218 | dlq_config, 219 | &self.entry_id, 220 | &self.payload_key, 221 | ) 222 | .await?; 223 | return self.ack().await; 224 | } 225 | } 226 | 227 | if self.already_acked_or_nacked { 228 | return Err(QueueError::CannotAckOrNackTwice); 229 | } 230 | 231 | self.already_acked_or_nacked = true; 232 | 233 | Ok(()) 234 | } 235 | 236 | async fn set_ack_deadline(&mut self, _duration: Duration) -> Result<()> { 237 | Err(QueueError::Unsupported( 238 | "set_ack_deadline is not yet supported by redis streams backend", 239 | )) 240 | } 241 | } 242 | 243 | pub(super) async fn add_to_main_queue( 244 | keys: Vec>, 245 | main_queue_name: &str, 246 | payload_key: &str, 247 | conn: &mut impl redis::aio::ConnectionLike, 248 | ) -> Result<()> { 249 | let mut pipe = redis::pipe(); 250 | // We don't care about existing `num_receives` 251 | // since we're pushing onto a different queue. 252 | for InternalPayload { payload, .. 
} in keys { 253 | // So reset it to avoid carrying state over: 254 | let internal = InternalPayload::new(payload); 255 | let _ = pipe.xadd( 256 | main_queue_name, 257 | GENERATE_STREAM_ID, 258 | internal_to_stream_payload!(internal, payload_key), 259 | ); 260 | } 261 | 262 | let _: () = pipe.query_async(conn).await.map_err(QueueError::generic)?; 263 | 264 | Ok(()) 265 | } 266 | 267 | struct StreamAutoclaimReply { 268 | ids: Vec, 269 | } 270 | 271 | impl FromRedisValue for StreamAutoclaimReply { 272 | fn from_redis_value(v: &redis::Value) -> RedisResult { 273 | // First try the two member array from before Redis 7.0 274 | match <((), StreamClaimReply)>::from_redis_value(v) { 275 | Ok(res) => Ok(StreamAutoclaimReply { ids: res.1.ids }), 276 | 277 | // If it's a type error, then try the three member array from Redis 7.0 and after 278 | Err(e) if e.kind() == redis::ErrorKind::TypeError => { 279 | <((), StreamClaimReply, ())>::from_redis_value(v) 280 | .map(|ok| StreamAutoclaimReply { ids: ok.1.ids }) 281 | } 282 | // Any other error should be returned as is 283 | Err(e) => Err(e), 284 | } 285 | } 286 | } 287 | 288 | /// Scoops up messages that have been claimed but not handled by a deadline, 289 | /// then re-queues them. 
290 | pub(super) async fn background_task_pending( 291 | pool: bb8::Pool, 292 | queue_key: String, 293 | consumer_group: String, 294 | consumer_name: String, 295 | ack_deadline_ms: i64, 296 | payload_key: String, 297 | dlq_config: Option, 298 | ) -> Result<()> { 299 | loop { 300 | if let Err(err) = reenqueue_timed_out_messages( 301 | &pool, 302 | &queue_key, 303 | &consumer_group, 304 | &consumer_name, 305 | ack_deadline_ms, 306 | &payload_key, 307 | &dlq_config, 308 | ) 309 | .await 310 | { 311 | error!("{err}"); 312 | tokio::time::sleep(Duration::from_millis(500)).await; 313 | continue; 314 | } 315 | } 316 | } 317 | 318 | // In order to put it in the DLQ, we first have to get the payload 319 | // from the original message, then push it onto the list. An 320 | // alternative would be to store the full payload on the `Acker` as 321 | // we do with the fallback implementation, but it seems good to 322 | // avoid the additional memory utilization if possible. 323 | async fn send_to_dlq( 324 | redis: &bb8::Pool, 325 | main_queue_key: &str, 326 | dlq_config: &DeadLetterQueueConfig, 327 | entry_id: &str, 328 | payload_key: &str, 329 | ) -> Result<()> { 330 | let DeadLetterQueueConfig { queue_key: dlq, .. } = dlq_config; 331 | let StreamRangeReply { ids, .. } = redis 332 | .get() 333 | .await 334 | .map_err(QueueError::generic)? 335 | .xrange(main_queue_key, entry_id, entry_id) 336 | .await 337 | .map_err(QueueError::generic)?; 338 | 339 | let payload = ids.first().ok_or_else(|| QueueError::NoData)?; 340 | let payload: Vec = payload 341 | .map 342 | .get(payload_key) 343 | .ok_or(QueueError::NoData) 344 | .and_then(|x| redis::from_redis_value(x).map_err(QueueError::generic))?; 345 | 346 | let _: () = redis 347 | .get() 348 | .await 349 | .map_err(QueueError::generic)? 
350 | .rpush(dlq, &payload) 351 | .await 352 | .map_err(QueueError::generic)?; 353 | 354 | Ok(()) 355 | } 356 | 357 | async fn reenqueue_timed_out_messages( 358 | pool: &bb8::Pool, 359 | main_queue_name: &str, 360 | consumer_group: &str, 361 | consumer_name: &str, 362 | ack_deadline_ms: i64, 363 | payload_key: &str, 364 | dlq_config: &Option, 365 | ) -> Result<()> { 366 | let mut conn = pool.get().await.map_err(QueueError::generic)?; 367 | 368 | // Every iteration checks whether the processing queue has items that should 369 | // be picked back up, claiming them in the process 370 | let StreamAutoclaimReply { ids } = conn 371 | .xautoclaim_options( 372 | main_queue_name, 373 | consumer_group, 374 | consumer_name, 375 | ack_deadline_ms, 376 | "-", 377 | StreamAutoClaimOptions::default().count(PENDING_BATCH_SIZE), 378 | ) 379 | .await 380 | .map_err(QueueError::generic)?; 381 | 382 | if !ids.is_empty() { 383 | trace!("Moving {} unhandled messages back to the queue", ids.len()); 384 | 385 | let mut pipe = redis::pipe(); 386 | 387 | // And reinsert the map of KV pairs into the MAIN queue with a new stream ID 388 | for stream_id in &ids { 389 | let InternalPayloadOwned { 390 | payload, 391 | num_receives, 392 | } = internal_from_stream(stream_id, payload_key)?; 393 | 394 | if let Some(dlq_config) = &dlq_config { 395 | if num_receives >= dlq_config.max_receives { 396 | trace!( 397 | entry_id = stream_id.id, 398 | "Maximum attempts reached for message, sending to DLQ", 399 | ); 400 | send_to_dlq( 401 | pool, 402 | main_queue_name, 403 | dlq_config, 404 | &stream_id.id, 405 | payload_key, 406 | ) 407 | .await?; 408 | continue; 409 | } 410 | } 411 | let _ = pipe.xadd( 412 | main_queue_name, 413 | GENERATE_STREAM_ID, 414 | internal_to_stream_payload!( 415 | InternalPayload { 416 | payload: payload.as_slice(), 417 | num_receives 418 | }, 419 | payload_key 420 | ), 421 | ); 422 | } 423 | 424 | let _: () = pipe 425 | .query_async(&mut *conn) 426 | .await 427 | 
.map_err(QueueError::generic)?; 428 | 429 | // Acknowledge all the stale ones so the pending queue is cleared 430 | let ids: Vec<_> = ids.iter().map(|wrapped| &wrapped.id).collect(); 431 | 432 | let mut pipe = redis::pipe(); 433 | pipe.xack(main_queue_name, consumer_group, &ids); 434 | pipe.xdel(main_queue_name, &ids); 435 | 436 | let _: () = pipe 437 | .query_async(&mut *conn) 438 | .await 439 | .map_err(QueueError::generic)?; 440 | } else { 441 | // Wait for half a second before attempting to fetch again if nothing was found 442 | tokio::time::sleep(Duration::from_millis(500)).await; 443 | } 444 | 445 | Ok(()) 446 | } 447 | -------------------------------------------------------------------------------- /omniqueue/src/backends/sqs.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fmt::{self, Write}, 3 | future::Future, 4 | num::NonZeroUsize, 5 | time::Duration, 6 | }; 7 | 8 | use aws_sdk_sqs::{ 9 | operation::delete_message::DeleteMessageError, 10 | types::{error::ReceiptHandleIsInvalid, Message, SendMessageBatchRequestEntry}, 11 | Client, 12 | }; 13 | use futures_util::FutureExt as _; 14 | use serde::Serialize; 15 | 16 | #[allow(deprecated)] 17 | use crate::{ 18 | builder::{QueueBuilder, Static}, 19 | queue::{Acker, Delivery, QueueBackend}, 20 | QueueError, Result, 21 | }; 22 | 23 | /// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html 24 | const MAX_PAYLOAD_SIZE: usize = 262_144; 25 | /// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html 26 | const MAX_BATCH_SIZE: usize = 10; 27 | 28 | #[derive(Clone, Debug, Eq, PartialEq)] 29 | pub struct SqsConfig { 30 | /// The queue's [DSN](https://aws.amazon.com/route53/what-is-dns/). 31 | pub queue_dsn: String, 32 | 33 | /// Whether to override the AWS endpoint URL with the queue DSN. 
34 | pub override_endpoint: bool, 35 | } 36 | 37 | #[derive(Clone, Debug)] 38 | pub struct SqsConfigFull { 39 | queue_dsn: String, 40 | override_endpoint: bool, 41 | sqs_config: Option, 42 | } 43 | 44 | impl SqsConfigFull { 45 | async fn take_sqs_config(&mut self) -> aws_sdk_sqs::Config { 46 | if let Some(cfg) = self.sqs_config.take() { 47 | cfg 48 | } else if self.override_endpoint { 49 | aws_sdk_sqs::Config::from( 50 | &aws_config::from_env() 51 | .endpoint_url(&self.queue_dsn) 52 | .load() 53 | // Segment the async state machine. load future is >7kb at the time of writing. 54 | .boxed() 55 | .await, 56 | ) 57 | } else { 58 | aws_sdk_sqs::Config::from( 59 | &aws_config::load_from_env() 60 | // Same as above 61 | .boxed() 62 | .await, 63 | ) 64 | } 65 | } 66 | } 67 | 68 | #[allow(deprecated)] 69 | impl From for SqsConfigFull { 70 | fn from(cfg: SqsConfig) -> Self { 71 | let SqsConfig { 72 | queue_dsn, 73 | override_endpoint, 74 | } = cfg; 75 | Self { 76 | queue_dsn, 77 | override_endpoint, 78 | sqs_config: None, 79 | } 80 | } 81 | } 82 | 83 | impl From<&str> for SqsConfigFull { 84 | fn from(dsn: &str) -> Self { 85 | Self::from(dsn.to_owned()) 86 | } 87 | } 88 | 89 | impl From for SqsConfigFull { 90 | fn from(dsn: String) -> Self { 91 | Self { 92 | queue_dsn: dsn, 93 | override_endpoint: false, 94 | sqs_config: None, 95 | } 96 | } 97 | } 98 | 99 | pub struct SqsBackend; 100 | 101 | #[allow(deprecated)] 102 | impl SqsBackend { 103 | /// Creates a new Amazon SQS queue builder with the given configuration. 104 | /// 105 | /// You can pass either a queue DSN, or a [`SqsConfig`] instance here. 
106 | pub fn builder(cfg: impl Into) -> QueueBuilder { 107 | QueueBuilder::new(cfg.into()) 108 | } 109 | 110 | #[deprecated = "Use SqsBackend::builder(cfg).build_pair() instead"] 111 | pub async fn new_pair(cfg: impl Into) -> Result<(SqsProducer, SqsConsumer)> { 112 | ::new_pair(cfg.into()).await 113 | } 114 | 115 | #[deprecated = "Use SqsBackend::builder(cfg).build_producer() instead"] 116 | pub async fn producing_half(cfg: impl Into) -> Result { 117 | ::producing_half(cfg.into()).await 118 | } 119 | 120 | #[deprecated = "Use SqsBackend::builder(cfg).build_consumer() instead"] 121 | pub async fn consuming_half(cfg: impl Into) -> Result { 122 | ::consuming_half(cfg.into()).await 123 | } 124 | } 125 | 126 | #[allow(deprecated)] 127 | impl QueueBackend for SqsBackend { 128 | type PayloadIn = String; 129 | type PayloadOut = String; 130 | 131 | type Producer = SqsProducer; 132 | type Consumer = SqsConsumer; 133 | 134 | type Config = SqsConfigFull; 135 | 136 | async fn new_pair(mut cfg: SqsConfigFull) -> Result<(SqsProducer, SqsConsumer)> { 137 | let aws_cfg = cfg.take_sqs_config().await; 138 | let client = Client::from_conf(aws_cfg); 139 | 140 | let producer = SqsProducer { 141 | client: client.clone(), 142 | queue_dsn: cfg.queue_dsn.clone(), 143 | }; 144 | 145 | let consumer = SqsConsumer { 146 | client, 147 | queue_dsn: cfg.queue_dsn, 148 | }; 149 | 150 | Ok((producer, consumer)) 151 | } 152 | 153 | async fn producing_half(mut cfg: SqsConfigFull) -> Result { 154 | let aws_cfg = cfg.take_sqs_config().await; 155 | let client = Client::from_conf(aws_cfg); 156 | 157 | let producer = SqsProducer { 158 | client, 159 | queue_dsn: cfg.queue_dsn, 160 | }; 161 | 162 | Ok(producer) 163 | } 164 | 165 | async fn consuming_half(mut cfg: SqsConfigFull) -> Result { 166 | let aws_cfg = cfg.take_sqs_config().await; 167 | let client = Client::from_conf(aws_cfg); 168 | 169 | let consumer = SqsConsumer { 170 | client, 171 | queue_dsn: cfg.queue_dsn, 172 | }; 173 | 174 | Ok(consumer) 175 
| } 176 | } 177 | 178 | impl QueueBuilder { 179 | /// Set the SQS configuration to use. 180 | /// 181 | /// If you _don't_ call this method, the SQS configuration will be loaded 182 | /// from the process environment, via [`aws_config::load_from_env`]. 183 | pub fn sqs_config(mut self, value: aws_sdk_sqs::Config) -> Self { 184 | self.config.sqs_config = Some(value); 185 | self 186 | } 187 | 188 | /// Configure whether to override the AWS endpoint URL with the queue DSN. 189 | pub fn override_endpoint(mut self, value: bool) -> Self { 190 | self.config.override_endpoint = value; 191 | self 192 | } 193 | } 194 | 195 | struct SqsAcker { 196 | ack_client: Client, 197 | // FIXME: Cow/Arc this stuff? 198 | queue_dsn: String, 199 | receipt_handle: Option, 200 | 201 | has_been_acked_or_nacked: bool, 202 | } 203 | 204 | impl Acker for SqsAcker { 205 | async fn ack(&mut self) -> Result<()> { 206 | if self.has_been_acked_or_nacked { 207 | return Err(QueueError::CannotAckOrNackTwice); 208 | } 209 | 210 | if let Some(receipt_handle) = &self.receipt_handle { 211 | self.ack_client 212 | .delete_message() 213 | .queue_url(&self.queue_dsn) 214 | .receipt_handle(receipt_handle) 215 | .send() 216 | // Segment the async state machine. send future is >5kb at the time of writing. 
217 | .boxed() 218 | .await 219 | .map_err(aws_to_queue_error)?; 220 | 221 | self.has_been_acked_or_nacked = true; 222 | 223 | Ok(()) 224 | } else { 225 | self.has_been_acked_or_nacked = true; 226 | 227 | Err(QueueError::generic( 228 | DeleteMessageError::ReceiptHandleIsInvalid( 229 | ReceiptHandleIsInvalid::builder() 230 | .message("receipt handle must be Some to be acked") 231 | .build(), 232 | ), 233 | )) 234 | } 235 | } 236 | 237 | async fn nack(&mut self) -> Result<()> { 238 | Ok(()) 239 | } 240 | 241 | async fn set_ack_deadline(&mut self, duration: Duration) -> Result<()> { 242 | if let Some(receipt_handle) = &self.receipt_handle { 243 | let duration_secs = duration.as_secs().try_into().map_err(|e| { 244 | QueueError::Generic(Box::::from(format!( 245 | "set_ack_deadline duration {duration:?} is too large: {e:?}" 246 | ))) 247 | })?; 248 | self.ack_client 249 | .change_message_visibility() 250 | .set_visibility_timeout(Some(duration_secs)) 251 | .queue_url(&self.queue_dsn) 252 | .receipt_handle(receipt_handle) 253 | .send() 254 | // Segment the async state machine. send future is >5kb at the time of writing. 
255 | .boxed() 256 | .await 257 | .map_err(aws_to_queue_error)?; 258 | 259 | Ok(()) 260 | } else { 261 | Err(QueueError::generic( 262 | DeleteMessageError::ReceiptHandleIsInvalid( 263 | ReceiptHandleIsInvalid::builder() 264 | .message("receipt handle must be Some to set ack deadline") 265 | .build(), 266 | ), 267 | )) 268 | } 269 | } 270 | } 271 | 272 | pub struct SqsProducer { 273 | client: Client, 274 | queue_dsn: String, 275 | } 276 | 277 | impl SqsProducer { 278 | pub async fn send_raw(&self, payload: &str) -> Result<()> { 279 | self.send_raw_scheduled(payload, Duration::ZERO).await 280 | } 281 | 282 | pub async fn send_serde_json(&self, payload: &P) -> Result<()> { 283 | let payload = serde_json::to_string(payload)?; 284 | self.send_raw(&payload).await 285 | } 286 | 287 | #[tracing::instrument( 288 | name = "send", 289 | skip_all, 290 | fields( 291 | payload_size = payload.len(), 292 | delay = (delay > Duration::ZERO).then(|| tracing::field::debug(delay)) 293 | ) 294 | )] 295 | pub async fn send_raw_scheduled(&self, payload: &str, delay: Duration) -> Result<()> { 296 | if payload.len() > MAX_PAYLOAD_SIZE { 297 | return Err(QueueError::PayloadTooLarge { 298 | limit: MAX_PAYLOAD_SIZE, 299 | actual: payload.len(), 300 | }); 301 | } 302 | 303 | self.client 304 | .send_message() 305 | .queue_url(&self.queue_dsn) 306 | .message_body(payload) 307 | .delay_seconds(delay.as_secs().try_into().map_err(QueueError::generic)?) 308 | .send() 309 | // Segment the async state machine. send future is >5kb at the time of writing. 
310 | .boxed() 311 | .await 312 | .map_err(aws_to_queue_error)?; 313 | 314 | Ok(()) 315 | } 316 | 317 | pub async fn send_serde_json_scheduled( 318 | &self, 319 | payload: &P, 320 | delay: Duration, 321 | ) -> Result<()> { 322 | let payload = serde_json::to_string(payload)?; 323 | self.send_raw_scheduled(&payload, delay).await 324 | } 325 | 326 | #[tracing::instrument(name = "send_batch", skip_all)] 327 | async fn send_batch_inner( 328 | &self, 329 | payloads: impl IntoIterator + Send, 330 | convert_payload: impl Fn(I) -> Result, 331 | ) -> Result<()> { 332 | // Convert payloads up front and collect to Vec to run the payload size 333 | // check on everything before submitting the first batch. 334 | let payloads: Vec<_> = payloads 335 | .into_iter() 336 | .map(convert_payload) 337 | .collect::>()?; 338 | 339 | for payload in &payloads { 340 | if payload.len() > MAX_PAYLOAD_SIZE { 341 | return Err(QueueError::PayloadTooLarge { 342 | limit: MAX_PAYLOAD_SIZE, 343 | actual: payload.len(), 344 | }); 345 | } 346 | } 347 | 348 | for payloads in payloads.chunks(MAX_BATCH_SIZE) { 349 | let entries = payloads 350 | .iter() 351 | .enumerate() 352 | .map(|(i, payload)| { 353 | SendMessageBatchRequestEntry::builder() 354 | .message_body(payload) 355 | .id(i.to_string()) 356 | .build() 357 | .map_err(QueueError::generic) 358 | }) 359 | .collect::>()?; 360 | 361 | self.client 362 | .send_message_batch() 363 | .queue_url(&self.queue_dsn) 364 | .set_entries(Some(entries)) 365 | .send() 366 | // Segment the async state machine. send future is >5kb at the time of writing. 
367 | .boxed() 368 | .await 369 | .map_err(aws_to_queue_error)?; 370 | } 371 | 372 | Ok(()) 373 | } 374 | 375 | pub async fn redrive_dlq(&self) -> Result<()> { 376 | Err(QueueError::Unsupported( 377 | "redrive_dlq is not supported by SqsBackend", 378 | )) 379 | } 380 | } 381 | 382 | impl crate::QueueProducer for SqsProducer { 383 | type Payload = String; 384 | omni_delegate!(send_raw, send_serde_json, redrive_dlq); 385 | 386 | /// This method is overwritten for the SQS backend to be more efficient 387 | /// than the default of sequentially publishing `payloads`. 388 | fn send_raw_batch( 389 | &self, 390 | payloads: impl IntoIterator + Send, IntoIter: Send> + Send, 391 | ) -> impl Future> { 392 | self.send_batch_inner(payloads, |p| Ok(p.as_ref().into())) 393 | } 394 | 395 | /// This method is overwritten for the SQS backend to be more efficient 396 | /// than the default of sequentially publishing `payloads`. 397 | fn send_serde_json_batch( 398 | &self, 399 | payloads: impl IntoIterator + Send, 400 | ) -> impl Future> { 401 | self.send_batch_inner(payloads, |p| Ok(serde_json::to_string(&p)?)) 402 | } 403 | } 404 | impl crate::ScheduledQueueProducer for SqsProducer { 405 | omni_delegate!(send_raw_scheduled, send_serde_json_scheduled); 406 | } 407 | 408 | pub struct SqsConsumer { 409 | client: Client, 410 | queue_dsn: String, 411 | } 412 | 413 | impl SqsConsumer { 414 | fn wrap_message(&self, message: &Message) -> Delivery { 415 | Delivery::new( 416 | message.body().unwrap_or_default().as_bytes().to_owned(), 417 | SqsAcker { 418 | ack_client: self.client.clone(), 419 | queue_dsn: self.queue_dsn.clone(), 420 | receipt_handle: message.receipt_handle().map(ToOwned::to_owned), 421 | has_been_acked_or_nacked: false, 422 | }, 423 | ) 424 | } 425 | 426 | pub async fn receive(&self) -> Result { 427 | let out = self 428 | .client 429 | .receive_message() 430 | .set_max_number_of_messages(Some(1)) 431 | .queue_url(&self.queue_dsn) 432 | .send() 433 | // Segment the async state 
machine. send future is >5kb at the time of writing. 434 | .boxed() 435 | .await 436 | .map_err(aws_to_queue_error)?; 437 | 438 | out.messages() 439 | .iter() 440 | .map(|message| -> Result { Ok(self.wrap_message(message)) }) 441 | .next() 442 | .ok_or(QueueError::NoData)? 443 | } 444 | 445 | pub async fn receive_all( 446 | &self, 447 | max_messages: usize, 448 | deadline: Duration, 449 | ) -> Result> { 450 | let out = self 451 | .client 452 | .receive_message() 453 | .set_wait_time_seconds(Some( 454 | deadline.as_secs().try_into().map_err(QueueError::generic)?, 455 | )) 456 | .set_max_number_of_messages(Some(max_messages.try_into().map_err(QueueError::generic)?)) 457 | .queue_url(&self.queue_dsn) 458 | .send() 459 | // Segment the async state machine. send future is >5kb at the time of writing. 460 | .boxed() 461 | .await 462 | .map_err(aws_to_queue_error)?; 463 | 464 | out.messages() 465 | .iter() 466 | .map(|message| -> Result { Ok(self.wrap_message(message)) }) 467 | .collect::, _>>() 468 | } 469 | } 470 | 471 | impl crate::QueueConsumer for SqsConsumer { 472 | type Payload = String; 473 | omni_delegate!(receive, receive_all); 474 | 475 | fn max_messages(&self) -> Option { 476 | // Not very clearly documented, but this doc mentions "batch of 10 messages" a 477 | // few times: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html 478 | NonZeroUsize::new(10) 479 | } 480 | } 481 | 482 | fn aws_to_queue_error(err: aws_sdk_sqs::error::SdkError) -> QueueError 483 | where 484 | E: std::error::Error + 'static, 485 | { 486 | let mut message = String::new(); 487 | write_err(&mut message, &err).expect("Write to string never fails"); 488 | QueueError::Generic(message.into()) 489 | } 490 | 491 | fn write_err(s: &mut String, err: &dyn std::error::Error) -> fmt::Result { 492 | write!(s, "{err}")?; 493 | if let Some(source) = err.source() { 494 | write!(s, ": ")?; 495 | write_err(s, source)?; 496 | } 497 | 498 | Ok(()) 499 | } 500 | 
-------------------------------------------------------------------------------- /omniqueue/src/builder.rs: -------------------------------------------------------------------------------- 1 | #![allow(deprecated)] 2 | 3 | use std::marker::PhantomData; 4 | 5 | use crate::{ 6 | DynConsumer, DynProducer, QueueBackend, QueueConsumer as _, QueueProducer as _, Result, 7 | }; 8 | 9 | #[non_exhaustive] 10 | pub struct Static; 11 | 12 | #[non_exhaustive] 13 | pub struct Dynamic; 14 | 15 | /// Queue builder. 16 | /// 17 | /// Created with 18 | /// [`MemoryQueueBackend::builder`][crate::backends::InMemoryBackend::builder], 19 | /// [`RedisQueueBackend::builder`][crate::backends::RedisBackend::builder] and 20 | /// so on. 21 | pub struct QueueBuilder { 22 | pub(crate) config: Q::Config, 23 | 24 | _pd: PhantomData, 25 | } 26 | 27 | impl QueueBuilder { 28 | /// Creates a new queue builder. 29 | /// 30 | /// This constructor exists primarily as an implementation detail of 31 | /// `SomeQueueBackend::builder` associated function, which are the more 32 | /// convenient way of creating a queue builder. 
33 | #[deprecated = "Use SomeBackend::builder() instead"] 34 | pub fn new(config: Q::Config) -> Self { 35 | Self { 36 | config, 37 | _pd: PhantomData, 38 | } 39 | } 40 | 41 | pub async fn build_pair(self) -> Result<(Q::Producer, Q::Consumer)> { 42 | Q::new_pair(self.config).await 43 | } 44 | 45 | pub async fn build_producer(self) -> Result { 46 | Q::producing_half(self.config).await 47 | } 48 | 49 | pub async fn build_consumer(self) -> Result { 50 | Q::consuming_half(self.config).await 51 | } 52 | 53 | pub fn make_dynamic(self) -> QueueBuilder { 54 | QueueBuilder { 55 | config: self.config, 56 | _pd: PhantomData, 57 | } 58 | } 59 | } 60 | 61 | impl QueueBuilder { 62 | pub async fn build_pair(self) -> Result<(DynProducer, DynConsumer)> { 63 | let (p, c) = Q::new_pair(self.config).await?; 64 | Ok((p.into_dyn(), c.into_dyn())) 65 | } 66 | 67 | pub async fn build_producer(self) -> Result { 68 | let p = Q::producing_half(self.config).await?; 69 | 70 | Ok(p.into_dyn()) 71 | } 72 | 73 | pub async fn build_consumer(self) -> Result { 74 | let c = Q::consuming_half(self.config).await?; 75 | 76 | Ok(c.into_dyn()) 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /omniqueue/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # Omniqueue 2 | //! 3 | //! Omniqueue provides a high-level interface for sending and receiving the 4 | //! following over a range of queue backends: 5 | //! 6 | //! * Raw byte arrays in the way most compatible with the queue backend 7 | //! * JSON encoded byte arrays for types that implement [`serde::Deserialize`] 8 | //! and [`serde::Serialize`] 9 | //! 10 | //! ## Cargo Features 11 | //! 12 | //! Each backend is enabled with its associated cargo feature. All backends are 13 | //! enabled by default. As of present it supports: 14 | //! 15 | //! * In-memory queue 16 | //! * Google Cloud Pub/Sub 17 | //! * RabbitMQ 18 | //! * Redis 19 | //! 
* Amazon SQS 20 | //! * Azure Queue Storage 21 | //! 22 | //! ## How to Use Omniqueue 23 | //! 24 | //! Each queue backend has a unique configuration type. One of these 25 | //! configurations is taken when constructing the [`QueueBuilder`]. 26 | //! 27 | //! To create a simple producer and/or consumer: 28 | //! 29 | //! ```no_run 30 | //! # async { 31 | //! use omniqueue::backends::{SqsConfig, SqsBackend}; 32 | //! 33 | //! let cfg = SqsConfig { 34 | //! queue_dsn: "http://localhost:9234/queue/queue_name".to_owned(), 35 | //! override_endpoint: true, 36 | //! }; 37 | //! 38 | //! // Either both producer and consumer 39 | //! let (p, mut c) = SqsBackend::builder(cfg.clone()).build_pair().await?; 40 | //! 41 | //! // Or one half 42 | //! let p = SqsBackend::builder(cfg.clone()).build_producer().await?; 43 | //! let mut c = SqsBackend::builder(cfg).build_consumer().await?; 44 | //! # anyhow::Ok(()) 45 | //! # }; 46 | //! ``` 47 | //! 48 | //! Sending and receiving information from this queue is simple: 49 | //! 50 | //! ```no_run 51 | //! # use omniqueue::backends::SqsBackend; 52 | //! # async { 53 | //! # #[derive(Default, serde::Deserialize, serde::Serialize)] 54 | //! # struct ExampleType; 55 | //! # 56 | //! # let (p, mut c) = SqsBackend::builder("").build_pair().await?; 57 | //! p.send_serde_json(&ExampleType::default()).await?; 58 | //! 59 | //! let delivery = c.receive().await?; 60 | //! let payload = delivery.payload_serde_json::()?; 61 | //! delivery.ack().await.map_err(|(e, _)| e)?; 62 | //! # anyhow::Ok(()) 63 | //! # }; 64 | //! ``` 65 | //! 66 | //! ## `DynProducer`s and `DynConsumer`s 67 | //! 68 | //! Dynamic-dispatch can be used easily for when you're not sure which backend 69 | //! to use at compile-time. 70 | //! 71 | //! Making a `DynProducer` or `DynConsumer` is as simple as adding one line to 72 | //! the builder: 73 | //! 74 | //! ```no_run 75 | //! # async { 76 | //! # let cfg = todo!(); 77 | //! 
use omniqueue::backends::RabbitMqBackend; 78 | //! 79 | //! let (p, mut c) = RabbitMqBackend::builder(cfg) 80 | //! .make_dynamic() 81 | //! .build_pair() 82 | //! .await?; 83 | //! # anyhow::Ok(()) 84 | //! # }; 85 | //! ``` 86 | #![warn(unreachable_pub)] 87 | 88 | use std::fmt::Debug; 89 | 90 | use bytesize::ByteSize; 91 | use thiserror::Error; 92 | 93 | #[macro_use] 94 | mod macros; 95 | 96 | pub mod backends; 97 | pub mod builder; 98 | mod queue; 99 | mod scheduled; 100 | 101 | #[allow(deprecated)] 102 | pub use self::{ 103 | builder::QueueBuilder, 104 | queue::{Delivery, DynConsumer, DynProducer, QueueBackend, QueueConsumer, QueueProducer}, 105 | scheduled::{DynScheduledProducer, ScheduledQueueProducer}, 106 | }; 107 | 108 | /// Type alias for std's `Result` with the error type defaulting to omniqueue's 109 | /// `QueueError`. 110 | pub type Result = std::result::Result; 111 | 112 | #[derive(Debug, Error)] 113 | pub enum QueueError { 114 | #[error("only `new_pair` may be used with this type")] 115 | CannotCreateHalf, 116 | 117 | #[error("a single delivery may only be ACKed or NACKed once")] 118 | CannotAckOrNackTwice, 119 | 120 | #[error("no data was received from the queue")] 121 | NoData, 122 | 123 | #[error("(de)serialization error")] 124 | Serde(#[from] serde_json::Error), 125 | 126 | #[error("payload too large: {} > {}", ByteSize(*actual as u64), ByteSize(*limit as u64))] 127 | PayloadTooLarge { 128 | /// The size of the serialized message, in bytes. 129 | actual: usize, 130 | 131 | /// The message size limit of the queue, in bytes. 
132 | limit: usize, 133 | }, 134 | 135 | #[error("{0}")] 136 | Generic(Box), 137 | 138 | #[error("{0}")] 139 | Unsupported(&'static str), 140 | } 141 | 142 | impl QueueError { 143 | pub fn generic(e: E) -> Self { 144 | Self::Generic(Box::new(e)) 145 | } 146 | } 147 | 148 | pub trait QueuePayload: Send + Sync + 'static { 149 | fn to_bytes_naive(&self) -> Result>; 150 | fn from_bytes_naive(bytes: &[u8]) -> Result>; 151 | } 152 | 153 | impl QueuePayload for Vec { 154 | fn to_bytes_naive(&self) -> Result> { 155 | Ok(self.clone()) 156 | } 157 | 158 | fn from_bytes_naive(bytes: &[u8]) -> Result> { 159 | Ok(Box::new(bytes.to_owned())) 160 | } 161 | } 162 | 163 | impl QueuePayload for String { 164 | fn to_bytes_naive(&self) -> Result> { 165 | Ok(self.as_bytes().to_owned()) 166 | } 167 | 168 | fn from_bytes_naive(bytes: &[u8]) -> Result> { 169 | Ok(Box::new( 170 | String::from_utf8(bytes.to_owned()).map_err(QueueError::generic)?, 171 | )) 172 | } 173 | } 174 | 175 | impl QueuePayload for serde_json::Value { 176 | fn to_bytes_naive(&self) -> Result> { 177 | serde_json::to_vec(self).map_err(Into::into) 178 | } 179 | 180 | fn from_bytes_naive(bytes: &[u8]) -> Result> { 181 | serde_json::from_slice(bytes).map_err(Into::into) 182 | } 183 | } 184 | -------------------------------------------------------------------------------- /omniqueue/src/macros.rs: -------------------------------------------------------------------------------- 1 | macro_rules! 
omni_delegate { 2 | ( receive ) => { 3 | #[deny(unconditional_recursion)] // method call must defer to an inherent method 4 | fn receive(&mut self) -> impl std::future::Future> + Send { 5 | Self::receive(self) 6 | } 7 | }; 8 | ( receive_all ) => { 9 | #[deny(unconditional_recursion)] // method call must defer to an inherent method 10 | fn receive_all( 11 | &mut self, 12 | max_messages: usize, 13 | deadline: Duration, 14 | ) -> impl std::future::Future>> + Send { 15 | Self::receive_all(self, max_messages, deadline) 16 | } 17 | }; 18 | ( send_raw ) => { 19 | #[deny(unconditional_recursion)] // method call must defer to an inherent method 20 | fn send_raw( 21 | &self, 22 | payload: &Self::Payload, 23 | ) -> impl std::future::Future> + Send { 24 | Self::send_raw(self, payload) 25 | } 26 | }; 27 | ( send_serde_json ) => { 28 | #[deny(unconditional_recursion)] // method call must defer to an inherent method 29 | fn send_serde_json( 30 | &self, 31 | payload: &P, 32 | ) -> impl std::future::Future> + Send { 33 | Self::send_serde_json(self, payload) 34 | } 35 | }; 36 | ( send_raw_scheduled ) => { 37 | #[deny(unconditional_recursion)] // method call must defer to an inherent method 38 | fn send_raw_scheduled( 39 | &self, 40 | payload: &Self::Payload, 41 | delay: Duration, 42 | ) -> impl std::future::Future> + Send { 43 | Self::send_raw_scheduled(self, payload, delay) 44 | } 45 | }; 46 | ( send_serde_json_scheduled ) => { 47 | #[deny(unconditional_recursion)] // method call must defer to an inherent method 48 | fn send_serde_json_scheduled( 49 | &self, 50 | payload: &P, 51 | delay: Duration, 52 | ) -> impl std::future::Future> + Send { 53 | Self::send_serde_json_scheduled(self, payload, delay) 54 | } 55 | }; 56 | ( redrive_dlq ) => { 57 | #[deny(unconditional_recursion)] // method call must defer to an inherent method 58 | fn redrive_dlq( 59 | &self, 60 | ) -> impl std::future::Future> + Send { 61 | Self::redrive_dlq(self) 62 | } 63 | }; 64 | 65 | ( $method1:ident, 
$($rest:ident),* $(,)? ) => { 66 | omni_delegate!($method1); 67 | omni_delegate!($($rest),*); 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /omniqueue/src/queue/acker.rs: -------------------------------------------------------------------------------- 1 | use std::{future::Future, pin::Pin, time::Duration}; 2 | 3 | use sync_wrapper::SyncWrapper; 4 | 5 | use crate::Result; 6 | 7 | pub(crate) trait Acker: Send { 8 | fn ack(&mut self) -> impl Future> + Send; 9 | fn nack(&mut self) -> impl Future> + Send; 10 | #[cfg_attr(not(feature = "beta"), allow(dead_code))] 11 | fn set_ack_deadline(&mut self, duration: Duration) -> impl Future> + Send; 12 | } 13 | 14 | pub(crate) struct DynAcker(SyncWrapper>); 15 | 16 | impl DynAcker { 17 | pub(super) fn new(inner: impl Acker + 'static) -> Self { 18 | let c = DynAckerInner { inner }; 19 | Self(SyncWrapper::new(Box::new(c))) 20 | } 21 | } 22 | 23 | impl Acker for DynAcker { 24 | async fn ack(&mut self) -> Result<()> { 25 | self.0.get_mut().ack().await 26 | } 27 | 28 | async fn nack(&mut self) -> Result<()> { 29 | self.0.get_mut().nack().await 30 | } 31 | 32 | async fn set_ack_deadline(&mut self, duration: Duration) -> Result<()> { 33 | self.0.get_mut().set_ack_deadline(duration).await 34 | } 35 | } 36 | 37 | trait ErasedAcker: Send { 38 | fn ack(&mut self) -> Pin> + Send + '_>>; 39 | fn nack(&mut self) -> Pin> + Send + '_>>; 40 | #[cfg_attr(not(feature = "beta"), allow(dead_code))] 41 | fn set_ack_deadline( 42 | &mut self, 43 | duration: Duration, 44 | ) -> Pin> + Send + '_>>; 45 | } 46 | 47 | struct DynAckerInner { 48 | inner: C, 49 | } 50 | 51 | impl ErasedAcker for DynAckerInner { 52 | fn ack(&mut self) -> Pin> + Send + '_>> { 53 | Box::pin(async move { self.inner.ack().await }) 54 | } 55 | 56 | fn nack(&mut self) -> Pin> + Send + '_>> { 57 | Box::pin(async move { self.inner.nack().await }) 58 | } 59 | 60 | fn set_ack_deadline( 61 | &mut self, 62 | duration: Duration, 63 | ) 
-> Pin> + Send + '_>> { 64 | Box::pin(async move { self.inner.set_ack_deadline(duration).await }) 65 | } 66 | } 67 | 68 | #[cfg(test)] 69 | mod tests { 70 | use super::DynAcker; 71 | 72 | fn assert_sync() {} 73 | 74 | #[test] 75 | fn assert_acker_sync() { 76 | assert_sync::(); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /omniqueue/src/queue/consumer.rs: -------------------------------------------------------------------------------- 1 | use std::{cmp::min, future::Future, num::NonZeroUsize, pin::Pin, time::Duration}; 2 | 3 | use super::Delivery; 4 | use crate::{QueuePayload, Result}; 5 | 6 | pub trait QueueConsumer: Send + Sized { 7 | type Payload: QueuePayload; 8 | 9 | fn receive(&mut self) -> impl Future> + Send; 10 | 11 | fn receive_all( 12 | &mut self, 13 | max_messages: usize, 14 | deadline: Duration, 15 | ) -> impl Future>> + Send; 16 | 17 | fn into_dyn(self) -> DynConsumer 18 | where 19 | Self: 'static, 20 | { 21 | DynConsumer::new(self) 22 | } 23 | 24 | /// Returns the largest number that may be passed as `max_messages` to 25 | /// `receive_all`. 26 | /// 27 | /// This is used by [`DynConsumer`] to clamp the `max_messages` to what's 28 | /// permissible by the backend that ends up being used. 
29 | fn max_messages(&self) -> Option { 30 | None 31 | } 32 | } 33 | 34 | pub struct DynConsumer(Box); 35 | 36 | impl DynConsumer { 37 | fn new(inner: impl QueueConsumer + 'static) -> Self { 38 | let c = DynConsumerInner { inner }; 39 | Self(Box::new(c)) 40 | } 41 | } 42 | 43 | trait ErasedQueueConsumer: Send { 44 | fn receive(&mut self) -> Pin> + Send + '_>>; 45 | fn receive_all( 46 | &mut self, 47 | max_messages: usize, 48 | deadline: Duration, 49 | ) -> Pin>> + Send + '_>>; 50 | fn max_messages(&self) -> Option; 51 | } 52 | 53 | struct DynConsumerInner { 54 | inner: C, 55 | } 56 | 57 | impl ErasedQueueConsumer for DynConsumerInner { 58 | fn receive(&mut self) -> Pin> + Send + '_>> { 59 | Box::pin(async move { 60 | let mut t_payload = self.inner.receive().await?; 61 | Ok(Delivery { 62 | payload: t_payload.take_payload(), 63 | acker: t_payload.acker, 64 | }) 65 | }) 66 | } 67 | 68 | fn receive_all( 69 | &mut self, 70 | max_messages: usize, 71 | deadline: Duration, 72 | ) -> Pin>> + Send + '_>> { 73 | Box::pin(async move { 74 | let xs = self.inner.receive_all(max_messages, deadline).await?; 75 | let mut out = Vec::with_capacity(xs.len()); 76 | for mut t_payload in xs { 77 | out.push(Delivery { 78 | payload: t_payload.take_payload(), 79 | acker: t_payload.acker, 80 | }); 81 | } 82 | Ok(out) 83 | }) 84 | } 85 | 86 | fn max_messages(&self) -> Option { 87 | self.inner.max_messages() 88 | } 89 | } 90 | 91 | impl DynConsumer { 92 | pub async fn receive(&mut self) -> Result { 93 | self.0.receive().await 94 | } 95 | 96 | /// Receive up to `max_messages` from the queue, waiting up to `deadline` 97 | /// for more messages to arrive. 98 | /// 99 | /// Unlike the `receive_all` methods on specific backends, this method 100 | /// clamps `max_messages` to what's permissible by the backend, so you don't 101 | /// have to know which backend is actually in use as a user of this type. 
102 | pub async fn receive_all( 103 | &mut self, 104 | max_messages: usize, 105 | deadline: Duration, 106 | ) -> Result> { 107 | let max_messages = match self.max_messages() { 108 | Some(backend_max) => min(max_messages, backend_max.get()), 109 | None => max_messages, 110 | }; 111 | self.0.receive_all(max_messages, deadline).await 112 | } 113 | } 114 | 115 | impl crate::QueueConsumer for DynConsumer { 116 | type Payload = Vec; 117 | omni_delegate!(receive, receive_all); 118 | 119 | fn into_dyn(self) -> DynConsumer { 120 | self 121 | } 122 | 123 | fn max_messages(&self) -> Option { 124 | self.0.max_messages() 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /omniqueue/src/queue/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg_attr(not(feature = "beta"), allow(unused_imports))] 2 | use std::{fmt, future::Future, time::Duration}; 3 | 4 | use serde::de::DeserializeOwned; 5 | 6 | use crate::{QueueError, QueuePayload, Result}; 7 | 8 | mod acker; 9 | mod consumer; 10 | mod producer; 11 | 12 | use self::acker::DynAcker; 13 | pub(crate) use self::{acker::Acker, producer::ErasedQueueProducer}; 14 | pub use self::{ 15 | consumer::{DynConsumer, QueueConsumer}, 16 | producer::{DynProducer, QueueProducer}, 17 | }; 18 | 19 | /// A marker trait with utility functions meant for the creation of new 20 | /// producers and/or consumers. 21 | /// 22 | /// This trait is meant to be implemented on an empty struct representing the 23 | /// backend as a whole. 
24 | #[deprecated = "This trait is likely to be removed in the future, please open an issue if you find it useful"] 25 | #[allow(deprecated)] 26 | pub trait QueueBackend { 27 | type PayloadIn: QueuePayload; 28 | type PayloadOut: QueuePayload; 29 | 30 | type Producer: QueueProducer; 31 | type Consumer: QueueConsumer; 32 | 33 | type Config; 34 | 35 | #[deprecated = "Use SomeBackend::builder(config).build_pair() instead"] 36 | fn new_pair( 37 | config: Self::Config, 38 | ) -> impl Future> + Send; 39 | 40 | #[deprecated = "Use SomeBackend::builder(config).build_producer() instead"] 41 | fn producing_half(config: Self::Config) -> impl Future> + Send; 42 | 43 | #[deprecated = "Use SomeBackend::builder(config).build_consumer() instead"] 44 | fn consuming_half(config: Self::Config) -> impl Future> + Send; 45 | } 46 | 47 | /// The output of queue backends 48 | pub struct Delivery { 49 | payload: Option>, 50 | acker: DynAcker, 51 | } 52 | 53 | impl Delivery { 54 | #[cfg_attr( 55 | not(any( 56 | feature = "in_memory", 57 | feature = "gcp_pubsub", 58 | feature = "rabbitmq", 59 | feature = "redis", 60 | feature = "sqs", 61 | feature = "azure_queue_storage" 62 | )), 63 | allow(dead_code) 64 | )] 65 | pub(crate) fn new(payload: Vec, acker: impl Acker + 'static) -> Self { 66 | Self { 67 | payload: Some(payload), 68 | acker: DynAcker::new(acker), 69 | } 70 | } 71 | 72 | /// Acknowledges the receipt and successful processing of this [`Delivery`]. 73 | /// 74 | /// On failure, `self` is returned alongside the error to allow retrying. 75 | /// 76 | /// The exact nature of this will vary per backend, but usually it ensures 77 | /// that the same message is not reprocessed. 78 | pub async fn ack(mut self) -> Result<(), (QueueError, Self)> { 79 | self.acker.ack().await.map_err(|e| (e, self)) 80 | } 81 | 82 | #[cfg(feature = "beta")] 83 | /// Sets the deadline for acknowledging this [`Delivery`] to `duration`, 84 | /// starting from the time this method is called. 
85 | /// 86 | /// The exact nature of this will vary per backend, but usually ensures 87 | /// that the same message will not be reprocessed if `ack()` is called 88 | /// within an interval of `duration` from the time this method is 89 | /// called. For example, this corresponds to the 'visibility timeout' in 90 | /// SQS, and the 'ack deadline' in GCP 91 | pub async fn set_ack_deadline(&mut self, duration: Duration) -> Result<(), QueueError> { 92 | self.acker.set_ack_deadline(duration).await 93 | } 94 | 95 | /// Explicitly does not Acknowledge the successful processing of this 96 | /// [`Delivery`]. 97 | /// 98 | /// On failure, `self` is returned alongside the error to allow retrying. 99 | /// 100 | /// The exact nature of this will vary by backend, but usually it ensures 101 | /// that the same message is either reinserted into the same queue or is 102 | /// sent to a separate collection. 103 | pub async fn nack(mut self) -> Result<(), (QueueError, Self)> { 104 | self.acker.nack().await.map_err(|e| (e, self)) 105 | } 106 | 107 | /// This method will take the contained bytes out of the delivery, doing no 108 | /// further processing. 109 | /// 110 | /// Once called, subsequent calls to any payload methods will fail. 
111 | pub fn take_payload(&mut self) -> Option> { 112 | self.payload.take() 113 | } 114 | 115 | /// This method 116 | pub fn borrow_payload(&self) -> Option<&[u8]> { 117 | self.payload.as_deref() 118 | } 119 | 120 | pub fn payload_serde_json(&self) -> Result> { 121 | let Some(bytes) = self.payload.as_ref() else { 122 | return Ok(None); 123 | }; 124 | serde_json::from_slice(bytes).map_err(Into::into) 125 | } 126 | } 127 | 128 | impl fmt::Debug for Delivery { 129 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 130 | f.debug_struct("Delivery").finish() 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /omniqueue/src/queue/producer.rs: -------------------------------------------------------------------------------- 1 | use std::{future::Future, pin::Pin}; 2 | 3 | use serde::Serialize; 4 | 5 | use crate::{QueuePayload, Result}; 6 | 7 | pub trait QueueProducer: Send + Sync + Sized { 8 | type Payload: QueuePayload; 9 | 10 | fn send_raw(&self, payload: &Self::Payload) -> impl Future> + Send; 11 | 12 | fn redrive_dlq(&self) -> impl Future> + Send; 13 | 14 | /// Send a batch of raw messages. 15 | /// 16 | /// The default implementation of this sends the payloads sequentially using 17 | /// [`send_raw`][QueueProducer::send_raw]. Specific backends use more 18 | /// efficient implementations where the underlying protocols support it. 
19 | #[tracing::instrument(name = "send_batch", skip_all)] 20 | fn send_raw_batch( 21 | &self, 22 | payloads: impl IntoIterator + Send, IntoIter: Send> + Send, 23 | ) -> impl Future> + Send { 24 | async move { 25 | for payload in payloads { 26 | self.send_raw(payload.as_ref()).await?; 27 | } 28 | Ok(()) 29 | } 30 | } 31 | 32 | fn send_bytes(&self, payload: &[u8]) -> impl Future> + Send { 33 | async move { 34 | let payload = Self::Payload::from_bytes_naive(payload)?; 35 | self.send_raw(&payload).await 36 | } 37 | } 38 | 39 | #[tracing::instrument(name = "send_batch", skip_all)] 40 | fn send_bytes_batch( 41 | &self, 42 | payloads: impl IntoIterator + Send, IntoIter: Send> + Send, 43 | ) -> impl Future> + Send { 44 | async move { 45 | let payloads: Vec<_> = payloads 46 | .into_iter() 47 | .map(|p| Self::Payload::from_bytes_naive(p.as_ref())) 48 | .collect::>()?; 49 | self.send_raw_batch(payloads).await 50 | } 51 | } 52 | 53 | fn send_serde_json( 54 | &self, 55 | payload: &P, 56 | ) -> impl Future> + Send { 57 | async move { 58 | let payload = serde_json::to_vec(payload)?; 59 | self.send_bytes(&payload).await 60 | } 61 | } 62 | 63 | #[tracing::instrument(name = "send_batch", skip_all)] 64 | fn send_serde_json_batch( 65 | &self, 66 | payloads: impl IntoIterator + Send, 67 | ) -> impl Future> + Send { 68 | async move { 69 | let payloads: Vec<_> = payloads 70 | .into_iter() 71 | .map(|payload| { 72 | let payload = serde_json::to_vec(&payload)?; 73 | Self::Payload::from_bytes_naive(&payload) 74 | }) 75 | .collect::>()?; 76 | self.send_raw_batch(payloads).await 77 | } 78 | } 79 | 80 | fn into_dyn(self) -> DynProducer 81 | where 82 | Self: 'static, 83 | { 84 | DynProducer::new(self) 85 | } 86 | } 87 | 88 | pub struct DynProducer(Box); 89 | 90 | impl DynProducer { 91 | fn new(inner: impl QueueProducer + 'static) -> Self { 92 | let dyn_inner = DynProducerInner { inner }; 93 | Self(Box::new(dyn_inner)) 94 | } 95 | } 96 | 97 | pub(crate) trait ErasedQueueProducer: Send + Sync { 
98 | fn send_raw<'a>( 99 | &'a self, 100 | payload: &'a [u8], 101 | ) -> Pin> + Send + 'a>>; 102 | 103 | fn redrive_dlq<'a>(&'a self) -> Pin> + Send + 'a>>; 104 | } 105 | 106 | struct DynProducerInner

{ 107 | inner: P, 108 | } 109 | 110 | impl ErasedQueueProducer for DynProducerInner

{ 111 | fn send_raw<'a>( 112 | &'a self, 113 | payload: &'a [u8], 114 | ) -> Pin> + Send + 'a>> { 115 | Box::pin(async move { self.inner.send_bytes(payload).await }) 116 | } 117 | 118 | fn redrive_dlq<'a>(&'a self) -> Pin> + Send + 'a>> { 119 | Box::pin(async move { self.inner.redrive_dlq().await }) 120 | } 121 | } 122 | 123 | impl DynProducer { 124 | pub async fn send_raw(&self, payload: &[u8]) -> Result<()> { 125 | self.0.send_raw(payload).await 126 | } 127 | 128 | pub async fn send_serde_json(&self, payload: &P) -> Result<()> { 129 | let payload = serde_json::to_vec(payload)?; 130 | self.send_raw(&payload).await 131 | } 132 | 133 | pub async fn redrive_dlq(&self) -> Result<()> { 134 | self.0.redrive_dlq().await 135 | } 136 | } 137 | 138 | impl crate::QueueProducer for DynProducer { 139 | type Payload = Vec; 140 | omni_delegate!(send_raw, send_serde_json, redrive_dlq); 141 | } 142 | -------------------------------------------------------------------------------- /omniqueue/src/scheduled/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{future::Future, pin::Pin, time::Duration}; 2 | 3 | use serde::Serialize; 4 | 5 | use crate::{queue::ErasedQueueProducer, QueuePayload, QueueProducer, Result}; 6 | 7 | pub trait ScheduledQueueProducer: QueueProducer { 8 | fn send_raw_scheduled( 9 | &self, 10 | payload: &Self::Payload, 11 | delay: Duration, 12 | ) -> impl Future> + Send; 13 | 14 | fn send_bytes_scheduled( 15 | &self, 16 | payload: &[u8], 17 | delay: Duration, 18 | ) -> impl Future> + Send { 19 | async move { 20 | let payload = Self::Payload::from_bytes_naive(payload)?; 21 | self.send_raw_scheduled(&payload, delay).await 22 | } 23 | } 24 | 25 | fn send_serde_json_scheduled( 26 | &self, 27 | payload: &P, 28 | delay: Duration, 29 | ) -> impl Future> + Send { 30 | async move { 31 | let payload = serde_json::to_vec(payload)?; 32 | self.send_bytes_scheduled(&payload, delay).await 33 | } 34 | } 35 | 36 | fn 
into_dyn_scheduled(self) -> DynScheduledProducer 37 | where 38 | Self: 'static, 39 | { 40 | DynScheduledProducer::new(self) 41 | } 42 | } 43 | 44 | pub struct DynScheduledProducer(Box); 45 | 46 | impl DynScheduledProducer { 47 | fn new(inner: impl ScheduledQueueProducer + 'static) -> Self { 48 | let dyn_inner = DynScheduledProducerInner { inner }; 49 | Self(Box::new(dyn_inner)) 50 | } 51 | } 52 | 53 | trait ErasedScheduledQueueProducer: ErasedQueueProducer { 54 | fn send_raw_scheduled<'a>( 55 | &'a self, 56 | payload: &'a [u8], 57 | delay: Duration, 58 | ) -> Pin> + Send + 'a>>; 59 | } 60 | 61 | struct DynScheduledProducerInner

{ 62 | inner: P, 63 | } 64 | 65 | impl ErasedQueueProducer for DynScheduledProducerInner

{ 66 | fn send_raw<'a>( 67 | &'a self, 68 | payload: &'a [u8], 69 | ) -> Pin> + Send + 'a>> { 70 | Box::pin(async move { self.inner.send_bytes(payload).await }) 71 | } 72 | fn redrive_dlq<'a>(&'a self) -> Pin> + Send + 'a>> { 73 | Box::pin(async move { self.inner.redrive_dlq().await }) 74 | } 75 | } 76 | 77 | impl ErasedScheduledQueueProducer for DynScheduledProducerInner

{ 78 | fn send_raw_scheduled<'a>( 79 | &'a self, 80 | payload: &'a [u8], 81 | delay: Duration, 82 | ) -> Pin> + Send + 'a>> { 83 | Box::pin(async move { self.inner.send_bytes_scheduled(payload, delay).await }) 84 | } 85 | } 86 | 87 | impl DynScheduledProducer { 88 | pub async fn send_raw(&self, payload: &[u8]) -> Result<()> { 89 | self.0.send_raw(payload).await 90 | } 91 | 92 | pub async fn send_serde_json(&self, payload: &P) -> Result<()> { 93 | let payload = serde_json::to_vec(payload)?; 94 | self.send_raw(&payload).await 95 | } 96 | 97 | pub async fn send_raw_scheduled(&self, payload: &[u8], delay: Duration) -> Result<()> { 98 | self.0.send_raw_scheduled(payload, delay).await 99 | } 100 | 101 | pub async fn send_serde_json_scheduled( 102 | &self, 103 | payload: &P, 104 | delay: Duration, 105 | ) -> Result<()> { 106 | let payload = serde_json::to_vec(payload)?; 107 | self.0.send_raw_scheduled(&payload, delay).await 108 | } 109 | 110 | pub async fn redrive_dlq(&self) -> Result<()> { 111 | self.0.redrive_dlq().await 112 | } 113 | } 114 | 115 | impl crate::QueueProducer for DynScheduledProducer { 116 | type Payload = Vec; 117 | omni_delegate!(send_raw, send_serde_json, redrive_dlq); 118 | } 119 | impl crate::ScheduledQueueProducer for DynScheduledProducer { 120 | omni_delegate!(send_raw_scheduled, send_serde_json_scheduled); 121 | } 122 | -------------------------------------------------------------------------------- /omniqueue/tests/it/azure_queue_storage.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashSet, 3 | time::{Duration, Instant}, 4 | }; 5 | 6 | use azure_storage::StorageCredentials; 7 | use azure_storage_queues::QueueServiceClientBuilder; 8 | use omniqueue::{ 9 | backends::{AqsBackend, AqsConfig, AqsConsumer, AqsProducer}, 10 | QueueError, 11 | }; 12 | use serde::{Deserialize, Serialize}; 13 | 14 | async fn create_queue_get_a_pair() -> (AqsProducer, AqsConsumer) { 15 | 
create_queue_get_a_pair_with_receive_timeout(None).await 16 | } 17 | 18 | async fn create_queue_get_a_pair_with_receive_timeout( 19 | receive_timeout: Option, 20 | ) -> (AqsProducer, AqsConsumer) { 21 | let queue_name: String = std::iter::repeat_with(fastrand::lowercase) 22 | .take(8) 23 | .collect(); 24 | 25 | let credentials = StorageCredentials::access_key( 26 | azure_storage::EMULATOR_ACCOUNT.to_string(), 27 | azure_storage::EMULATOR_ACCOUNT_KEY.to_string(), 28 | ); 29 | let cfg = AqsConfig { 30 | queue_name, 31 | empty_receive_delay: None, 32 | message_ttl: Duration::from_secs(90), 33 | storage_account: azure_storage::EMULATOR_ACCOUNT.to_string(), 34 | credentials: credentials.clone(), 35 | cloud_uri: Some(format!( 36 | "http://localhost:10001/{}", 37 | azure_storage::EMULATOR_ACCOUNT 38 | )), 39 | receive_timeout, 40 | }; 41 | 42 | let cli = QueueServiceClientBuilder::new(cfg.storage_account.clone(), credentials) 43 | .cloud_location(azure_storage::CloudLocation::Custom { 44 | account: cfg.storage_account.clone(), 45 | uri: cfg.cloud_uri.clone().unwrap(), 46 | }) 47 | .build() 48 | .queue_client(cfg.queue_name.clone()); 49 | 50 | cli.create().into_future().await.unwrap(); 51 | 52 | AqsBackend::builder(cfg).build_pair().await.unwrap() 53 | } 54 | 55 | #[derive(Debug, Deserialize, Serialize, Eq, Hash, PartialEq)] 56 | pub struct ExType { 57 | a: String, 58 | } 59 | 60 | #[tokio::test] 61 | async fn test_raw_send_recv() { 62 | let (producer, mut consumer) = create_queue_get_a_pair().await; 63 | 64 | let payload = "test123"; 65 | producer.send_raw(payload).await.unwrap(); 66 | 67 | let mut d = consumer.receive().await.unwrap(); 68 | assert_eq!( 69 | payload, 70 | &String::from_utf8(d.take_payload().unwrap()).unwrap() 71 | ); 72 | d.ack().await.unwrap(); 73 | } 74 | 75 | #[tokio::test] 76 | async fn test_serde_send_recv() { 77 | let (producer, mut consumer) = create_queue_get_a_pair().await; 78 | 79 | let payload = ExType { 80 | a: "test123".to_string(), 81 | }; 
82 | producer.send_serde_json(&payload).await.unwrap(); 83 | 84 | let d = consumer.receive().await.unwrap(); 85 | assert_eq!(d.payload_serde_json::().unwrap().unwrap(), payload); 86 | d.ack().await.unwrap(); 87 | } 88 | 89 | // Note: Azure Queue Storage doesn't guarantee order of messages, hence 90 | // the HashSet popping and length validation instead of assuming 91 | // particular values: 92 | #[tokio::test] 93 | async fn test_send_recv_all_partial() { 94 | let (producer, mut consumer) = create_queue_get_a_pair().await; 95 | 96 | let mut res = (0..10usize) 97 | .map(|i| ExType { 98 | a: format!("test{i}"), 99 | }) 100 | .collect::>(); 101 | 102 | for payload in &res { 103 | producer.send_serde_json(payload).await.unwrap(); 104 | } 105 | 106 | // Receive more than was sent, should return immediately 107 | let now = Instant::now(); 108 | let deadline = Duration::from_secs(5); 109 | let d = consumer.receive_all(20, deadline).await.unwrap(); 110 | assert!(now.elapsed() < deadline); 111 | assert_eq!(d.len(), 10); 112 | for i in d { 113 | res.remove(&i.payload_serde_json::().unwrap().unwrap()); 114 | i.ack().await.unwrap(); 115 | } 116 | } 117 | 118 | #[tokio::test] 119 | async fn test_send_recv_all_full() { 120 | let (producer, mut consumer) = create_queue_get_a_pair().await; 121 | 122 | let mut res = (0..10usize) 123 | .map(|i| ExType { 124 | a: format!("test{i}"), 125 | }) 126 | .collect::>(); 127 | 128 | for payload in &res { 129 | producer.send_serde_json(payload).await.unwrap(); 130 | } 131 | 132 | let d = consumer 133 | .receive_all(10, Duration::from_secs(1)) 134 | .await 135 | .unwrap(); 136 | assert_eq!(d.len(), 10); 137 | for i in d { 138 | res.remove(&i.payload_serde_json::().unwrap().unwrap()); 139 | i.ack().await.unwrap(); 140 | } 141 | assert!(res.is_empty()); 142 | } 143 | 144 | #[tokio::test] 145 | async fn test_send_recv_all_full_then_partial() { 146 | let (producer, mut consumer) = create_queue_get_a_pair().await; 147 | 148 | let mut res = 
(0..10usize) 149 | .map(|i| ExType { 150 | a: format!("test{i}"), 151 | }) 152 | .collect::>(); 153 | 154 | for payload in &res { 155 | producer.send_serde_json(payload).await.unwrap(); 156 | } 157 | 158 | for (received_count, remaining_item_count) in [(6, 4), (4, 0)] { 159 | let now = Instant::now(); 160 | let deadline = Duration::from_secs(2); 161 | let d = consumer.receive_all(6, deadline).await.unwrap(); 162 | assert_eq!(d.len(), received_count); 163 | for i in d { 164 | let p = i.payload_serde_json::().unwrap().unwrap(); 165 | res.remove(&p); 166 | i.ack().await.unwrap(); 167 | } 168 | assert_eq!(res.len(), remaining_item_count); 169 | assert!(now.elapsed() < deadline); 170 | } 171 | } 172 | 173 | #[tokio::test] 174 | async fn test_scheduled_recv() { 175 | let (producer, mut consumer) = create_queue_get_a_pair().await; 176 | 177 | let payload = "test123"; 178 | let delay = Duration::from_secs(1); 179 | producer.send_raw_scheduled(payload, delay).await.unwrap(); 180 | 181 | let d = consumer.receive().await; 182 | match d { 183 | Err(QueueError::NoData) => {} 184 | _ => panic!("Unexpected result"), 185 | } 186 | 187 | // Give it some buffer: 188 | tokio::time::sleep(delay + Duration::from_millis(100)).await; 189 | 190 | let mut d = consumer.receive().await.unwrap(); 191 | assert_eq!( 192 | payload, 193 | &String::from_utf8(d.take_payload().unwrap()).unwrap() 194 | ); 195 | d.ack().await.unwrap(); 196 | } 197 | 198 | #[tokio::test] 199 | async fn test_scheduled_recv_all() { 200 | let (producer, mut consumer) = create_queue_get_a_pair().await; 201 | 202 | let payload = "test123"; 203 | let delay = Duration::from_secs(1); 204 | producer.send_raw_scheduled(payload, delay).await.unwrap(); 205 | 206 | let d = consumer.receive_all(1, Duration::ZERO).await.unwrap(); 207 | assert!(d.is_empty()); 208 | 209 | tokio::time::sleep(delay + Duration::from_millis(100)).await; 210 | 211 | let mut d = consumer.receive_all(1, Duration::ZERO).await.unwrap(); 212 | 
assert_eq!(d.len(), 1); 213 | let mut d = d.pop().unwrap(); 214 | assert_eq!( 215 | payload, 216 | &String::from_utf8(d.take_payload().unwrap()).unwrap() 217 | ); 218 | d.ack().await.unwrap(); 219 | } 220 | 221 | #[tokio::test] 222 | async fn test_empty_recv_all() { 223 | let (_producer, mut consumer) = create_queue_get_a_pair().await; 224 | 225 | let deadline = Duration::from_secs(1); 226 | 227 | let now = Instant::now(); 228 | let d = consumer.receive_all(1, deadline).await.unwrap(); 229 | assert!(now.elapsed() > deadline); 230 | assert!(d.is_empty()); 231 | } 232 | 233 | #[tokio::test] 234 | async fn test_receive_timeout() { 235 | let (producer, mut consumer) = 236 | create_queue_get_a_pair_with_receive_timeout(Some(Duration::from_secs(2))).await; 237 | 238 | let payload = "test123"; 239 | producer.send_raw(payload).await.unwrap(); 240 | 241 | let mut d = consumer.receive().await.unwrap(); 242 | assert_eq!( 243 | payload, 244 | &String::from_utf8(d.take_payload().unwrap()).unwrap() 245 | ); 246 | 247 | tokio::time::sleep(Duration::from_secs(2) + Duration::from_millis(100)).await; 248 | 249 | let mut d = consumer.receive().await.unwrap(); 250 | assert_eq!( 251 | payload, 252 | &String::from_utf8(d.take_payload().unwrap()).unwrap() 253 | ); 254 | d.ack().await.unwrap(); 255 | 256 | tokio::time::sleep(Duration::from_secs(2) + Duration::from_millis(100)).await; 257 | 258 | match consumer.receive().await { 259 | Err(QueueError::NoData) => {} 260 | _ => panic!("Unexpected result"), 261 | } 262 | } 263 | -------------------------------------------------------------------------------- /omniqueue/tests/it/gcp_pubsub.rs: -------------------------------------------------------------------------------- 1 | //! Support for Google Cloud Pub/Sub. 2 | //! 3 | //! In this system subscriptions are like queue bindings to topics. 4 | //! Consumers need a subscription id to start receiving messages. 5 | //! 
We don't have any public API for managing/creating/deleting subscriptions in 6 | //! this module, so this is left to the user to do via whatever method they 7 | //! like. 8 | //! 9 | //! - 10 | //! - 11 | //! - (how to publish messages 12 | //! ad hoc, helpful for debugging) 13 | //! 14 | //! Don't have a better place to mention this just yet. 15 | //! When testing against the gcloud emulator, you need to set 16 | //! `PUBSUB_EMULATOR_HOST` to the bind address, and `PUBSUB_PROJECT_ID` 17 | //! (matching however the emulator was configured). This should bypass the need 18 | //! for credentials and so on. ```sh 19 | //! export PUBSUB_EMULATOR_HOST=localhost:8085 20 | //! export PUBSUB_PROJECT_ID=local-project 21 | //! ``` 22 | //! > N.b. the rust client hardcodes the project id to `local-project` when it 23 | //! > sees the 24 | //! > `PUBSUB_EMULATOR_HOST` env var in use, so if you see errors about 25 | //! > resources not found etc, it 26 | //! > might be because of a project mismatch. 27 | //! 28 | //! To use the `gcloud` CLI with the emulator (useful for creating 29 | //! topics/subscriptions), you have to configure an override for the pubsub API: 30 | //! ```sh 31 | //! gcloud config set api_endpoint_overrides/pubsub "http://${PUBSUB_EMULATOR_HOST}/" 32 | //! ``` 33 | //! Note that you'll also have to manually set it back to the default as needed: 34 | //! ```sh 35 | //! gcloud config unset api_endpoint_overrides/pubsub 36 | //! ``` 37 | //! h/t 38 | //! 39 | //! Also note, and this is odd, `gcloud` will prompt you to login even though 40 | //! you're trying to connect to a local process. 41 | //! Go ahead and follow the prompts to get your CLI working. 42 | //! 43 | //! I guess it still wants to talk to GCP for other interactions other than the 44 | //! pubsub API. 45 | //! 46 | //! ## Example `gcloud` usage: 47 | //! ```sh 48 | //! gcloud --project=local-project pubsub topics create tester 49 | //! 
gcloud --project=local-project pubsub topics create dead-letters 50 | //! gcloud --project=local-project pubsub subscriptions create local-1 \ 51 | //! --topic=tester \ 52 | //! --dead-letter-topic=dead-letters \ 53 | //! --max-delivery-attempts=5 54 | //! gcloud --project local-project pubsub topics publish tester --message='{"my 55 | //! message": 1234}' ``` 56 | 57 | use std::time::{Duration, Instant}; 58 | 59 | use gcloud_googleapis::pubsub::v1::DeadLetterPolicy; 60 | use gcloud_pubsub::{ 61 | client::{Client, ClientConfig}, 62 | subscription::SubscriptionConfig, 63 | }; 64 | use omniqueue::{ 65 | backends::{GcpPubSubBackend, GcpPubSubConfig}, 66 | QueueBuilder, 67 | }; 68 | use serde::{Deserialize, Serialize}; 69 | 70 | const DEFAULT_PUBSUB_EMULATOR_HOST: &str = "localhost:8085"; 71 | /// Controls how many times a message can be nack'd before it lands on the dead 72 | /// letter topic. 73 | const MAX_DELIVERY_ATTEMPTS: i32 = 5; 74 | 75 | async fn get_client() -> Client { 76 | // The `Default` impl for `ClientConfig` looks for this env var. When set it 77 | // branches for local-mode use using the addr in the env var and a hardcoded 78 | // project id of `local-project`. 79 | if std::env::var("PUBSUB_EMULATOR_HOST").is_err() { 80 | std::env::set_var("PUBSUB_EMULATOR_HOST", DEFAULT_PUBSUB_EMULATOR_HOST); 81 | } 82 | Client::new(ClientConfig::default()).await.unwrap() 83 | } 84 | 85 | // FIXME: check to see if there's already one of these in here somewhere 86 | fn random_chars() -> impl Iterator { 87 | std::iter::repeat_with(fastrand::alphanumeric) 88 | } 89 | 90 | /// Returns a [`QueueBuilder`] configured to connect to the GCP emulator 91 | /// instance spawned by the file `testing-docker-compose.yaml` in the root of 92 | /// the repository. 93 | /// 94 | /// Additionally this will make a temporary topic/subscription on that instance 95 | /// for the duration of the test such as to ensure there is no stealing. 
96 | async fn make_test_queue() -> QueueBuilder { 97 | let client = get_client().await; 98 | 99 | let topic_name: String = "topic-".chars().chain(random_chars().take(8)).collect(); 100 | // Need to define a dead letter topic to avoid the "bad" test cases from 101 | // pulling the nacked messages again and again. 102 | let dead_letter_topic_name: String = "topic-".chars().chain(random_chars().take(8)).collect(); 103 | let subscription_name: String = "subscription-" 104 | .chars() 105 | .chain(random_chars().take(8)) 106 | .collect(); 107 | 108 | let topic = client.create_topic(&topic_name, None, None).await.unwrap(); 109 | let dead_letter_topic = client 110 | .create_topic(&dead_letter_topic_name, None, None) 111 | .await 112 | .unwrap(); 113 | let subscription = client 114 | .create_subscription( 115 | &subscription_name, 116 | &topic_name, 117 | SubscriptionConfig { 118 | // Messages published to the topic need to supply a unique ID to make use of this 119 | enable_exactly_once_delivery: true, 120 | dead_letter_policy: Some(DeadLetterPolicy { 121 | dead_letter_topic: dead_letter_topic.fully_qualified_name().into(), 122 | max_delivery_attempts: MAX_DELIVERY_ATTEMPTS, 123 | }), 124 | ..Default::default() 125 | }, 126 | None, 127 | ) 128 | .await 129 | .unwrap(); 130 | 131 | let config = GcpPubSubConfig { 132 | topic_id: topic.id(), 133 | subscription_id: subscription.id(), 134 | credentials_file: None, 135 | }; 136 | 137 | GcpPubSubBackend::builder(config) 138 | } 139 | 140 | #[tokio::test] 141 | async fn test_raw_send_recv() { 142 | let payload = b"{\"test\": \"data\"}"; 143 | let (p, mut c) = make_test_queue().await.build_pair().await.unwrap(); 144 | 145 | p.send_raw(payload).await.unwrap(); 146 | 147 | let d = c.receive().await.unwrap(); 148 | assert_eq!(d.borrow_payload().unwrap(), payload); 149 | } 150 | 151 | #[tokio::test] 152 | async fn test_bytes_send_recv() { 153 | use omniqueue::QueueProducer as _; 154 | 155 | let payload = b"hello"; 156 | let (p, mut c) 
= make_test_queue().await.build_pair().await.unwrap(); 157 | 158 | p.send_bytes(payload).await.unwrap(); 159 | 160 | let d = c.receive().await.unwrap(); 161 | assert_eq!(d.borrow_payload().unwrap(), payload); 162 | d.ack().await.unwrap(); 163 | } 164 | 165 | #[derive(Debug, Deserialize, Serialize, PartialEq)] 166 | pub struct ExType { 167 | a: u8, 168 | } 169 | 170 | #[tokio::test] 171 | async fn test_serde_send_recv() { 172 | let payload = ExType { a: 2 }; 173 | let (p, mut c) = make_test_queue().await.build_pair().await.unwrap(); 174 | 175 | p.send_serde_json(&payload).await.unwrap(); 176 | 177 | let d = c.receive().await.unwrap(); 178 | assert_eq!(d.payload_serde_json::().unwrap().unwrap(), payload); 179 | d.ack().await.unwrap(); 180 | } 181 | 182 | /// Consumer will return immediately if there are fewer than max messages to 183 | /// start with. 184 | #[tokio::test] 185 | async fn test_send_recv_all_partial() { 186 | let payload = ExType { a: 2 }; 187 | let (p, mut c) = make_test_queue().await.build_pair().await.unwrap(); 188 | 189 | p.send_serde_json(&payload).await.unwrap(); 190 | let deadline = Duration::from_secs(1); 191 | 192 | let now = Instant::now(); 193 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 194 | assert_eq!(xs.len(), 1); 195 | let d = xs.remove(0); 196 | assert_eq!(d.payload_serde_json::().unwrap().unwrap(), payload); 197 | d.ack().await.unwrap(); 198 | assert!(now.elapsed() <= deadline); 199 | } 200 | 201 | /// Consumer should yield items immediately if there's a full batch ready on the 202 | /// first poll. 
203 | #[tokio::test] 204 | async fn test_send_recv_all_full() { 205 | let payload1 = ExType { a: 1 }; 206 | let payload2 = ExType { a: 2 }; 207 | let (p, mut c) = make_test_queue().await.build_pair().await.unwrap(); 208 | 209 | p.send_serde_json(&payload1).await.unwrap(); 210 | p.send_serde_json(&payload2).await.unwrap(); 211 | let deadline = Duration::from_secs(1); 212 | 213 | let now = Instant::now(); 214 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 215 | assert_eq!(xs.len(), 2); 216 | let d1 = xs.remove(0); 217 | assert_eq!( 218 | d1.payload_serde_json::().unwrap().unwrap(), 219 | payload1 220 | ); 221 | d1.ack().await.unwrap(); 222 | 223 | let d2 = xs.remove(0); 224 | assert_eq!( 225 | d2.payload_serde_json::().unwrap().unwrap(), 226 | payload2 227 | ); 228 | d2.ack().await.unwrap(); 229 | // N.b. it's still possible this could turn up false if the test runs too 230 | // slow. 231 | assert!(now.elapsed() < deadline); 232 | } 233 | 234 | /// Consumer will return the full batch immediately, but also return immediately 235 | /// if a partial batch is ready. 
236 | #[tokio::test] 237 | async fn test_send_recv_all_full_then_partial() { 238 | let payload1 = ExType { a: 1 }; 239 | let payload2 = ExType { a: 2 }; 240 | let payload3 = ExType { a: 3 }; 241 | let (p, mut c) = make_test_queue().await.build_pair().await.unwrap(); 242 | 243 | p.send_serde_json(&payload1).await.unwrap(); 244 | p.send_serde_json(&payload2).await.unwrap(); 245 | p.send_serde_json(&payload3).await.unwrap(); 246 | 247 | let deadline = Duration::from_secs(1); 248 | let now1 = Instant::now(); 249 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 250 | assert_eq!(xs.len(), 2); 251 | let d1 = xs.remove(0); 252 | assert_eq!( 253 | d1.payload_serde_json::().unwrap().unwrap(), 254 | payload1 255 | ); 256 | d1.ack().await.unwrap(); 257 | 258 | let d2 = xs.remove(0); 259 | assert_eq!( 260 | d2.payload_serde_json::().unwrap().unwrap(), 261 | payload2 262 | ); 263 | d2.ack().await.unwrap(); 264 | assert!(now1.elapsed() < deadline); 265 | 266 | // 2nd call 267 | let now2 = Instant::now(); 268 | let mut ys = c.receive_all(2, deadline).await.unwrap(); 269 | assert_eq!(ys.len(), 1); 270 | let d3 = ys.remove(0); 271 | assert_eq!( 272 | d3.payload_serde_json::().unwrap().unwrap(), 273 | payload3 274 | ); 275 | d3.ack().await.unwrap(); 276 | assert!(now2.elapsed() <= deadline); 277 | } 278 | 279 | /// Consumer will NOT wait indefinitely for at least one item. 
280 | #[tokio::test] 281 | async fn test_send_recv_all_late_arriving_items() { 282 | let (_p, mut c) = make_test_queue().await.build_pair().await.unwrap(); 283 | 284 | let deadline = Duration::from_secs(1); 285 | let now = Instant::now(); 286 | let xs = c.receive_all(2, deadline).await.unwrap(); 287 | let elapsed = now.elapsed(); 288 | 289 | assert_eq!(xs.len(), 0); 290 | // Elapsed should be around the deadline, ballpark 291 | assert!(elapsed >= deadline); 292 | assert!(elapsed <= deadline + Duration::from_millis(200)); 293 | } 294 | -------------------------------------------------------------------------------- /omniqueue/tests/it/main.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "azure_queue_storage")] 2 | mod azure_queue_storage; 3 | #[cfg(feature = "gcp_pubsub")] 4 | mod gcp_pubsub; 5 | #[cfg(feature = "rabbitmq")] 6 | mod rabbitmq; 7 | #[cfg(feature = "redis")] 8 | mod redis; 9 | #[cfg(feature = "redis_cluster")] 10 | mod redis_cluster; 11 | #[cfg(feature = "redis")] 12 | mod redis_fallback; 13 | #[cfg(feature = "sqs")] 14 | mod sqs; 15 | -------------------------------------------------------------------------------- /omniqueue/tests/it/rabbitmq.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, Instant}; 2 | 3 | use lapin::{ 4 | options::{ 5 | BasicConsumeOptions, BasicPublishOptions, ExchangeDeclareOptions, QueueDeclareOptions, 6 | }, 7 | types::{AMQPValue, FieldTable}, 8 | BasicProperties, Connection, ConnectionProperties, ExchangeKind, 9 | }; 10 | use omniqueue::{ 11 | backends::{RabbitMqBackend, RabbitMqConfig}, 12 | QueueBuilder, 13 | }; 14 | use serde::{Deserialize, Serialize}; 15 | 16 | const MQ_URI: &str = "amqp://guest:guest@localhost:5672/%2f"; 17 | 18 | /// Returns a [`QueueBuilder`] configured to connect to the RabbitMQ instance 19 | /// spawned by the file `testing-docker-compose.yaml` in the root of the 20 | /// 
repository. 21 | /// 22 | /// Additionally this will make a temporary queue on that instance for the 23 | /// duration of the test such as to ensure there is no stealing.w 24 | async fn make_test_queue( 25 | prefetch_count: Option, 26 | reinsert_on_nack: bool, 27 | ) -> QueueBuilder { 28 | let options = ConnectionProperties::default() 29 | .with_connection_name( 30 | std::iter::repeat_with(fastrand::alphanumeric) 31 | .take(8) 32 | .collect::() 33 | .into(), 34 | ) 35 | .with_executor(tokio_executor_trait::Tokio::current()) 36 | .with_reactor(tokio_reactor_trait::Tokio); 37 | let connection = Connection::connect(MQ_URI, options.clone()).await.unwrap(); 38 | let channel = connection.create_channel().await.unwrap(); 39 | 40 | let queue_name: String = std::iter::repeat_with(fastrand::alphanumeric) 41 | .take(8) 42 | .collect(); 43 | 44 | channel 45 | .queue_declare( 46 | &queue_name, 47 | QueueDeclareOptions { 48 | auto_delete: true, 49 | ..Default::default() 50 | }, 51 | FieldTable::default(), 52 | ) 53 | .await 54 | .unwrap(); 55 | 56 | const DELAY_EXCHANGE: &str = "later-alligator"; 57 | let mut args = FieldTable::default(); 58 | args.insert( 59 | "x-delayed-type".into(), 60 | AMQPValue::LongString("direct".into()), 61 | ); 62 | channel 63 | .exchange_declare( 64 | DELAY_EXCHANGE, 65 | ExchangeKind::Custom("x-delayed-message".to_string()), 66 | ExchangeDeclareOptions { 67 | auto_delete: true, 68 | ..Default::default() 69 | }, 70 | args, 71 | ) 72 | .await 73 | .unwrap(); 74 | channel 75 | .queue_bind( 76 | &queue_name, 77 | DELAY_EXCHANGE, 78 | &queue_name, 79 | Default::default(), 80 | Default::default(), 81 | ) 82 | .await 83 | .unwrap(); 84 | 85 | let config = RabbitMqConfig { 86 | uri: MQ_URI.to_owned(), 87 | connection_properties: options, 88 | publish_exchange: DELAY_EXCHANGE.to_string(), 89 | publish_routing_key: queue_name.clone(), 90 | publish_options: BasicPublishOptions::default(), 91 | publish_properties: BasicProperties::default(), 92 | consume_queue: 
queue_name, 93 | consumer_tag: "test".to_owned(), 94 | consume_options: BasicConsumeOptions::default(), 95 | consume_arguments: FieldTable::default(), 96 | consume_prefetch_count: prefetch_count, 97 | requeue_on_nack: reinsert_on_nack, 98 | }; 99 | 100 | RabbitMqBackend::builder(config) 101 | } 102 | 103 | #[tokio::test] 104 | async fn test_bytes_send_recv() { 105 | use omniqueue::QueueProducer as _; 106 | 107 | let payload = b"hello"; 108 | let (p, mut c) = make_test_queue(None, false) 109 | .await 110 | .build_pair() 111 | .await 112 | .unwrap(); 113 | 114 | p.send_bytes(payload).await.unwrap(); 115 | 116 | let d = c.receive().await.unwrap(); 117 | assert_eq!(d.borrow_payload().unwrap(), payload); 118 | d.ack().await.unwrap(); 119 | 120 | // The RabbitMQ native payload type is a Vec, so we can also send raw 121 | p.send_raw(payload).await.unwrap(); 122 | 123 | let d = c.receive().await.unwrap(); 124 | assert_eq!(d.borrow_payload().unwrap(), payload); 125 | d.ack().await.unwrap(); 126 | } 127 | 128 | #[derive(Debug, Deserialize, Serialize, PartialEq)] 129 | pub struct ExType { 130 | a: u8, 131 | } 132 | 133 | #[tokio::test] 134 | async fn test_serde_send_recv() { 135 | let payload = ExType { a: 2 }; 136 | let (p, mut c) = make_test_queue(None, false) 137 | .await 138 | .build_pair() 139 | .await 140 | .unwrap(); 141 | 142 | p.send_serde_json(&payload).await.unwrap(); 143 | 144 | let d = c.receive().await.unwrap(); 145 | assert_eq!(d.payload_serde_json::().unwrap().unwrap(), payload); 146 | d.ack().await.unwrap(); 147 | } 148 | 149 | /// Consumer will return immediately if there are fewer than max messages to 150 | /// start with. 
151 | #[tokio::test] 152 | async fn test_send_recv_all_partial() { 153 | let payload = ExType { a: 2 }; 154 | let (p, mut c) = make_test_queue(None, false) 155 | .await 156 | .build_pair() 157 | .await 158 | .unwrap(); 159 | 160 | p.send_serde_json(&payload).await.unwrap(); 161 | let deadline = Duration::from_secs(1); 162 | 163 | let now = Instant::now(); 164 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 165 | assert_eq!(xs.len(), 1); 166 | let d = xs.remove(0); 167 | assert_eq!(d.payload_serde_json::().unwrap().unwrap(), payload); 168 | d.ack().await.unwrap(); 169 | assert!(now.elapsed() <= deadline); 170 | } 171 | 172 | /// Consumer should yield items immediately if there's a full batch ready on the 173 | /// first poll. 174 | #[tokio::test] 175 | async fn test_send_recv_all_full() { 176 | let payload1 = ExType { a: 1 }; 177 | let payload2 = ExType { a: 2 }; 178 | let (p, mut c) = make_test_queue(None, false) 179 | .await 180 | .build_pair() 181 | .await 182 | .unwrap(); 183 | 184 | p.send_serde_json(&payload1).await.unwrap(); 185 | p.send_serde_json(&payload2).await.unwrap(); 186 | 187 | // XXX: rabbit's receive_all impl relies on stream items to be in a ready 188 | // state in order for them to be batched together. Sleeping to help them 189 | // settle before we poll. 190 | tokio::time::sleep(Duration::from_millis(100)).await; 191 | let deadline = Duration::from_secs(1); 192 | 193 | let now = Instant::now(); 194 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 195 | assert_eq!(xs.len(), 2); 196 | let d1 = xs.remove(0); 197 | assert_eq!( 198 | d1.payload_serde_json::().unwrap().unwrap(), 199 | payload1 200 | ); 201 | d1.ack().await.unwrap(); 202 | 203 | let d2 = xs.remove(0); 204 | assert_eq!( 205 | d2.payload_serde_json::().unwrap().unwrap(), 206 | payload2 207 | ); 208 | d2.ack().await.unwrap(); 209 | // N.b. it's still possible this could turn up false if the test runs too 210 | // slow. 
211 | assert!(now.elapsed() < deadline); 212 | } 213 | 214 | /// Consumer will return the full batch immediately, but also return immediately 215 | /// if a partial batch is ready. 216 | #[tokio::test] 217 | async fn test_send_recv_all_full_then_partial() { 218 | let payload1 = ExType { a: 1 }; 219 | let payload2 = ExType { a: 2 }; 220 | let payload3 = ExType { a: 3 }; 221 | let (p, mut c) = make_test_queue(None, false) 222 | .await 223 | .build_pair() 224 | .await 225 | .unwrap(); 226 | 227 | p.send_serde_json(&payload1).await.unwrap(); 228 | p.send_serde_json(&payload2).await.unwrap(); 229 | p.send_serde_json(&payload3).await.unwrap(); 230 | 231 | // XXX: rabbit's receive_all impl relies on stream items to be in a ready 232 | // state in order for them to be batched together. Sleeping to help them 233 | // settle before we poll. 234 | tokio::time::sleep(Duration::from_millis(100)).await; 235 | 236 | let deadline = Duration::from_secs(1); 237 | let now1 = Instant::now(); 238 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 239 | assert_eq!(xs.len(), 2); 240 | let d1 = xs.remove(0); 241 | assert_eq!( 242 | d1.payload_serde_json::().unwrap().unwrap(), 243 | payload1 244 | ); 245 | d1.ack().await.unwrap(); 246 | 247 | let d2 = xs.remove(0); 248 | assert_eq!( 249 | d2.payload_serde_json::().unwrap().unwrap(), 250 | payload2 251 | ); 252 | d2.ack().await.unwrap(); 253 | assert!(now1.elapsed() < deadline); 254 | 255 | // 2nd call 256 | let now2 = Instant::now(); 257 | let mut ys = c.receive_all(2, deadline).await.unwrap(); 258 | assert_eq!(ys.len(), 1); 259 | let d3 = ys.remove(0); 260 | assert_eq!( 261 | d3.payload_serde_json::().unwrap().unwrap(), 262 | payload3 263 | ); 264 | d3.ack().await.unwrap(); 265 | assert!(now2.elapsed() <= deadline); 266 | } 267 | 268 | /// Consumer will NOT wait indefinitely for at least one item. 
269 | #[tokio::test] 270 | async fn test_send_recv_all_late_arriving_items() { 271 | let (_p, mut c) = make_test_queue(None, false) 272 | .await 273 | .build_pair() 274 | .await 275 | .unwrap(); 276 | 277 | let deadline = Duration::from_secs(1); 278 | let now = Instant::now(); 279 | let xs = c.receive_all(2, deadline).await.unwrap(); 280 | let elapsed = now.elapsed(); 281 | 282 | assert_eq!(xs.len(), 0); 283 | // Elapsed should be around the deadline, ballpark 284 | assert!(elapsed >= deadline); 285 | assert!(elapsed <= deadline + Duration::from_millis(200)); 286 | } 287 | 288 | #[tokio::test] 289 | async fn test_scheduled() { 290 | let payload1 = ExType { a: 1 }; 291 | let (p, mut c) = make_test_queue(None, false) 292 | .await 293 | .build_pair() 294 | .await 295 | .unwrap(); 296 | 297 | let delay = Duration::from_secs(3); 298 | let now = Instant::now(); 299 | p.send_serde_json_scheduled(&payload1, delay).await.unwrap(); 300 | let delivery = c 301 | .receive_all(1, delay * 2) 302 | .await 303 | .unwrap() 304 | .into_iter() 305 | .next() 306 | .unwrap(); 307 | assert!(now.elapsed() >= delay); 308 | assert!(now.elapsed() < delay * 2); 309 | assert_eq!(Some(payload1), delivery.payload_serde_json().unwrap()); 310 | } 311 | -------------------------------------------------------------------------------- /omniqueue/tests/it/redis_cluster.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, Instant}; 2 | 3 | use omniqueue::{ 4 | backends::{ 5 | redis::DeadLetterQueueConfig, RedisBackend, RedisClusterBackendBuilder, RedisConfig, 6 | }, 7 | Delivery, 8 | }; 9 | use redis::{cluster::ClusterClient, AsyncCommands, Commands}; 10 | use serde::{Deserialize, Serialize}; 11 | 12 | const ROOT_URL: &str = "redis://localhost:6380"; 13 | 14 | pub struct RedisStreamDrop(String); 15 | impl Drop for RedisStreamDrop { 16 | fn drop(&mut self) { 17 | let client = ClusterClient::new(vec![ROOT_URL]).unwrap(); 18 | let mut conn = 
client.get_connection().unwrap(); 19 | let _: () = conn.del(&self.0).unwrap(); 20 | } 21 | } 22 | 23 | /// Returns a [`QueueBuilder`] configured to connect to the Redis instance 24 | /// spawned by the file `testing-docker-compose.yaml` in the root of the 25 | /// repository. 26 | /// 27 | /// Additionally this will make a temporary stream on that instance for the 28 | /// duration of the test such as to ensure there is no stealing 29 | /// 30 | /// This will also return a [`RedisStreamDrop`] to clean up the stream after the 31 | /// test ends. 32 | async fn make_test_queue() -> (RedisClusterBackendBuilder, RedisStreamDrop) { 33 | let stream_name: String = std::iter::repeat_with(fastrand::alphanumeric) 34 | .take(8) 35 | .collect(); 36 | 37 | let client = ClusterClient::new(vec![ROOT_URL]).unwrap(); 38 | let mut conn = client.get_async_connection().await.unwrap(); 39 | 40 | let _: () = conn 41 | .xgroup_create_mkstream(&stream_name, "test_cg", 0i8) 42 | .await 43 | .unwrap(); 44 | 45 | let config = RedisConfig { 46 | dsn: ROOT_URL.to_owned(), 47 | max_connections: 8, 48 | reinsert_on_nack: false, 49 | queue_key: stream_name.clone(), 50 | delayed_queue_key: format!("{stream_name}::delay"), 51 | delayed_lock_key: format!("{stream_name}::delay_lock"), 52 | consumer_group: "test_cg".to_owned(), 53 | consumer_name: "test_cn".to_owned(), 54 | payload_key: "payload".to_owned(), 55 | ack_deadline_ms: 5_000, 56 | dlq_config: None, 57 | sentinel_config: None, 58 | }; 59 | 60 | ( 61 | RedisBackend::builder(config).cluster(), 62 | RedisStreamDrop(stream_name), 63 | ) 64 | } 65 | 66 | #[tokio::test] 67 | async fn test_raw_send_recv() { 68 | let (builder, _drop) = make_test_queue().await; 69 | let payload = b"{\"test\": \"data\"}"; 70 | let (p, mut c) = builder.build_pair().await.unwrap(); 71 | 72 | p.send_raw(payload).await.unwrap(); 73 | 74 | let d = c.receive().await.unwrap(); 75 | assert_eq!(d.borrow_payload().unwrap(), payload); 76 | } 77 | 78 | #[tokio::test] 79 | async fn 
test_bytes_send_recv() { 80 | use omniqueue::QueueProducer as _; 81 | 82 | let (builder, _drop) = make_test_queue().await; 83 | let payload = b"hello"; 84 | let (p, mut c) = builder.build_pair().await.unwrap(); 85 | 86 | p.send_bytes(payload).await.unwrap(); 87 | 88 | let d = c.receive().await.unwrap(); 89 | assert_eq!(d.borrow_payload().unwrap(), payload); 90 | d.ack().await.unwrap(); 91 | } 92 | 93 | #[derive(Debug, Deserialize, Serialize, PartialEq)] 94 | pub struct ExType { 95 | a: u8, 96 | } 97 | 98 | #[tokio::test] 99 | async fn test_serde_send_recv() { 100 | let (builder, _drop) = make_test_queue().await; 101 | let payload = ExType { a: 2 }; 102 | let (p, mut c) = builder.build_pair().await.unwrap(); 103 | 104 | p.send_serde_json(&payload).await.unwrap(); 105 | 106 | let d = c.receive().await.unwrap(); 107 | assert_eq!(d.payload_serde_json::().unwrap().unwrap(), payload); 108 | d.ack().await.unwrap(); 109 | } 110 | 111 | /// Consumer will return immediately if there are fewer than max messages to 112 | /// start with. 113 | #[tokio::test] 114 | async fn test_send_recv_all_partial() { 115 | let (builder, _drop) = make_test_queue().await; 116 | 117 | let payload = ExType { a: 2 }; 118 | let (p, mut c) = builder.build_pair().await.unwrap(); 119 | 120 | p.send_serde_json(&payload).await.unwrap(); 121 | let deadline = Duration::from_secs(1); 122 | 123 | let now = Instant::now(); 124 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 125 | assert_eq!(xs.len(), 1); 126 | let d = xs.remove(0); 127 | assert_eq!(d.payload_serde_json::().unwrap().unwrap(), payload); 128 | d.ack().await.unwrap(); 129 | assert!(now.elapsed() <= deadline); 130 | } 131 | 132 | /// Consumer should yield items immediately if there's a full batch ready on the 133 | /// first poll. 
134 | #[tokio::test] 135 | async fn test_send_recv_all_full() { 136 | let payload1 = ExType { a: 1 }; 137 | let payload2 = ExType { a: 2 }; 138 | 139 | let (builder, _drop) = make_test_queue().await; 140 | 141 | let (p, mut c) = builder.build_pair().await.unwrap(); 142 | 143 | p.send_serde_json(&payload1).await.unwrap(); 144 | p.send_serde_json(&payload2).await.unwrap(); 145 | let deadline = Duration::from_secs(1); 146 | 147 | let now = Instant::now(); 148 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 149 | assert_eq!(xs.len(), 2); 150 | let d1 = xs.remove(0); 151 | assert_eq!( 152 | d1.payload_serde_json::().unwrap().unwrap(), 153 | payload1 154 | ); 155 | d1.ack().await.unwrap(); 156 | 157 | let d2 = xs.remove(0); 158 | assert_eq!( 159 | d2.payload_serde_json::().unwrap().unwrap(), 160 | payload2 161 | ); 162 | d2.ack().await.unwrap(); 163 | // N.b. it's still possible this could turn up false if the test runs too 164 | // slow. 165 | assert!(now.elapsed() < deadline); 166 | } 167 | 168 | /// Consumer will return the full batch immediately, but also return immediately 169 | /// if a partial batch is ready. 
170 | #[tokio::test] 171 | async fn test_send_recv_all_full_then_partial() { 172 | let payload1 = ExType { a: 1 }; 173 | let payload2 = ExType { a: 2 }; 174 | let payload3 = ExType { a: 3 }; 175 | 176 | let (builder, _drop) = make_test_queue().await; 177 | 178 | let (p, mut c) = builder.build_pair().await.unwrap(); 179 | 180 | p.send_serde_json(&payload1).await.unwrap(); 181 | p.send_serde_json(&payload2).await.unwrap(); 182 | p.send_serde_json(&payload3).await.unwrap(); 183 | 184 | let deadline = Duration::from_secs(1); 185 | let now1 = Instant::now(); 186 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 187 | assert_eq!(xs.len(), 2); 188 | let d1 = xs.remove(0); 189 | assert_eq!( 190 | d1.payload_serde_json::().unwrap().unwrap(), 191 | payload1 192 | ); 193 | d1.ack().await.unwrap(); 194 | 195 | let d2 = xs.remove(0); 196 | assert_eq!( 197 | d2.payload_serde_json::().unwrap().unwrap(), 198 | payload2 199 | ); 200 | d2.ack().await.unwrap(); 201 | assert!(now1.elapsed() < deadline); 202 | 203 | // 2nd call 204 | let now2 = Instant::now(); 205 | let mut ys = c.receive_all(2, deadline).await.unwrap(); 206 | assert_eq!(ys.len(), 1); 207 | let d3 = ys.remove(0); 208 | assert_eq!( 209 | d3.payload_serde_json::().unwrap().unwrap(), 210 | payload3 211 | ); 212 | d3.ack().await.unwrap(); 213 | assert!(now2.elapsed() < deadline); 214 | } 215 | 216 | /// Consumer will NOT wait indefinitely for at least one item. 
217 | #[tokio::test] 218 | async fn test_send_recv_all_late_arriving_items() { 219 | let (builder, _drop) = make_test_queue().await; 220 | 221 | let (_p, mut c) = builder.build_pair().await.unwrap(); 222 | 223 | let deadline = Duration::from_secs(1); 224 | let now = Instant::now(); 225 | let xs = c.receive_all(2, deadline).await.unwrap(); 226 | let elapsed = now.elapsed(); 227 | 228 | assert_eq!(xs.len(), 0); 229 | // Elapsed should be around the deadline, ballpark 230 | assert!(elapsed >= deadline); 231 | assert!(elapsed <= deadline + Duration::from_millis(200)); 232 | } 233 | 234 | #[tokio::test] 235 | async fn test_scheduled() { 236 | let payload1 = ExType { a: 1 }; 237 | 238 | let (builder, _drop) = make_test_queue().await; 239 | let (p, mut c) = builder.build_pair().await.unwrap(); 240 | 241 | let delay = Duration::from_secs(3); 242 | let now = Instant::now(); 243 | p.send_serde_json_scheduled(&payload1, delay).await.unwrap(); 244 | let delivery = c 245 | .receive_all(1, delay * 2) 246 | .await 247 | .unwrap() 248 | .into_iter() 249 | .next() 250 | .unwrap(); 251 | assert!(now.elapsed() >= delay); 252 | assert!(now.elapsed() < delay * 2); 253 | assert_eq!(Some(payload1), delivery.payload_serde_json().unwrap()); 254 | } 255 | 256 | #[tokio::test] 257 | async fn test_pending() { 258 | let payload1 = ExType { a: 1 }; 259 | let payload2 = ExType { a: 2 }; 260 | let (builder, _drop) = make_test_queue().await; 261 | 262 | let (p, mut c) = builder.build_pair().await.unwrap(); 263 | 264 | p.send_serde_json(&payload1).await.unwrap(); 265 | p.send_serde_json(&payload2).await.unwrap(); 266 | let delivery1 = c.receive().await.unwrap(); 267 | let delivery2 = c.receive().await.unwrap(); 268 | 269 | // All items claimed, but not yet ack'd. There shouldn't be anything 270 | // available yet. 
271 | assert!(c 272 | .receive_all(1, Duration::from_millis(1)) 273 | .await 274 | .unwrap() 275 | .is_empty()); 276 | 277 | assert_eq!( 278 | Some(&payload1), 279 | delivery1.payload_serde_json().unwrap().as_ref() 280 | ); 281 | assert_eq!( 282 | Some(&payload2), 283 | delivery2.payload_serde_json().unwrap().as_ref() 284 | ); 285 | 286 | // ack 2, but neglect 1 287 | let _ = delivery2.ack().await; 288 | 289 | // After the deadline, the first payload should appear again. 290 | let delivery3 = c.receive().await.unwrap(); 291 | assert_eq!( 292 | Some(&payload1), 293 | delivery3.payload_serde_json().unwrap().as_ref() 294 | ); 295 | 296 | // queue should be empty once again 297 | assert!(c 298 | .receive_all(1, Duration::from_millis(1)) 299 | .await 300 | .unwrap() 301 | .is_empty()); 302 | } 303 | 304 | #[tokio::test] 305 | async fn test_deadletter_config() { 306 | let payload = ExType { a: 1 }; 307 | let payload_str = serde_json::to_string(&payload).unwrap(); 308 | 309 | let stream_name: String = std::iter::repeat_with(fastrand::alphanumeric) 310 | .take(8) 311 | .collect(); 312 | 313 | let dlq_key: String = std::iter::repeat_with(fastrand::alphanumeric) 314 | .take(8) 315 | .collect(); 316 | 317 | let client = ClusterClient::new(vec![ROOT_URL]).unwrap(); 318 | let mut conn = client.get_async_connection().await.unwrap(); 319 | 320 | let _: () = conn 321 | .xgroup_create_mkstream(&stream_name, "test_cg", 0i8) 322 | .await 323 | .unwrap(); 324 | 325 | let max_receives = 5; 326 | 327 | let config = RedisConfig { 328 | dsn: ROOT_URL.to_owned(), 329 | max_connections: 8, 330 | reinsert_on_nack: false, 331 | queue_key: stream_name.clone(), 332 | delayed_queue_key: format!("{stream_name}::delayed"), 333 | delayed_lock_key: format!("{stream_name}::delayed_lock"), 334 | consumer_group: "test_cg".to_owned(), 335 | consumer_name: "test_cn".to_owned(), 336 | payload_key: "payload".to_owned(), 337 | ack_deadline_ms: 20, 338 | dlq_config: Some(DeadLetterQueueConfig { 339 | 
queue_key: dlq_key.to_owned(), 340 | max_receives, 341 | }), 342 | sentinel_config: None, 343 | }; 344 | 345 | let check_dlq = |asserted_len: usize| { 346 | let dlq_key = dlq_key.clone(); 347 | let client = client.clone(); 348 | async move { 349 | let mut conn = client.get_async_connection().await.unwrap(); 350 | let mut res: Vec = conn.lrange(&dlq_key, 0, 0).await.unwrap(); 351 | assert!(res.len() == asserted_len); 352 | res.pop() 353 | } 354 | }; 355 | 356 | let (builder, _drop) = ( 357 | RedisBackend::builder(config).cluster(), 358 | RedisStreamDrop(stream_name.clone()), 359 | ); 360 | 361 | let (p, mut c) = builder.build_pair().await.unwrap(); 362 | 363 | // Test send to DLQ via `ack_deadline_ms` expiration: 364 | p.send_serde_json(&payload).await.unwrap(); 365 | 366 | for _ in 0..max_receives { 367 | check_dlq(0).await; 368 | let delivery = c.receive().await.unwrap(); 369 | assert_eq!( 370 | Some(&payload), 371 | delivery.payload_serde_json().unwrap().as_ref() 372 | ); 373 | } 374 | 375 | // Give this some time because the reenqueuing can sleep for up to 500ms 376 | tokio::time::sleep(std::time::Duration::from_secs(2)).await; 377 | let delivery = c 378 | .receive_all(1, std::time::Duration::from_millis(1)) 379 | .await 380 | .unwrap(); 381 | assert!(delivery.is_empty()); 382 | 383 | // Expected message should be on DLQ: 384 | let res = check_dlq(1).await; 385 | assert_eq!(payload_str, res.unwrap()); 386 | 387 | // Test send to DLQ via explicit `nack`ing: 388 | let _: () = conn 389 | .xadd(&stream_name, "*", &[("payload", payload_str.as_bytes())]) 390 | .await 391 | .unwrap(); 392 | 393 | let assert_delivery = |delivery: &Delivery| { 394 | assert_eq!( 395 | Some(&payload), 396 | delivery.payload_serde_json().unwrap().as_ref() 397 | ); 398 | }; 399 | 400 | for _ in 0..max_receives { 401 | let delivery = c.receive().await.unwrap(); 402 | assert_delivery(&delivery); 403 | delivery.nack().await.unwrap(); 404 | } 405 | 406 | // Give this some time because the 
reenqueuing can sleep for up to 500ms 407 | tokio::time::sleep(std::time::Duration::from_secs(2)).await; 408 | let delivery = c 409 | .receive_all(1, std::time::Duration::from_millis(1)) 410 | .await 411 | .unwrap(); 412 | assert!(delivery.is_empty()); 413 | 414 | // Expected message should be on DLQ: 415 | let res = check_dlq(1).await; 416 | assert_eq!(payload_str, res.unwrap()); 417 | 418 | // Redrive DLQ, receive from main queue, ack: 419 | p.redrive_dlq().await.unwrap(); 420 | 421 | let delivery = c.receive().await.unwrap(); 422 | assert_delivery(&delivery); 423 | delivery.ack().await.unwrap(); 424 | 425 | check_dlq(0).await; 426 | } 427 | 428 | #[tokio::test] 429 | async fn test_deadletter_config_order() { 430 | let payload1 = ExType { a: 1 }; 431 | let payload2 = ExType { a: 2 }; 432 | let payload3 = ExType { a: 3 }; 433 | 434 | let stream_name: String = std::iter::repeat_with(fastrand::alphanumeric) 435 | .take(8) 436 | .collect(); 437 | 438 | let dlq_key: String = std::iter::repeat_with(fastrand::alphanumeric) 439 | .take(8) 440 | .collect(); 441 | 442 | let client = ClusterClient::new(vec![ROOT_URL]).unwrap(); 443 | let mut conn = client.get_async_connection().await.unwrap(); 444 | 445 | let _: () = conn 446 | .xgroup_create_mkstream(&stream_name, "test_cg", 0i8) 447 | .await 448 | .unwrap(); 449 | 450 | let max_receives = 1; 451 | 452 | let config = RedisConfig { 453 | dsn: ROOT_URL.to_owned(), 454 | max_connections: 8, 455 | reinsert_on_nack: false, 456 | queue_key: stream_name.clone(), 457 | delayed_queue_key: format!("{stream_name}::delayed"), 458 | delayed_lock_key: format!("{stream_name}::delayed_lock"), 459 | consumer_group: "test_cg".to_owned(), 460 | consumer_name: "test_cn".to_owned(), 461 | payload_key: "payload".to_owned(), 462 | ack_deadline_ms: 20, 463 | dlq_config: Some(DeadLetterQueueConfig { 464 | queue_key: dlq_key.to_owned(), 465 | max_receives, 466 | }), 467 | sentinel_config: None, 468 | }; 469 | 470 | let check_dlq = |asserted_len: 
usize| { 471 | let dlq_key = dlq_key.clone(); 472 | let client = client.clone(); 473 | async move { 474 | let mut conn = client.get_async_connection().await.unwrap(); 475 | let mut res: Vec = conn.lrange(&dlq_key, 0, -1).await.unwrap(); 476 | assert!(res.len() == asserted_len); 477 | res.pop() 478 | } 479 | }; 480 | 481 | let (builder, _drop) = ( 482 | RedisBackend::builder(config).cluster(), 483 | RedisStreamDrop(stream_name.clone()), 484 | ); 485 | 486 | let (p, mut c) = builder.build_pair().await.unwrap(); 487 | 488 | // Test send to DLQ via `ack_deadline_ms` expiration: 489 | p.send_serde_json(&payload1).await.unwrap(); 490 | p.send_serde_json(&payload2).await.unwrap(); 491 | p.send_serde_json(&payload3).await.unwrap(); 492 | 493 | for payload in [&payload1, &payload2, &payload3] { 494 | let delivery = c.receive().await.unwrap(); 495 | assert_eq!( 496 | Some(payload), 497 | delivery.payload_serde_json().unwrap().as_ref() 498 | ); 499 | delivery.nack().await.unwrap(); 500 | } 501 | 502 | // Give this some time because the reenqueuing can sleep for up to 500ms 503 | tokio::time::sleep(std::time::Duration::from_secs(2)).await; 504 | 505 | // Expected messages should be on DLQ: 506 | check_dlq(3).await; 507 | 508 | // Redrive DLQ, receive from main queue, ack: 509 | p.redrive_dlq().await.unwrap(); 510 | 511 | for payload in [&payload1, &payload2, &payload3] { 512 | let delivery = c.receive().await.unwrap(); 513 | assert_eq!( 514 | Some(payload), 515 | delivery.payload_serde_json().unwrap().as_ref() 516 | ); 517 | delivery.ack().await.unwrap(); 518 | } 519 | } 520 | // A message without a `num_receives` field shouldn't 521 | // cause issues: 522 | #[tokio::test] 523 | async fn test_backward_compatible() { 524 | let stream_name: String = std::iter::repeat_with(fastrand::alphanumeric) 525 | .take(8) 526 | .collect(); 527 | 528 | let dlq_key: String = std::iter::repeat_with(fastrand::alphanumeric) 529 | .take(8) 530 | .collect(); 531 | 532 | let client = 
ClusterClient::new(vec![ROOT_URL]).unwrap(); 533 | let mut conn = client.get_async_connection().await.unwrap(); 534 | 535 | let _: () = conn 536 | .xgroup_create_mkstream(&stream_name, "test_cg", 0i8) 537 | .await 538 | .unwrap(); 539 | 540 | let max_receives = 5; 541 | 542 | let config = RedisConfig { 543 | dsn: ROOT_URL.to_owned(), 544 | max_connections: 8, 545 | reinsert_on_nack: false, 546 | queue_key: stream_name.clone(), 547 | delayed_queue_key: format!("{stream_name}::delayed"), 548 | delayed_lock_key: format!("{stream_name}::delayed_lock"), 549 | consumer_group: "test_cg".to_owned(), 550 | consumer_name: "test_cn".to_owned(), 551 | payload_key: "payload".to_owned(), 552 | ack_deadline_ms: 20, 553 | dlq_config: Some(DeadLetterQueueConfig { 554 | queue_key: dlq_key.to_owned(), 555 | max_receives, 556 | }), 557 | sentinel_config: None, 558 | }; 559 | 560 | let (builder, _drop) = ( 561 | RedisBackend::builder(config).cluster(), 562 | RedisStreamDrop(stream_name.clone()), 563 | ); 564 | 565 | let (_p, mut c) = builder.build_pair().await.unwrap(); 566 | 567 | let org_payload = ExType { a: 1 }; 568 | let org_payload_str = serde_json::to_string(&org_payload).unwrap(); 569 | 570 | let _: () = conn 571 | .xadd( 572 | &stream_name, 573 | "*", 574 | // We don't have the `num_receives` field: 575 | &[("payload", org_payload_str.as_bytes())], 576 | ) 577 | .await 578 | .unwrap(); 579 | 580 | for _ in 0..max_receives { 581 | let delivery = c.receive().await.unwrap(); 582 | assert_eq!( 583 | Some(&org_payload), 584 | delivery.payload_serde_json().unwrap().as_ref() 585 | ); 586 | } 587 | 588 | // Give this some time because the reenqueuing can sleep for up to 500ms 589 | tokio::time::sleep(std::time::Duration::from_secs(2)).await; 590 | let delivery = c 591 | .receive_all(1, std::time::Duration::from_millis(1)) 592 | .await 593 | .unwrap(); 594 | assert!(delivery.is_empty()); 595 | } 596 | -------------------------------------------------------------------------------- 
/omniqueue/tests/it/redis_fallback.rs:
--------------------------------------------------------------------------------
use core::str;
use std::time::{Duration, Instant};

use omniqueue::{
    backends::{
        redis::{DeadLetterQueueConfig, RedisBackendBuilder},
        RedisBackend, RedisConfig,
    },
    Delivery,
};
use redis::{AsyncCommands, Client, Commands};
use serde::{Deserialize, Serialize};
use svix_ksuid::KsuidLike;

const ROOT_URL: &str = "redis://localhost";

/// RAII guard that deletes the temporary Redis key when a test ends, so
/// parallel tests never observe each other's data.
pub struct RedisKeyDrop(String);
impl Drop for RedisKeyDrop {
    fn drop(&mut self) {
        // Synchronous client: `Drop` cannot be async.
        let client = Client::open(ROOT_URL).unwrap();
        let mut conn = client.get_connection().unwrap();
        let _: () = conn.del(&self.0).unwrap();
    }
}

/// Returns a [`QueueBuilder`] configured to connect to the Redis instance
/// spawned by the file `testing-docker-compose.yml` in the root of the
/// repository.
///
/// Additionally this will make a temporary stream on that instance for the
/// duration of the test such as to ensure there is no stealing
///
/// This will also return a [`RedisKeyDrop`] to clean up the stream after the
/// test ends.
35 | async fn make_test_queue() -> (RedisBackendBuilder, RedisKeyDrop) { 36 | let queue_key: String = std::iter::repeat_with(fastrand::alphanumeric) 37 | .take(8) 38 | .collect(); 39 | 40 | let config = RedisConfig { 41 | dsn: ROOT_URL.to_owned(), 42 | max_connections: 8, 43 | reinsert_on_nack: false, 44 | queue_key: queue_key.clone(), 45 | delayed_queue_key: format!("{queue_key}::delayed"), 46 | delayed_lock_key: format!("{queue_key}::delayed_lock"), 47 | consumer_group: "test_cg".to_owned(), 48 | consumer_name: "test_cn".to_owned(), 49 | payload_key: "payload".to_owned(), 50 | ack_deadline_ms: 5_000, 51 | dlq_config: None, 52 | sentinel_config: None, 53 | }; 54 | 55 | ( 56 | RedisBackend::builder(config).use_redis_streams(false), 57 | RedisKeyDrop(queue_key), 58 | ) 59 | } 60 | 61 | #[tokio::test] 62 | async fn test_raw_send_recv() { 63 | let (builder, _drop) = make_test_queue().await; 64 | let payload = b"{\"test\": \"data\"}"; 65 | let (p, mut c) = builder.build_pair().await.unwrap(); 66 | 67 | p.send_raw(payload).await.unwrap(); 68 | 69 | let d = c.receive().await.unwrap(); 70 | assert_eq!(d.borrow_payload().unwrap(), payload); 71 | } 72 | 73 | #[tokio::test] 74 | async fn test_bytes_send_recv() { 75 | use omniqueue::QueueProducer as _; 76 | 77 | let (builder, _drop) = make_test_queue().await; 78 | let payload = b"hello"; 79 | let (p, mut c) = builder.build_pair().await.unwrap(); 80 | 81 | p.send_bytes(payload).await.unwrap(); 82 | 83 | let d = c.receive().await.unwrap(); 84 | assert_eq!(d.borrow_payload().unwrap(), payload); 85 | d.ack().await.unwrap(); 86 | } 87 | 88 | #[derive(Debug, Deserialize, Serialize, PartialEq)] 89 | pub struct ExType { 90 | a: u8, 91 | } 92 | 93 | #[tokio::test] 94 | async fn test_serde_send_recv() { 95 | let (builder, _drop) = make_test_queue().await; 96 | let payload = ExType { a: 2 }; 97 | let (p, mut c) = builder.build_pair().await.unwrap(); 98 | 99 | p.send_serde_json(&payload).await.unwrap(); 100 | 101 | let d = 
c.receive().await.unwrap(); 102 | assert_eq!(d.payload_serde_json::().unwrap().unwrap(), payload); 103 | d.ack().await.unwrap(); 104 | } 105 | 106 | // Fallback implementation currently implements receive_all such that it always 107 | // only returns the first item, uncomment when the implementation is changed. 108 | /* 109 | /// Consumer will return immediately if there are fewer than max messages to 110 | /// start with. 111 | #[tokio::test] 112 | async fn test_send_recv_all_partial() { 113 | let (builder, _drop) = make_test_queue().await; 114 | 115 | let payload = ExType { a: 2 }; 116 | let (p, mut c) = builder.build_pair().await.unwrap(); 117 | 118 | p.send_serde_json(&payload).await.unwrap(); 119 | let deadline = Duration::from_secs(1); 120 | 121 | let now = Instant::now(); 122 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 123 | assert_eq!(xs.len(), 1); 124 | let d = xs.remove(0); 125 | assert_eq!(d.payload_serde_json::().unwrap().unwrap(), payload); 126 | d.ack().await.unwrap(); 127 | assert!(now.elapsed() <= deadline); 128 | } 129 | 130 | /// Consumer should yield items immediately if there's a full batch ready on the 131 | /// first poll. 
132 | #[tokio::test] 133 | async fn test_send_recv_all_full() { 134 | let payload1 = ExType { a: 1 }; 135 | let payload2 = ExType { a: 2 }; 136 | 137 | let (builder, _drop) = make_test_queue().await; 138 | 139 | let (p, mut c) = builder.build_pair().await.unwrap(); 140 | 141 | p.send_serde_json(&payload1).await.unwrap(); 142 | p.send_serde_json(&payload2).await.unwrap(); 143 | let deadline = Duration::from_secs(1); 144 | 145 | let now = Instant::now(); 146 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 147 | assert_eq!(xs.len(), 2); 148 | let d1 = xs.remove(0); 149 | assert_eq!( 150 | d1.payload_serde_json::().unwrap().unwrap(), 151 | payload1 152 | ); 153 | d1.ack().await.unwrap(); 154 | 155 | let d2 = xs.remove(0); 156 | assert_eq!( 157 | d2.payload_serde_json::().unwrap().unwrap(), 158 | payload2 159 | ); 160 | d2.ack().await.unwrap(); 161 | // N.b. it's still possible this could turn up false if the test runs too 162 | // slow. 163 | assert!(now.elapsed() < deadline); 164 | } 165 | 166 | /// Consumer will return the full batch immediately, but also return immediately 167 | /// if a partial batch is ready. 
168 | #[tokio::test] 169 | async fn test_send_recv_all_full_then_partial() { 170 | let payload1 = ExType { a: 1 }; 171 | let payload2 = ExType { a: 2 }; 172 | let payload3 = ExType { a: 3 }; 173 | 174 | let (builder, _drop) = make_test_queue().await; 175 | 176 | let (p, mut c) = builder.build_pair().await.unwrap(); 177 | 178 | p.send_serde_json(&payload1).await.unwrap(); 179 | p.send_serde_json(&payload2).await.unwrap(); 180 | p.send_serde_json(&payload3).await.unwrap(); 181 | 182 | let deadline = Duration::from_secs(1); 183 | let now1 = Instant::now(); 184 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 185 | assert_eq!(xs.len(), 2); 186 | let d1 = xs.remove(0); 187 | assert_eq!( 188 | d1.payload_serde_json::().unwrap().unwrap(), 189 | payload1 190 | ); 191 | d1.ack().await.unwrap(); 192 | 193 | let d2 = xs.remove(0); 194 | assert_eq!( 195 | d2.payload_serde_json::().unwrap().unwrap(), 196 | payload2 197 | ); 198 | d2.ack().await.unwrap(); 199 | assert!(now1.elapsed() < deadline); 200 | 201 | // 2nd call 202 | let now2 = Instant::now(); 203 | let mut ys = c.receive_all(2, deadline).await.unwrap(); 204 | assert_eq!(ys.len(), 1); 205 | let d3 = ys.remove(0); 206 | assert_eq!( 207 | d3.payload_serde_json::().unwrap().unwrap(), 208 | payload3 209 | ); 210 | d3.ack().await.unwrap(); 211 | assert!(now2.elapsed() < deadline); 212 | } 213 | 214 | /// Consumer will NOT wait indefinitely for at least one item. 
215 | #[tokio::test] 216 | async fn test_send_recv_all_late_arriving_items() { 217 | let (builder, _drop) = make_test_queue().await; 218 | 219 | let (_p, mut c) = builder.build_pair().await.unwrap(); 220 | 221 | let deadline = Duration::from_secs(1); 222 | let now = Instant::now(); 223 | let xs = c.receive_all(2, deadline).await.unwrap(); 224 | let elapsed = now.elapsed(); 225 | 226 | assert_eq!(xs.len(), 0); 227 | // Elapsed should be around the deadline, ballpark 228 | assert!(elapsed >= deadline); 229 | assert!(elapsed <= deadline + Duration::from_millis(200)); 230 | } 231 | */ 232 | 233 | #[tokio::test] 234 | async fn test_scheduled() { 235 | let payload1 = ExType { a: 1 }; 236 | let (builder, _drop) = make_test_queue().await; 237 | 238 | let (p, mut c) = builder.build_pair().await.unwrap(); 239 | 240 | let delay = Duration::from_secs(3); 241 | let now = Instant::now(); 242 | p.send_serde_json_scheduled(&payload1, delay).await.unwrap(); 243 | let delivery = c 244 | .receive_all(1, delay * 2) 245 | .await 246 | .unwrap() 247 | .into_iter() 248 | .next() 249 | .unwrap(); 250 | assert!(now.elapsed() >= delay); 251 | assert!(now.elapsed() < delay * 2); 252 | assert_eq!(Some(payload1), delivery.payload_serde_json().unwrap()); 253 | } 254 | 255 | #[tokio::test] 256 | async fn test_pending() { 257 | let payload1 = ExType { a: 1 }; 258 | let payload2 = ExType { a: 2 }; 259 | let (builder, _drop) = make_test_queue().await; 260 | 261 | let (p, mut c) = builder.build_pair().await.unwrap(); 262 | 263 | p.send_serde_json(&payload1).await.unwrap(); 264 | p.send_serde_json(&payload2).await.unwrap(); 265 | let delivery1 = c.receive().await.unwrap(); 266 | let delivery2 = c.receive().await.unwrap(); 267 | 268 | // All items claimed, but not yet ack'd. There shouldn't be anything available 269 | // yet. 
270 | assert!(c 271 | .receive_all(1, Duration::from_millis(1)) 272 | .await 273 | .unwrap() 274 | .is_empty()); 275 | 276 | assert_eq!( 277 | Some(&payload1), 278 | delivery1.payload_serde_json().unwrap().as_ref() 279 | ); 280 | assert_eq!( 281 | Some(&payload2), 282 | delivery2.payload_serde_json().unwrap().as_ref() 283 | ); 284 | 285 | // ack 2, but neglect 1 286 | let _ = delivery2.ack().await; 287 | 288 | // After the deadline, the first payload should appear again. 289 | let delivery3 = c.receive().await.unwrap(); 290 | assert_eq!( 291 | Some(&payload1), 292 | delivery3.payload_serde_json().unwrap().as_ref() 293 | ); 294 | 295 | // queue should be empty once again 296 | assert!(c 297 | .receive_all(1, Duration::from_millis(1)) 298 | .await 299 | .unwrap() 300 | .is_empty()); 301 | } 302 | 303 | #[tokio::test] 304 | async fn test_deadletter_config() { 305 | let payload = ExType { a: 1 }; 306 | 307 | let queue_key: String = std::iter::repeat_with(fastrand::alphanumeric) 308 | .take(8) 309 | .collect(); 310 | 311 | let dlq_key: String = std::iter::repeat_with(fastrand::alphanumeric) 312 | .take(8) 313 | .collect(); 314 | 315 | let max_receives = 5; 316 | 317 | let config = RedisConfig { 318 | dsn: ROOT_URL.to_owned(), 319 | max_connections: 8, 320 | reinsert_on_nack: false, 321 | queue_key: queue_key.clone(), 322 | delayed_queue_key: format!("{queue_key}::delayed"), 323 | delayed_lock_key: format!("{queue_key}::delayed_lock"), 324 | consumer_group: "test_cg".to_owned(), 325 | consumer_name: "test_cn".to_owned(), 326 | payload_key: "payload".to_owned(), 327 | ack_deadline_ms: 1, 328 | dlq_config: Some(DeadLetterQueueConfig { 329 | queue_key: dlq_key.to_owned(), 330 | max_receives, 331 | }), 332 | sentinel_config: None, 333 | }; 334 | 335 | let check_dlq = |asserted_len: usize| { 336 | let dlq_key = dlq_key.clone(); 337 | async move { 338 | let client = Client::open(ROOT_URL).unwrap(); 339 | let mut conn = client.get_multiplexed_async_connection().await.unwrap(); 
340 | let mut res: Vec = conn.lrange(&dlq_key, 0, 0).await.unwrap(); 341 | assert!(res.len() == asserted_len); 342 | res.pop() 343 | } 344 | }; 345 | 346 | let (builder, _drop) = ( 347 | RedisBackend::builder(config).use_redis_streams(false), 348 | RedisKeyDrop(queue_key), 349 | ); 350 | 351 | let (p, mut c) = builder.build_pair().await.unwrap(); 352 | 353 | // Test send to DLQ via `ack_deadline_ms` expiration: 354 | p.send_serde_json(&payload).await.unwrap(); 355 | 356 | let assert_delivery = |delivery: &Delivery| { 357 | assert_eq!( 358 | Some(&payload), 359 | delivery.payload_serde_json().unwrap().as_ref() 360 | ); 361 | }; 362 | 363 | for _ in 0..max_receives { 364 | check_dlq(0).await; 365 | let delivery = c.receive().await.unwrap(); 366 | assert_delivery(&delivery); 367 | } 368 | 369 | // Give this some time because the reenqueuing can sleep for up to 500ms 370 | tokio::time::sleep(std::time::Duration::from_secs(2)).await; 371 | let delivery = c 372 | .receive_all(1, std::time::Duration::from_millis(1)) 373 | .await 374 | .unwrap(); 375 | assert!(delivery.is_empty()); 376 | 377 | // Expected message should be on DLQ: 378 | let res = check_dlq(1).await; 379 | assert_eq!(serde_json::to_string(&payload).unwrap(), res.unwrap()); 380 | 381 | // Redrive DLQ, receive from main queue, ack: 382 | p.redrive_dlq().await.unwrap(); 383 | 384 | let delivery = c.receive().await.unwrap(); 385 | assert_delivery(&delivery); 386 | delivery.ack().await.unwrap(); 387 | 388 | check_dlq(0).await; 389 | 390 | /* This portion of test is flaky due to https://github.com/svix/omniqueue-rs/issues/102 391 | 392 | // Test send to DLQ via explicit `nack`ing: 393 | p.send_serde_json(&payload).await.unwrap(); 394 | 395 | for _ in 0..max_receives { 396 | check_dlq(0).await; 397 | let delivery = c.receive().await.unwrap(); 398 | assert_delivery(&delivery); 399 | delivery.nack().await.unwrap(); 400 | } 401 | 402 | // Give this some time because the reenqueuing can sleep for up to 500ms 403 | 
tokio::time::sleep(std::time::Duration::from_secs(2)).await; 404 | let delivery = c 405 | .receive_all(1, std::time::Duration::from_millis(1)) 406 | .await 407 | .unwrap(); 408 | assert!(delivery.is_empty()); 409 | 410 | // Expected message should be on DLQ: 411 | let res = check_dlq(1).await; 412 | assert_eq!(serde_json::to_string(&payload).unwrap(), res.unwrap()); 413 | 414 | */ 415 | } 416 | 417 | #[tokio::test] 418 | async fn test_deadletter_config_order() { 419 | let payload1 = ExType { a: 1 }; 420 | let payload2 = ExType { a: 2 }; 421 | let payload3 = ExType { a: 3 }; 422 | 423 | let queue_key: String = std::iter::repeat_with(fastrand::alphanumeric) 424 | .take(8) 425 | .collect(); 426 | 427 | let dlq_key: String = std::iter::repeat_with(fastrand::alphanumeric) 428 | .take(8) 429 | .collect(); 430 | 431 | let max_receives = 1; 432 | 433 | let config = RedisConfig { 434 | dsn: ROOT_URL.to_owned(), 435 | max_connections: 8, 436 | reinsert_on_nack: false, 437 | queue_key: queue_key.clone(), 438 | delayed_queue_key: format!("{queue_key}::delayed"), 439 | delayed_lock_key: format!("{queue_key}::delayed_lock"), 440 | consumer_group: "test_cg".to_owned(), 441 | consumer_name: "test_cn".to_owned(), 442 | payload_key: "payload".to_owned(), 443 | ack_deadline_ms: 1, 444 | dlq_config: Some(DeadLetterQueueConfig { 445 | queue_key: dlq_key.to_owned(), 446 | max_receives, 447 | }), 448 | sentinel_config: None, 449 | }; 450 | 451 | let check_dlq = |asserted_len: usize| { 452 | let dlq_key = dlq_key.clone(); 453 | async move { 454 | let client = Client::open(ROOT_URL).unwrap(); 455 | let mut conn = client.get_multiplexed_async_connection().await.unwrap(); 456 | let mut res: Vec = conn.lrange(&dlq_key, 0, -1).await.unwrap(); 457 | assert!(res.len() == asserted_len); 458 | res.pop() 459 | } 460 | }; 461 | 462 | let (builder, _drop) = ( 463 | RedisBackend::builder(config).use_redis_streams(false), 464 | RedisKeyDrop(queue_key), 465 | ); 466 | 467 | let (p, mut c) = 
builder.build_pair().await.unwrap(); 468 | 469 | // Test send to DLQ via `ack_deadline_ms` expiration: 470 | p.send_serde_json(&payload1).await.unwrap(); 471 | p.send_serde_json(&payload2).await.unwrap(); 472 | p.send_serde_json(&payload3).await.unwrap(); 473 | 474 | for payload in [&payload1, &payload2, &payload3] { 475 | let delivery = c.receive().await.unwrap(); 476 | assert_eq!( 477 | Some(payload), 478 | delivery.payload_serde_json().unwrap().as_ref() 479 | ); 480 | delivery.nack().await.unwrap(); 481 | } 482 | 483 | // Give this some time because the reenqueuing can sleep for up to 500ms 484 | tokio::time::sleep(std::time::Duration::from_secs(2)).await; 485 | 486 | // Expected messages should be on DLQ: 487 | check_dlq(3).await; 488 | 489 | // Redrive DLQ, receive from main queue, ack: 490 | p.redrive_dlq().await.unwrap(); 491 | 492 | for payload in [&payload1, &payload2, &payload3] { 493 | let delivery = c.receive().await.unwrap(); 494 | assert_eq!( 495 | Some(payload), 496 | delivery.payload_serde_json().unwrap().as_ref() 497 | ); 498 | delivery.ack().await.unwrap(); 499 | } 500 | } 501 | 502 | // A message without a `num_receives` field shouldn't 503 | // cause issues: 504 | #[tokio::test] 505 | async fn test_backward_compatible() { 506 | let queue_key: String = std::iter::repeat_with(fastrand::alphanumeric) 507 | .take(8) 508 | .collect(); 509 | 510 | let dlq_key: String = std::iter::repeat_with(fastrand::alphanumeric) 511 | .take(8) 512 | .collect(); 513 | 514 | let max_receives = 5; 515 | 516 | let config = RedisConfig { 517 | dsn: ROOT_URL.to_owned(), 518 | max_connections: 8, 519 | reinsert_on_nack: false, 520 | queue_key: queue_key.clone(), 521 | delayed_queue_key: format!("{queue_key}::delayed"), 522 | delayed_lock_key: format!("{queue_key}::delayed_lock"), 523 | consumer_group: "test_cg".to_owned(), 524 | consumer_name: "test_cn".to_owned(), 525 | payload_key: "payload".to_owned(), 526 | ack_deadline_ms: 20, 527 | dlq_config: 
Some(DeadLetterQueueConfig { 528 | queue_key: dlq_key.to_owned(), 529 | max_receives, 530 | }), 531 | sentinel_config: None, 532 | }; 533 | 534 | let (builder, _drop) = ( 535 | RedisBackend::builder(config).use_redis_streams(false), 536 | RedisKeyDrop(queue_key.clone()), 537 | ); 538 | 539 | let (_p, mut c) = builder.build_pair().await.unwrap(); 540 | 541 | let org_payload = ExType { a: 1 }; 542 | 543 | // Old payload format: 544 | let id = svix_ksuid::Ksuid::new(None, None).to_base62(); 545 | let org_payload_str = serde_json::to_string(&org_payload).unwrap(); 546 | let mut payload = Vec::with_capacity(id.len() + org_payload_str.len() + 1); 547 | payload.extend(id.as_bytes()); 548 | payload.push(b'|'); 549 | payload.extend(org_payload_str.as_bytes()); 550 | 551 | let client = Client::open(ROOT_URL).unwrap(); 552 | let mut conn = client.get_multiplexed_async_connection().await.unwrap(); 553 | let _: () = conn.lpush(&queue_key, &payload).await.unwrap(); 554 | 555 | for _ in 0..max_receives { 556 | let delivery = c.receive().await.unwrap(); 557 | assert_eq!( 558 | Some(&org_payload), 559 | delivery.payload_serde_json().unwrap().as_ref() 560 | ); 561 | } 562 | 563 | // Give this some time because the reenqueuing can sleep for up to 500ms 564 | tokio::time::sleep(std::time::Duration::from_secs(2)).await; 565 | let delivery = c 566 | .receive_all(1, std::time::Duration::from_millis(1)) 567 | .await 568 | .unwrap(); 569 | assert!(delivery.is_empty()); 570 | } 571 | -------------------------------------------------------------------------------- /omniqueue/tests/it/sqs.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, Instant}; 2 | 3 | use aws_sdk_sqs::Client; 4 | use omniqueue::{ 5 | backends::{SqsBackend, SqsConfig}, 6 | QueueBuilder, 7 | }; 8 | use serde::{Deserialize, Serialize}; 9 | 10 | const ROOT_URL: &str = "http://localhost:9324"; 11 | const DEFAULT_CFG: [(&str, &str); 3] = [ 12 | 
("AWS_DEFAULT_REGION", "localhost"), 13 | ("AWS_ACCESS_KEY_ID", "x"), 14 | ("AWS_SECRET_ACCESS_KEY", "x"), 15 | ]; 16 | 17 | /// Returns a [`QueueBuilder`] configured to connect to the SQS instance spawned 18 | /// by the file `testing-docker-compose.yaml` in the root of the repository. 19 | /// 20 | /// Additionally this will make a temporary queue on that instance for the 21 | /// duration of the test such as to ensure there is no stealing.w 22 | async fn make_test_queue() -> QueueBuilder { 23 | for (var, val) in &DEFAULT_CFG { 24 | if std::env::var(var).is_err() { 25 | std::env::set_var(var, val); 26 | } 27 | } 28 | 29 | let config = aws_config::from_env().endpoint_url(ROOT_URL).load().await; 30 | let client = Client::new(&config); 31 | 32 | let queue_name: String = std::iter::repeat_with(fastrand::alphanumeric) 33 | .take(8) 34 | .collect(); 35 | client 36 | .create_queue() 37 | .queue_name(&queue_name) 38 | .send() 39 | .await 40 | .unwrap(); 41 | 42 | let config = SqsConfig { 43 | queue_dsn: format!("{ROOT_URL}/queue/{queue_name}"), 44 | override_endpoint: true, 45 | }; 46 | 47 | SqsBackend::builder(config) 48 | } 49 | 50 | #[tokio::test] 51 | async fn test_raw_send_recv() { 52 | let payload = "{\"test\": \"data\"}"; 53 | let (p, c) = make_test_queue().await.build_pair().await.unwrap(); 54 | 55 | p.send_raw(payload).await.unwrap(); 56 | 57 | let d = c.receive().await.unwrap(); 58 | assert_eq!(d.borrow_payload().unwrap(), payload.as_bytes()); 59 | } 60 | 61 | #[tokio::test] 62 | async fn test_bytes_send_recv() { 63 | use omniqueue::QueueProducer as _; 64 | 65 | let payload = b"hello"; 66 | let (p, c) = make_test_queue().await.build_pair().await.unwrap(); 67 | 68 | p.send_bytes(payload).await.unwrap(); 69 | 70 | let d = c.receive().await.unwrap(); 71 | assert_eq!(d.borrow_payload().unwrap(), payload); 72 | d.ack().await.unwrap(); 73 | } 74 | 75 | #[derive(Debug, Deserialize, Serialize, PartialEq)] 76 | pub struct ExType { 77 | a: u8, 78 | } 79 | 80 | 
#[tokio::test] 81 | async fn test_serde_send_recv() { 82 | let payload = ExType { a: 2 }; 83 | let (p, c) = make_test_queue().await.build_pair().await.unwrap(); 84 | 85 | p.send_serde_json(&payload).await.unwrap(); 86 | 87 | let d = c.receive().await.unwrap(); 88 | assert_eq!(d.payload_serde_json::().unwrap().unwrap(), payload); 89 | d.ack().await.unwrap(); 90 | } 91 | 92 | /// Consumer will return immediately if there are fewer than max messages to 93 | /// start with. 94 | #[tokio::test] 95 | async fn test_send_recv_all_partial() { 96 | let payload = ExType { a: 2 }; 97 | let (p, c) = make_test_queue().await.build_pair().await.unwrap(); 98 | 99 | p.send_serde_json(&payload).await.unwrap(); 100 | let deadline = Duration::from_secs(1); 101 | 102 | let now = Instant::now(); 103 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 104 | assert_eq!(xs.len(), 1); 105 | let d = xs.remove(0); 106 | assert_eq!(d.payload_serde_json::().unwrap().unwrap(), payload); 107 | d.ack().await.unwrap(); 108 | assert!(now.elapsed() <= deadline); 109 | } 110 | 111 | /// Consumer should yield items immediately if there's a full batch ready on the 112 | /// first poll. 113 | #[tokio::test] 114 | async fn test_send_recv_all_full() { 115 | let payload1 = ExType { a: 1 }; 116 | let payload2 = ExType { a: 2 }; 117 | let (p, c) = make_test_queue().await.build_pair().await.unwrap(); 118 | 119 | p.send_serde_json(&payload1).await.unwrap(); 120 | p.send_serde_json(&payload2).await.unwrap(); 121 | let deadline = Duration::from_secs(1); 122 | 123 | let now = Instant::now(); 124 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 125 | assert_eq!(xs.len(), 2); 126 | let d1 = xs.remove(0); 127 | assert_eq!( 128 | d1.payload_serde_json::().unwrap().unwrap(), 129 | payload1 130 | ); 131 | d1.ack().await.unwrap(); 132 | 133 | let d2 = xs.remove(0); 134 | assert_eq!( 135 | d2.payload_serde_json::().unwrap().unwrap(), 136 | payload2 137 | ); 138 | d2.ack().await.unwrap(); 139 | // N.b. 
it's still possible this could turn up false if the test runs too 140 | // slow. 141 | assert!(now.elapsed() < deadline); 142 | } 143 | 144 | /// Consumer will return the full batch immediately, but also return immediately 145 | /// if a partial batch is ready. 146 | #[tokio::test] 147 | async fn test_send_recv_all_full_then_partial() { 148 | let payload1 = ExType { a: 1 }; 149 | let payload2 = ExType { a: 2 }; 150 | let payload3 = ExType { a: 3 }; 151 | let (p, c) = make_test_queue().await.build_pair().await.unwrap(); 152 | 153 | p.send_serde_json(&payload1).await.unwrap(); 154 | p.send_serde_json(&payload2).await.unwrap(); 155 | p.send_serde_json(&payload3).await.unwrap(); 156 | 157 | let deadline = Duration::from_secs(1); 158 | let now1 = Instant::now(); 159 | let mut xs = c.receive_all(2, deadline).await.unwrap(); 160 | assert_eq!(xs.len(), 2); 161 | let d1 = xs.remove(0); 162 | assert_eq!( 163 | d1.payload_serde_json::().unwrap().unwrap(), 164 | payload1 165 | ); 166 | d1.ack().await.unwrap(); 167 | 168 | let d2 = xs.remove(0); 169 | assert_eq!( 170 | d2.payload_serde_json::().unwrap().unwrap(), 171 | payload2 172 | ); 173 | d2.ack().await.unwrap(); 174 | assert!(now1.elapsed() < deadline); 175 | 176 | // 2nd call 177 | let now2 = Instant::now(); 178 | let mut ys = c.receive_all(2, deadline).await.unwrap(); 179 | assert_eq!(ys.len(), 1); 180 | let d3 = ys.remove(0); 181 | assert_eq!( 182 | d3.payload_serde_json::().unwrap().unwrap(), 183 | payload3 184 | ); 185 | d3.ack().await.unwrap(); 186 | assert!(now2.elapsed() < deadline); 187 | } 188 | 189 | /// Consumer will NOT wait indefinitely for at least one item. 
190 | #[tokio::test] 191 | async fn test_send_recv_all_late_arriving_items() { 192 | let (_p, c) = make_test_queue().await.build_pair().await.unwrap(); 193 | 194 | let deadline = Duration::from_secs(1); 195 | let now = Instant::now(); 196 | let xs = c.receive_all(2, deadline).await.unwrap(); 197 | let elapsed = now.elapsed(); 198 | 199 | assert_eq!(xs.len(), 0); 200 | // Elapsed should be around the deadline, ballpark 201 | assert!(elapsed >= deadline); 202 | assert!(elapsed <= deadline + Duration::from_millis(200)); 203 | } 204 | 205 | #[tokio::test] 206 | async fn test_scheduled() { 207 | let payload1 = ExType { a: 1 }; 208 | let (p, c) = make_test_queue().await.build_pair().await.unwrap(); 209 | 210 | let delay = Duration::from_secs(3); 211 | let now = Instant::now(); 212 | p.send_serde_json_scheduled(&payload1, delay).await.unwrap(); 213 | let delivery = c 214 | .receive_all(1, delay * 2) 215 | .await 216 | .unwrap() 217 | .into_iter() 218 | .next() 219 | .unwrap(); 220 | assert!(now.elapsed() >= delay); 221 | assert!(now.elapsed() < delay * 2); 222 | assert_eq!(Some(payload1), delivery.payload_serde_json().unwrap()); 223 | } 224 | -------------------------------------------------------------------------------- /testing-docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.7" 2 | services: 3 | rabbitmq: 4 | image: docker.io/rabbitmq:3.11.11-management-alpine 5 | ports: 6 | - "5672:5672" 7 | - "15672:15672" 8 | environment: 9 | RABBITMQ_PLUGINS_DIR: "/opt/rabbitmq/plugins:/usr/lib/rabbitmq/plugins" 10 | volumes: 11 | - ./_rabbit/enabled_plugins:/etc/rabbitmq/enabled_plugins 12 | - ./_rabbit/plugins:/usr/lib/rabbitmq/plugins 13 | 14 | elasticmq: # Drop-in SQS replacement 15 | image: docker.io/softwaremill/elasticmq-native:1.5.7 16 | ports: 17 | - "9324:9324" 18 | - "9325:9325" 19 | 20 | redis: 21 | image: docker.io/redis:7 22 | ports: 23 | - "6379:6379" 24 | 25 | redis-cluster: 26 | image: 
docker.io/bitnami/redis-cluster:7.0 27 | environment: 28 | ALLOW_EMPTY_PASSWORD: "yes" 29 | REDIS_NODES: "redis-cluster redis-cluster-node-0 redis-cluster-node-1 redis-cluster-node-2 redis-cluster-node-3 redis-cluster-node-4" 30 | REDIS_CLUSTER_CREATOR: "yes" 31 | REDIS_CLUSTER_REPLICAS: "1" 32 | ports: 33 | - "6380:6379" 34 | depends_on: 35 | - redis-cluster-node-0 36 | - redis-cluster-node-1 37 | - redis-cluster-node-2 38 | - redis-cluster-node-3 39 | - redis-cluster-node-4 40 | 41 | redis-cluster-node-0: 42 | image: docker.io/bitnami/redis-cluster:7.0 43 | environment: 44 | ALLOW_EMPTY_PASSWORD: "yes" 45 | REDIS_NODES: "redis-cluster redis-cluster-node-0 redis-cluster-node-1 redis-cluster-node-2 redis-cluster-node-3 redis-cluster-node-4" 46 | ports: 47 | - "6381:6379" 48 | 49 | redis-cluster-node-1: 50 | image: docker.io/bitnami/redis-cluster:7.0 51 | environment: 52 | ALLOW_EMPTY_PASSWORD: "yes" 53 | REDIS_NODES: "redis-cluster redis-cluster-node-0 redis-cluster-node-1 redis-cluster-node-2 redis-cluster-node-3 redis-cluster-node-4" 54 | ports: 55 | - "6382:6379" 56 | 57 | redis-cluster-node-2: 58 | image: docker.io/bitnami/redis-cluster:7.0 59 | environment: 60 | ALLOW_EMPTY_PASSWORD: "yes" 61 | REDIS_NODES: "redis-cluster redis-cluster-node-0 redis-cluster-node-1 redis-cluster-node-2 redis-cluster-node-3 redis-cluster-node-4" 62 | ports: 63 | - "6383:6379" 64 | 65 | redis-cluster-node-3: 66 | image: docker.io/bitnami/redis-cluster:7.0 67 | environment: 68 | ALLOW_EMPTY_PASSWORD: "yes" 69 | REDIS_NODES: "redis-cluster redis-cluster-node-0 redis-cluster-node-1 redis-cluster-node-2 redis-cluster-node-3 redis-cluster-node-4" 70 | ports: 71 | - "6384:6379" 72 | 73 | redis-cluster-node-4: 74 | image: docker.io/bitnami/redis-cluster:7.0 75 | environment: 76 | ALLOW_EMPTY_PASSWORD: "yes" 77 | REDIS_NODES: "redis-cluster redis-cluster-node-0 redis-cluster-node-1 redis-cluster-node-2 redis-cluster-node-3 redis-cluster-node-4" 78 | ports: 79 | - "6385:6379" 80 | 81 | 
redis-sentinel: 82 | image: docker.io/redis:7 83 | ports: 84 | - "26379:26379" 85 | command: > 86 | sh -c 'echo "bind 0.0.0.0" > /etc/sentinel.conf && 87 | echo "sentinel monitor master0 redis-master-0 6379 2" >> /etc/sentinel.conf && 88 | echo "sentinel resolve-hostnames yes" >> /etc/sentinel.conf && 89 | echo "sentinel down-after-milliseconds master0 10000" >> /etc/sentinel.conf && 90 | echo "sentinel failover-timeout master0 10000" >> /etc/sentinel.conf && 91 | echo "sentinel parallel-syncs master0 1" >> /etc/sentinel.conf && 92 | redis-sentinel /etc/sentinel.conf' 93 | 94 | redis-master-0: 95 | image: docker.io/redis:7 96 | ports: 97 | - "6387:6379" 98 | 99 | redis-replica-0: 100 | image: docker.io/redis:7 101 | ports: 102 | - "6388:6379" 103 | command: 104 | [ 105 | "redis-server", 106 | "--appendonly", 107 | "yes", 108 | "--replicaof", 109 | "redis-master-0", 110 | "6379", 111 | "--repl-diskless-load", 112 | "on-empty-db", 113 | "--protected-mode", 114 | "no" 115 | ] 116 | 117 | gcp-pubsub: 118 | image: gcr.io/google.com/cloudsdktool/google-cloud-cli:emulators 119 | ports: 120 | - "8085:8085" 121 | command: [ 122 | "gcloud", "beta", "emulators", "pubsub", "start", 123 | "--project", "local-project", 124 | "--host-port", "0.0.0.0:8085" 125 | ] 126 | 127 | azurite: 128 | image: mcr.microsoft.com/azure-storage/azurite 129 | ports: 130 | - "10001:10001" 131 | --------------------------------------------------------------------------------