├── .github
│   ├── CODEOWNERS
│   └── workflows
│       ├── lint.yml
│       ├── release.yml
│       └── security.yml
├── .gitignore
├── .rustfmt.toml
├── CHANGELOG.md
├── Cargo.toml
├── LICENSE
├── README.md
├── _rabbit
│   ├── enabled_plugins
│   └── plugins
│       └── rabbitmq_delayed_message_exchange-3.11.1.ez
├── assets
│   ├── banner_dark.png
│   └── banner_light.png
├── deny.toml
├── omniqueue
│   ├── Cargo.toml
│   ├── src
│   │   ├── backends
│   │   │   ├── azure_queue_storage.rs
│   │   │   ├── gcp_pubsub.rs
│   │   │   ├── in_memory.rs
│   │   │   ├── mod.rs
│   │   │   ├── rabbitmq.rs
│   │   │   ├── redis
│   │   │   │   ├── cluster.rs
│   │   │   │   ├── fallback.rs
│   │   │   │   ├── mod.rs
│   │   │   │   ├── sentinel.rs
│   │   │   │   └── streams.rs
│   │   │   └── sqs.rs
│   │   ├── builder.rs
│   │   ├── lib.rs
│   │   ├── macros.rs
│   │   ├── queue
│   │   │   ├── acker.rs
│   │   │   ├── consumer.rs
│   │   │   ├── mod.rs
│   │   │   └── producer.rs
│   │   └── scheduled
│   │       └── mod.rs
│   └── tests
│       └── it
│           ├── azure_queue_storage.rs
│           ├── gcp_pubsub.rs
│           ├── main.rs
│           ├── rabbitmq.rs
│           ├── redis.rs
│           ├── redis_cluster.rs
│           ├── redis_fallback.rs
│           └── sqs.rs
└── testing-docker-compose.yml
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @svix/Engineering
2 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: Lint
2 |
3 | env:
4 | CARGO_TERM_COLOR: always
5 |
6 | on:
7 | push:
8 | branches:
9 | - main
10 | pull_request:
11 |
12 | jobs:
13 | check-fmt:
14 | name: Check formatting
15 | runs-on: ubuntu-latest
16 | steps:
17 | - uses: actions/checkout@v4
18 |
19 | - uses: dtolnay/rust-toolchain@nightly
20 | with:
21 | components: rustfmt
22 |
23 | - name: rustfmt
24 | run: cargo fmt -- --check
25 |
26 | test-versions:
27 | name: Library Lint
28 | runs-on: ubuntu-latest
29 | strategy:
30 | matrix:
31 | rust: [stable, beta]
32 | steps:
33 | - uses: actions/checkout@master
34 |
35 | - uses: dtolnay/rust-toolchain@master
36 | with:
37 | toolchain: ${{ matrix.rust }}
38 | components: clippy
39 |
40 | - uses: taiki-e/install-action@cargo-hack
41 |
42 | - uses: Swatinem/rust-cache@v2
43 |
44 | - name: Clippy
45 | run: cargo hack --each-feature clippy --all-targets -- -D warnings
46 |
47 | - name: Start test dependencies
48 | run: docker compose -f "./testing-docker-compose.yml" up -d
49 |
50 | - name: Run tests
51 | env:
52 | AWS_DEFAULT_REGION: localhost
53 | AWS_ACCESS_KEY_ID: x
54 | AWS_SECRET_ACCESS_KEY: x
55 | run: cargo test --all-features
56 |
57 | - name: Stop test dependencies
58 | run: docker compose -f "./testing-docker-compose.yml" down
59 |
60 | typos:
61 | name: Check for typos
62 | runs-on: ubuntu-latest
63 |
64 | steps:
65 | - uses: actions/checkout@v4
66 | - uses: crate-ci/typos@v1.25.0
67 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 |
3 | env:
4 | CARGO_TERM_COLOR: always
5 |
6 | on:
7 | push:
8 | tags:
9 | - 'v*'
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-latest
14 | environment: Release
15 | steps:
16 | - uses: actions/checkout@v4
17 |
18 | - uses: dtolnay/rust-toolchain@stable
19 |         with:
20 |           toolchain: stable
21 |
22 |       - uses: Swatinem/rust-cache@v2
23 |         with:
24 |           # only restore cache for faster publishing, don't save back results
25 |           save-if: false
26 |
27 |       - name: Publish
28 |         env:
29 |           CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
30 |         run: cargo publish --package omniqueue
31 |
--------------------------------------------------------------------------------
/.github/workflows/security.yml:
--------------------------------------------------------------------------------
1 | name: Rust Lib Security
2 |
3 | env:
4 | CARGO_TERM_COLOR: always
5 |
6 | on:
7 | push:
8 | branches:
9 | - main
10 | paths:
11 | - '**/Cargo.toml'
12 | - 'deny.toml'
13 | - '.github/workflows/security.yml'
14 | pull_request:
15 | paths:
16 | - '**/Cargo.toml'
17 | - 'deny.toml'
18 | - '.github/workflows/security.yml'
19 |
20 | jobs:
21 | security_audit:
22 | runs-on: ubuntu-latest
23 | steps:
24 | - uses: actions/checkout@v4
25 | - uses: EmbarkStudios/cargo-deny-action@v2
26 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | Cargo.lock
2 | target
3 | .vscode
4 |
--------------------------------------------------------------------------------
/.rustfmt.toml:
--------------------------------------------------------------------------------
1 | wrap_comments = true
2 | imports_granularity = "Crate"
3 | group_imports = "StdExternalCrate"
4 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # unreleased
2 |
3 | ## Breaking changes
4 |
5 | - Remove `QueueError::Unsupported`
6 | - This variant was never constructed inside `omniqueue`
7 | - Rename `aws_config` to `sqs_config` and use `aws_sdk_sqs::Config`
8 |
9 | ## Additions
10 |
11 | - Add `QueueError::PayloadTooLarge`
12 |
13 | # 0.2.0
14 |
15 | This release is a big one, and we are considering omniqueue out of early development now.
16 | You can expect the API to change much less in the coming releases compared to this one.
17 |
18 | ## Breaking changes
19 |
20 | - **redis: Some implementation changes mean that this backend is runtime-incompatible with the same backend in omniqueue 0.1**
21 | - Revise the public module structure to shorten import paths and make the docs easier to navigate
22 | - Revise the public API to require fewer trait imports for common usage
23 | - Rename a few types and traits
24 | - Most notably, `MemoryQueueBackend` is now named `InMemoryBackend`
25 | - Everything else should be easily found by searching for the old names
26 | - Remove custom encoders / decoders
27 | - Custom encoding can be handled more efficiently by wrapping omniqueue's
28 | `raw` send / receive interfaces into a custom higher-level interface
29 | - Update and prune dependency tree
30 | - Switch omniqueue's public traits from `async_trait` to [native async-in-traits][]
31 | - Simplify generic bounds (only matters if you were using omniqueue in generic code)
32 |
33 | ## Additions
34 |
35 | - Add a backend for Google Cloud's Pub/Sub queue (`gcp_pubsub` feature / module)
36 | - Add some documentation
37 | - Introduce an `omniqueue::Result` type alias
38 |
39 | [native async-in-traits]: https://blog.rust-lang.org/2023/12/21/async-fn-rpit-in-traits.html
40 |
41 | # 0.1.0
42 |
43 | Initial release.
44 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | resolver = "2"
3 |
4 | members = [
5 | "omniqueue",
6 | ]
7 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Svix
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | # Omniqueue
8 |
9 | Omniqueue is an abstraction layer over queue backends for Rust. It includes support for RabbitMQ,
10 | Redis streams, and SQS.
11 |
12 | Omniqueue provides a high level interface which allows sending and receiving raw byte arrays, any
13 | `serde` `Deserialize` and `Serialize` implementors via JSON encoded byte arrays, or any arbitrary
14 | types for which you have provided an encoding and/or decoding function.
15 |
16 | It is designed to be flexible and to be able to adapt to fit your existing queue configurations, but
17 | with a set of defaults that makes it simple to start sending and receiving quickly.
18 |
19 | ## How to use Omniqueue
20 |
21 | While the exact configuration will depend on the backend used, usage is roughly as follows.
22 |
23 | 1. Add `omniqueue` to your `Cargo.toml`. All backends are enabled by default, including RabbitMQ,
24 |    Redis (via its stream type), SQS, and an in-memory queue based on `tokio`'s mpsc
25 |    channel, which is perfect for testing.
26 |
27 | If you only need some backends, then simply disable the default features, and enable any backends
28 | that you require.
29 |
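   For example, to enable only the SQS backend, the dependency could look roughly like this (a
   sketch; pick whichever of the crate's feature names you actually need):

   ```toml
   [dependencies]
   omniqueue = { version = "0.2", default-features = false, features = ["sqs"] }
   ```
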
30 | 2. Construct and use your queue.
31 |
32 | The exact configuration type used will depend on your backend, but it's as simple as:
33 |
34 | ```rust
35 | let cfg = SqsConfig {
36 | queue_dsn: "http://localhost:9324/queue/queue_name".to_owned(),
37 | override_endpoint: true,
38 | };
39 | let (producer, mut consumer) = SqsBackend::builder(cfg).build_pair().await?;
40 |
41 | producer.send_serde_json(&ExampleType::default()).await?;
42 |
43 | let delivery = consumer.receive().await?;
44 | assert_eq!(
45 |     delivery.payload_serde_json::<ExampleType>()?,
46 | Some(ExampleType::default())
47 | );
48 |
49 | delivery.ack().await?;
50 | ```
51 |
52 | The producer and consumer returned implement the `QueueProducer` and `QueueConsumer` traits
53 | respectively. This means you can make functions generic over any queue backend. Alternatively, if
54 | you need dynamic dispatch, it's as simple as one extra line in the builder:
55 |
56 | ```rust
57 | let cfg = SqsConfig {
58 | queue_dsn: "http://localhost:9324/queue/queue_name".to_owned(),
59 | override_endpoint: true,
60 | };
61 | let (producer, mut consumer) = SqsBackend::builder(cfg)
62 | .make_dynamic()
63 | .build_pair()
64 | .await?;
65 | ```
66 |
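Because the producer and consumer are just trait implementations, helper code can stay generic
over backends. A minimal sketch (the `Task` type and `enqueue_task` function are illustrative
assumptions, not part of the crate):

```rust
use omniqueue::QueueProducer;
use serde::Serialize;

#[derive(Serialize)]
struct Task {
    id: u64,
}

// Accepts the producer half of any enabled backend.
async fn enqueue_task(producer: &impl QueueProducer, task: &Task) -> omniqueue::Result<()> {
    producer.send_serde_json(task).await
}
```
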
67 | ## Encoders and Decoders
68 |
69 | Part of the design of this crate was a clear separation of responsibility. The users of the queue
70 | generically should never have to concern themselves with how any given item is represented within
71 | the queue backend. Instead, they should be allowed to think only in Rust types.
72 |
73 | On the other hand, the users who define which backend to use should be the only ones concerned with
74 | getting the queue's internal representations to the Rust types.
75 |
76 | Enter `CustomEncoder`s and `CustomDecoder`s: these are as simple as closures or function pointers
77 | that convert from regular Rust types to the type expected by the queue backend's input or output.
78 |
79 | They are defined and used as follows:
80 |
81 | ```rust
82 | #[derive(Debug, PartialEq)]
83 | struct ExampleType {
84 | field: u8,
85 | }
86 |
87 |
88 | let (p, mut c) = RabbitMqBackend::builder(cfg)
89 | // RabbitMQ's internal representation is an arbitrary byte array.
90 | .with_encoder(|et: &ExampleType| -> omniqueue::Result<Vec<u8>> {
91 | Ok(vec![et.field])
92 | })
93 | .with_decoder(|v: &Vec<u8>| -> omniqueue::Result<ExampleType> {
94 | Ok(ExampleType {
95 | field: *v.first().unwrap_or(&0),
96 | })
97 | })
98 | .build_pair()
99 | .await?;
100 |
101 | let payload = ExampleType { field: 2 };
102 |
103 | p.send_custom(&payload).await?;
104 |
105 | let delivery = c.receive().await?;
106 | assert_eq!(delivery.payload_custom::<ExampleType>()?, Some(payload));
107 |
108 | delivery.ack().await?;
109 | ```
110 |
111 | These functions are called automatically as long as you have registered an encoder and/or decoder
112 | for the right type. This makes adapting the crate to an existing queue whose internal data layout
113 | doesn't exactly match the defaults as simple as possible.
114 |
--------------------------------------------------------------------------------
/_rabbit/enabled_plugins:
--------------------------------------------------------------------------------
1 | [rabbitmq_management, rabbitmq_delayed_message_exchange].
--------------------------------------------------------------------------------
/_rabbit/plugins/rabbitmq_delayed_message_exchange-3.11.1.ez:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/svix/omniqueue-rs/6e879b00701e4162491b59db649bde81f5935660/_rabbit/plugins/rabbitmq_delayed_message_exchange-3.11.1.ez
--------------------------------------------------------------------------------
/assets/banner_dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/svix/omniqueue-rs/6e879b00701e4162491b59db649bde81f5935660/assets/banner_dark.png
--------------------------------------------------------------------------------
/assets/banner_light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/svix/omniqueue-rs/6e879b00701e4162491b59db649bde81f5935660/assets/banner_light.png
--------------------------------------------------------------------------------
/deny.toml:
--------------------------------------------------------------------------------
1 | [graph]
2 | targets = [
3 | { triple = "x86_64-pc-windows-gnu" },
4 | { triple = "x86_64-unknown-linux-musl" },
5 | { triple = "x86_64-apple-darwin" },
6 | { triple = "aarch64-apple-darwin" },
7 | ]
8 |
9 | [advisories]
10 | db-path = "~/.cargo/advisory-db"
11 | db-urls = ["https://github.com/rustsec/advisory-db"]
12 | yanked = "deny"
13 | ignore = [
14 | # TODO: Wait for dependencies to upgrade off of paste
15 | "RUSTSEC-2024-0436",
16 | ]
17 |
18 | [licenses]
19 | allow = [
20 | "Apache-2.0",
21 | "BSD-2-Clause",
22 | "BSD-3-Clause",
23 | "CDLA-Permissive-2.0",
24 | "ISC",
25 | "MIT",
26 | "MPL-2.0",
27 | "Unicode-3.0",
28 | ]
29 | confidence-threshold = 0.8
30 | exceptions = [
31 | #{ allow = ["Zlib"], name = "adler32", version = "*" },
32 | ]
33 |
34 | [[licenses.clarify]]
35 | name = "ring"
36 | version = "*"
37 | expression = "MIT AND ISC AND OpenSSL"
38 | license-files = [
39 | { path = "LICENSE", hash = 0xbd0eed23 }
40 | ]
41 |
42 | [[licenses.clarify]]
43 | name = "encoding_rs"
44 | version = "0.8.30"
45 | expression = "MIT OR Apache-2.0"
46 | license-files = [
47 | { path = "COPYRIGHT", hash = 0x39f8ad31 }
48 | ]
49 |
50 | [licenses.private]
51 | ignore = false
52 | registries = []
53 |
54 | [bans]
55 | multiple-versions = "warn"
56 | wildcards = "allow"
57 | highlight = "all"
58 | allow = []
59 | deny = []
60 | skip = []
61 | skip-tree = []
62 |
63 | [sources]
64 | unknown-registry = "warn"
65 | unknown-git = "warn"
66 | allow-registry = ["https://github.com/rust-lang/crates.io-index"]
67 | allow-git = []
68 |
69 |
--------------------------------------------------------------------------------
/omniqueue/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "omniqueue"
3 | version = "0.2.1"
4 | license = "MIT"
5 | description = "An abstraction layer over various queue backends"
6 | authors = ["Svix Inc. "]
7 | repository = "https://github.com/svix/omniqueue-rs/"
8 | readme = "../README.md"
9 | rust-version = "1.79"
10 | edition = "2021"
11 |
12 | [dependencies]
13 | aws-config = { version = "1.1.5", default-features = false, features = ["behavior-version-latest"], optional = true }
14 | aws-sdk-sqs = { version = "1.13.0", optional = true }
15 | azure_storage = { version = "0.21.0", optional = true }
16 | azure_storage_queues = { version = "0.21.0", optional = true }
17 | bb8 = { version = "0.9.0", optional = true }
18 | bb8-redis = { version = "0.23.0", optional = true }
19 | bytesize = "2.0.1"
20 | futures-util = { version = "0.3.28", default-features = false, features = ["async-await", "std"], optional = true }
21 | gcloud-googleapis = { version = "1.2.0", optional = true }
22 | gcloud-pubsub = { version = "1.3.0", optional = true }
23 | lapin = { version = "2", optional = true }
24 | redis = { version = "0.31.0", features = ["tokio-comp", "tokio-native-tls-comp", "streams"], optional = true }
25 | serde = "1.0.196"
26 | serde_json = "1"
27 | svix-ksuid = { version = "0.8.0", optional = true }
28 | sync_wrapper = "1.0.1"
29 | thiserror = "2.0"
30 | time = "0.3.34"
31 | tokio = { version = "1", features = ["rt", "sync", "time"] }
32 | tracing = "0.1"
33 |
34 | [dev-dependencies]
35 | anyhow = "1.0.79"
36 | fastrand = "2.0.1"
37 | rstest = "0.25.0"
38 | serde = { version = "1.0.196", features = ["derive"] }
39 | tokio = { version = "1", features = ["macros"] }
40 | tokio-executor-trait = "2.1"
41 | tokio-reactor-trait = "1.1.0"
42 |
43 | [features]
44 | default = ["in_memory", "gcp_pubsub", "rabbitmq", "redis", "redis_cluster", "sqs"]
45 | in_memory = []
46 | gcp_pubsub = ["dep:futures-util", "dep:gcloud-googleapis", "dep:gcloud-pubsub"]
47 | rabbitmq = ["dep:futures-util", "dep:lapin"]
48 | # Generate message IDs for queue items. Likely not needed outside of Svix.
49 | rabbitmq-with-message-ids = ["rabbitmq", "dep:svix-ksuid"]
50 | redis = ["dep:bb8", "dep:bb8-redis", "dep:redis", "dep:svix-ksuid"]
51 | redis_cluster = ["redis", "redis/cluster-async"]
52 | redis_sentinel = ["redis", "redis/sentinel"]
53 | sqs = ["dep:aws-config", "dep:aws-sdk-sqs", "dep:futures-util"]
54 | azure_queue_storage = ["dep:azure_storage", "dep:azure_storage_queues"]
55 | beta = []
56 |
--------------------------------------------------------------------------------
/omniqueue/src/backends/azure_queue_storage.rs:
--------------------------------------------------------------------------------
1 | use std::{num::NonZeroUsize, time::Duration};
2 |
3 | use azure_storage::StorageCredentials;
4 | use azure_storage_queues::{
5 | operations::Message, PopReceipt, QueueClient, QueueServiceClientBuilder,
6 | };
7 | use serde::Serialize;
8 |
9 | #[allow(deprecated)]
10 | use crate::{
11 | builder::Static, queue::Acker, Delivery, QueueBackend, QueueBuilder, QueueError, Result,
12 | };
13 |
14 | fn get_client(cfg: &AqsConfig) -> QueueClient {
15 | let AqsConfig {
16 | queue_name,
17 | storage_account,
18 | credentials,
19 | cloud_uri,
20 | ..
21 | } = cfg;
22 | let mut builder = QueueServiceClientBuilder::new(storage_account, credentials.clone());
23 | if let Some(cloud_uri) = cloud_uri {
24 | builder = builder.cloud_location(azure_storage::CloudLocation::Custom {
25 | account: cfg.storage_account.clone(),
26 | uri: cloud_uri.clone(),
27 | });
28 | }
29 | builder.build().queue_client(queue_name)
30 | }
31 |
32 | /// Note that blocking receives are not supported by Azure Queue Storage and
33 | /// that message order is not guaranteed.
34 | #[non_exhaustive]
35 | pub struct AqsBackend;
36 |
37 | impl AqsBackend {
38 | /// Creates a new Azure Queue Storage builder with the given
39 | /// configuration.
40 |     pub fn builder(cfg: impl Into<AqsConfig>) -> QueueBuilder<Self> {
41 | #[allow(deprecated)]
42 | QueueBuilder::new(cfg.into())
43 | }
44 | }
45 |
46 | const DEFAULT_RECV_TIMEOUT: Duration = Duration::from_secs(180);
47 | const DEFAULT_EMPTY_RECV_DELAY: Duration = Duration::from_millis(200);
48 |
49 | #[derive(Clone)]
50 | pub struct AqsConfig {
51 | pub queue_name: String,
52 |     pub empty_receive_delay: Option<Duration>,
53 | pub message_ttl: Duration,
54 | pub storage_account: String,
55 | pub credentials: StorageCredentials,
56 |     pub cloud_uri: Option<String>,
57 |     pub receive_timeout: Option<Duration>,
58 | }
59 |
60 | #[allow(deprecated)]
61 | impl QueueBackend for AqsBackend {
62 | type Config = AqsConfig;
63 |
64 | type PayloadIn = String;
65 | type PayloadOut = String;
66 |
67 | type Producer = AqsProducer;
68 | type Consumer = AqsConsumer;
69 |
70 | async fn new_pair(config: Self::Config) -> Result<(AqsProducer, AqsConsumer)> {
71 | let client = get_client(&config);
72 | Ok((
73 | AqsProducer {
74 | client: client.clone(),
75 | config: config.clone(),
76 | },
77 | AqsConsumer {
78 | client: client.clone(),
79 | config: config.clone(),
80 | },
81 | ))
82 | }
83 |
84 |     async fn producing_half(config: Self::Config) -> Result<AqsProducer> {
85 | let client = get_client(&config);
86 | Ok(AqsProducer { client, config })
87 | }
88 |
89 |     async fn consuming_half(config: Self::Config) -> Result<AqsConsumer> {
90 | let client = get_client(&config);
91 | Ok(AqsConsumer { client, config })
92 | }
93 | }
94 |
95 | pub struct AqsProducer {
96 | client: QueueClient,
97 | config: AqsConfig,
98 | }
99 |
100 | impl AqsProducer {
101 | pub async fn send_raw(&self, payload: &str) -> Result<()> {
102 | self.send_raw_scheduled(payload, Duration::ZERO).await
103 | }
104 |
105 | #[tracing::instrument(
106 | name = "send",
107 | skip_all,
108 | fields(
109 | payload_size = payload.len(),
110 | delay = (delay > Duration::ZERO).then(|| tracing::field::debug(delay))
111 | )
112 | )]
113 | pub async fn send_raw_scheduled(&self, payload: &str, delay: Duration) -> Result<()> {
114 | self.client
115 | .put_message(payload)
116 | .visibility_timeout(delay)
117 | .ttl(self.config.message_ttl)
118 | .await
119 | .map_err(QueueError::generic)
120 | .map(|_| ())
121 | }
122 |
123 |     pub async fn send_serde_json<P: Serialize>(&self, payload: &P) -> Result<()> {
124 | let payload = serde_json::to_string(payload)?;
125 | self.send_raw(&payload).await
126 | }
127 |
128 |     pub async fn send_serde_json_scheduled<P: Serialize>(
129 | &self,
130 | payload: &P,
131 | delay: Duration,
132 | ) -> Result<()> {
133 | let payload = serde_json::to_string(payload)?;
134 | self.send_raw_scheduled(&payload, delay).await
135 | }
136 |
137 | pub async fn redrive_dlq(&self) -> Result<()> {
138 | Err(QueueError::Unsupported(
139 | "redrive_dlq is not supported by AqsBackend",
140 | ))
141 | }
142 | }
143 |
144 | impl crate::QueueProducer for AqsProducer {
145 | type Payload = String;
146 | omni_delegate!(send_raw, send_serde_json, redrive_dlq);
147 | }
148 | impl crate::ScheduledQueueProducer for AqsProducer {
149 | omni_delegate!(send_raw_scheduled, send_serde_json_scheduled);
150 | }
151 |
152 | /// Note that blocking receives are not supported by Azure Queue Storage and
153 | /// that message order is not guaranteed.
154 | pub struct AqsConsumer {
155 | client: QueueClient,
156 | config: AqsConfig,
157 | }
158 |
159 | struct AqsAcker {
160 | client: QueueClient,
161 | already_acked_or_nacked: bool,
162 | pop_receipt: PopReceipt,
163 | }
164 |
165 | impl Acker for AqsAcker {
166 | async fn ack(&mut self) -> Result<()> {
167 | if self.already_acked_or_nacked {
168 | return Err(QueueError::CannotAckOrNackTwice);
169 | }
170 |
171 | self.client
172 | .pop_receipt_client(self.pop_receipt.clone())
173 | .delete()
174 | .await
175 | .map_err(QueueError::generic)?;
176 | self.already_acked_or_nacked = true;
177 | Ok(())
178 | }
179 |
180 | async fn nack(&mut self) -> Result<()> {
181 | Ok(())
182 | }
183 |
184 | async fn set_ack_deadline(&mut self, _duration: Duration) -> Result<()> {
185 | Err(QueueError::Unsupported(
186 | "set_ack_deadline is not yet supported by AqsBackend",
187 | ))
188 | }
189 | }
190 |
191 | impl AqsConsumer {
192 | fn wrap_message(&self, message: &Message) -> Delivery {
193 | Delivery::new(
194 | message.message_text.as_bytes().to_owned(),
195 | AqsAcker {
196 | client: self.client.clone(),
197 | pop_receipt: message.pop_receipt(),
198 | already_acked_or_nacked: false,
199 | },
200 | )
201 | }
202 |
203 | /// Note that blocking receives are not supported by Azure Queue Storage.
204 | /// Calls to this method will return immediately if no messages are
205 | /// available for delivery in the queue.
206 |     pub async fn receive(&mut self) -> Result<Delivery> {
207 | self.client
208 | .get_messages()
209 | .visibility_timeout(self.config.receive_timeout.unwrap_or(DEFAULT_RECV_TIMEOUT))
210 | .await
211 | .map_err(QueueError::generic)
212 | .and_then(|m| m.messages.into_iter().next().ok_or(QueueError::NoData))
213 | .map(|m| self.wrap_message(&m))
214 | }
215 |
216 | pub async fn receive_all(
217 | &mut self,
218 | max_messages: usize,
219 | deadline: Duration,
220 |     ) -> Result<Vec<Delivery>> {
221 | let end = std::time::Instant::now() + deadline;
222 | let mut interval = tokio::time::interval(
223 | self.config
224 | .empty_receive_delay
225 | .unwrap_or(DEFAULT_EMPTY_RECV_DELAY),
226 | );
227 | loop {
228 | interval.tick().await;
229 | let msgs = self
230 | .client
231 | .get_messages()
232 | .number_of_messages(max_messages.try_into().unwrap_or(u8::MAX))
233 | .visibility_timeout(self.config.receive_timeout.unwrap_or(DEFAULT_RECV_TIMEOUT))
234 | .await
235 | .map_err(QueueError::generic)
236 | .map(|m| {
237 | m.messages
238 | .iter()
239 | .map(|m| self.wrap_message(m))
240 |                     .collect::<Vec<_>>()
241 | })?;
242 | if !msgs.is_empty() {
243 | return Ok(msgs);
244 | }
245 | if std::time::Instant::now() > end {
246 | return Ok(vec![]);
247 | }
248 | }
249 | }
250 | }
251 |
252 | impl crate::QueueConsumer for AqsConsumer {
253 | type Payload = String;
254 | omni_delegate!(receive, receive_all);
255 |
256 |     fn max_messages(&self) -> Option<NonZeroUsize> {
257 | // https://learn.microsoft.com/en-us/rest/api/storageservices/get-messages#uri-parameters
258 | NonZeroUsize::new(32)
259 | }
260 | }
261 |
--------------------------------------------------------------------------------
/omniqueue/src/backends/gcp_pubsub.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | path::{Path, PathBuf},
3 | sync::Arc,
4 | time::Duration,
5 | };
6 |
7 | use futures_util::{future::try_join_all, StreamExt};
8 | use gcloud_googleapis::pubsub::v1::PubsubMessage;
9 | use gcloud_pubsub::{
10 | client::{google_cloud_auth::credentials::CredentialsFile, Client, ClientConfig},
11 | publisher::Publisher,
12 | subscriber::ReceivedMessage,
13 | subscription::Subscription,
14 | };
15 | use serde::Serialize;
16 |
17 | #[allow(deprecated)]
18 | use crate::{
19 | builder::{QueueBuilder, Static},
20 | queue::{Acker, Delivery, QueueBackend},
21 | QueueError, Result,
22 | };
23 |
24 | pub struct GcpPubSubBackend;
25 |
26 | impl GcpPubSubBackend {
27 | /// Creates a new Google Cloud Pub/Sub queue builder with the given
28 | /// configuration.
29 |     pub fn builder(config: GcpPubSubConfig) -> QueueBuilder<Self> {
30 | #[allow(deprecated)]
31 | QueueBuilder::new(config)
32 | }
33 | }
34 |
35 | type Payload = Vec<u8>;
36 |
37 | // FIXME: topic/subscription are each for read/write. Split config up?
38 | #[derive(Clone, Debug, Eq, PartialEq)]
39 | pub struct GcpPubSubConfig {
40 | pub topic_id: String,
41 | pub subscription_id: String,
42 |     pub credentials_file: Option<PathBuf>,
43 | }
44 |
45 | /// Make a `ClientConfig` from a `CredentialsFile` on disk.
46 | async fn configure_client_from_file<P: AsRef<Path>>(cred_file_path: P) -> Result<ClientConfig> {
47 | let bytes = std::fs::read(cred_file_path).map_err(QueueError::generic)?;
48 | let creds: CredentialsFile = serde_json::from_slice(&bytes).map_err(QueueError::generic)?;
49 | ClientConfig::default()
50 | .with_credentials(creds)
51 | .await
52 | .map_err(QueueError::generic)
53 | }
54 |
55 | /// Making a `ClientConfig` via env vars is possible in two ways:
56 | /// - setting `GOOGLE_APPLICATION_CREDENTIALS` to the file path to have it
57 | /// loaded automatically
58 | /// - setting `GOOGLE_APPLICATION_CREDENTIALS_JSON` to the file contents
59 | /// (avoiding the need for a file on disk).
60 | async fn configure_client_from_env() -> Result<ClientConfig> {
61 | ClientConfig::default()
62 | .with_auth()
63 | .await
64 | .map_err(QueueError::generic)
65 | }
66 |
67 | async fn get_client(cfg: &GcpPubSubConfig) -> Result<Client> {
68 | let config = {
69 | if let Some(fp) = &cfg.credentials_file {
70 | tracing::trace!("reading gcp creds from file: {}", fp.display());
71 | configure_client_from_file(&fp).await?
72 | } else {
73 | tracing::trace!("reading gcp creds from env");
74 | configure_client_from_env().await?
75 | }
76 | };
77 | Client::new(config).await.map_err(QueueError::generic)
78 | }
79 |
80 | #[allow(deprecated)]
81 | impl QueueBackend for GcpPubSubBackend {
82 | type Config = GcpPubSubConfig;
83 |
84 | type PayloadIn = Payload;
85 | type PayloadOut = Payload;
86 |
87 | type Producer = GcpPubSubProducer;
88 | type Consumer = GcpPubSubConsumer;
89 |
90 | async fn new_pair(config: Self::Config) -> Result<(GcpPubSubProducer, GcpPubSubConsumer)> {
91 | let client = get_client(&config).await?;
92 | Ok((
93 | GcpPubSubProducer::new(client.clone(), config.topic_id).await?,
94 | GcpPubSubConsumer::new(client, config.subscription_id).await?,
95 | ))
96 | }
97 |
98 |     async fn producing_half(config: Self::Config) -> Result<GcpPubSubProducer> {
99 | let client = get_client(&config).await?;
100 | GcpPubSubProducer::new(client, config.topic_id).await
101 | }
102 |
103 |     async fn consuming_half(config: Self::Config) -> Result<GcpPubSubConsumer> {
104 | let client = get_client(&config).await?;
105 | GcpPubSubConsumer::new(client, config.subscription_id).await
106 | }
107 | }
108 |
109 | pub struct GcpPubSubProducer {
110 | client: Client,
111 |     topic_id: Arc<String>,
112 | }
113 |
114 | impl GcpPubSubProducer {
115 |     async fn new(client: Client, topic_id: String) -> Result<Self> {
116 | let topic = client.topic(&topic_id);
117 | // Only warn if the topic doesn't exist at this point.
118 | // If it gets created after the fact, we should be able to still use it
119 | // when available, otherwise if it's still missing at that time, error.
120 | if !topic.exists(None).await.map_err(QueueError::generic)? {
121 | tracing::warn!("topic {} does not exist", &topic_id);
122 | }
123 | Ok(Self {
124 | client,
125 | topic_id: Arc::new(topic_id),
126 | })
127 | }
128 |
129 |     async fn publisher(&self) -> Result<Publisher> {
130 | // N.b. defer the creation of a publisher/topic until needed. Helps recover when
131 | // the topic does not yet exist, but will soon.
132 | // Might be more expensive to recreate each time, but overall more reliable.
133 | let topic = self.client.topic(&self.topic_id);
134 |
135 | // Publishing to a non-existent topic will cause the publisher to wait
136 | // (forever?) Giving this error will allow dependents to handle the
137 | // error case immediately when this happens, instead of holding the
138 | // connection open indefinitely.
139 | if !topic.exists(None).await.map_err(QueueError::generic)? {
140 | return Err(QueueError::Generic(
141 | format!("topic {} does not exist", &self.topic_id).into(),
142 | ));
143 | }
144 |
145 | // FIXME: may need to expose `PublisherConfig` to caller so they can tweak this
146 | Ok(topic.new_publisher(None))
147 | }
148 |
149 | #[tracing::instrument(
150 | name = "send",
151 | skip_all,
152 | fields(payload_size = payload.len())
153 | )]
154 | pub async fn send_raw(&self, payload: &[u8]) -> Result<()> {
155 | let msg = PubsubMessage {
156 | data: payload.to_vec(),
157 | ..Default::default()
158 | };
159 |
160 | let publisher = self.publisher().await?;
161 | let awaiter = publisher.publish(msg).await;
162 | awaiter.get().await.map_err(QueueError::generic)?;
163 | Ok(())
164 | }
165 |
166 |     pub async fn send_serde_json<P: Serialize>(&self, payload: &P) -> Result<()> {
167 | self.send_raw(&serde_json::to_vec(&payload)?).await
168 | }
169 |
170 | pub async fn redrive_dlq(&self) -> Result<()> {
171 | Err(QueueError::Unsupported(
172 | "redrive_dlq is not supported by GcpPubSubBackend",
173 | ))
174 | }
175 | }
176 |
177 | impl std::fmt::Debug for GcpPubSubProducer {
178 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
179 | f.debug_struct("GcpPubSubProducer")
180 | .field("topic_id", &self.topic_id)
181 | .finish()
182 | }
183 | }
184 |
185 | impl crate::QueueProducer for GcpPubSubProducer {
186 | type Payload = Payload;
187 | omni_delegate!(send_raw, send_serde_json, redrive_dlq);
188 |
189 | /// This method is overwritten for the Google Cloud Pub/Sub backend to be
190 | /// more efficient than the default of sequentially publishing `payloads`.
191 | #[tracing::instrument(name = "send_batch", skip_all)]
192 | async fn send_raw_batch(
193 | &self,
194 |         payloads: impl IntoIterator<Item: AsRef<Self::Payload> + Send, IntoIter: Send> + Send,
195 | ) -> Result<()> {
196 | let msgs = payloads
197 | .into_iter()
198 | .map(|payload| PubsubMessage {
199 | data: payload.as_ref().to_vec(),
200 | ..Default::default()
201 | })
202 | .collect();
203 |
204 | let publisher = self.publisher().await?;
205 | let awaiters = publisher.publish_bulk(msgs).await;
206 | try_join_all(awaiters.into_iter().map(|a| a.get()))
207 | .await
208 | .map_err(QueueError::generic)?;
209 | Ok(())
210 | }
211 |
212 | /// This method is overwritten for the Google Cloud Pub/Sub backend to be
213 | /// more efficient than the default of sequentially publishing `payloads`.
214 | #[tracing::instrument(name = "send_batch", skip_all)]
215 | async fn send_serde_json_batch(
216 | &self,
217 |         payloads: impl IntoIterator<Item: Serialize + Send, IntoIter: Send> + Send,
218 | ) -> Result<()> {
219 | let msgs = payloads
220 | .into_iter()
221 | .map(|payload| {
222 | Ok(PubsubMessage {
223 | data: serde_json::to_vec(&payload)?,
224 | ..Default::default()
225 | })
226 | })
227 |             .collect::<Result<Vec<_>>>()?;
228 |
229 | let publisher = self.publisher().await?;
230 | let awaiters = publisher.publish_bulk(msgs).await;
231 | try_join_all(awaiters.into_iter().map(|a| a.get()))
232 | .await
233 | .map_err(QueueError::generic)?;
234 | Ok(())
235 | }
236 | }
237 |
238 | pub struct GcpPubSubConsumer {
239 | client: Client,
240 |     subscription_id: Arc<String>,
241 | }
242 |
243 | impl GcpPubSubConsumer {
244 |     async fn new(client: Client, subscription_id: String) -> Result<Self> {
245 | Ok(Self {
246 | client,
247 | subscription_id: Arc::new(subscription_id),
248 | })
249 | }
250 |
251 |     pub async fn receive(&mut self) -> Result<Delivery> {
252 | let subscription = subscription(&self.client, &self.subscription_id).await?;
253 | let mut stream = subscription
254 | .subscribe(None)
255 | .await
256 | .map_err(QueueError::generic)?;
257 |
258 | let recv_msg = stream.next().await.ok_or_else(|| QueueError::NoData)?;
259 |
260 | Ok(self.wrap_recv_msg(recv_msg))
261 | }
262 |
263 | pub async fn receive_all(
264 | &mut self,
265 | max_messages: usize,
266 | deadline: Duration,
267 |     ) -> Result<Vec<Delivery>> {
268 | let subscription = subscription(&self.client, &self.subscription_id).await?;
269 | match tokio::time::timeout(deadline, subscription.pull(max_messages as _, None)).await {
270 | Ok(messages) => Ok(messages
271 | .map_err(QueueError::generic)?
272 | .into_iter()
273 | .map(|m| self.wrap_recv_msg(m))
274 | .collect()),
275 | // Timeout
276 | Err(_) => Ok(vec![]),
277 | }
278 | }
279 |
280 | fn wrap_recv_msg(&self, mut recv_msg: ReceivedMessage) -> Delivery {
281 | // FIXME: would be nice to avoid having to move the data out here.
282 | // While it's possible to ack via a subscription and an ack_id, nack
283 | // is only possible via a `ReceiveMessage`. This means we either need
284 | // to hold 2 copies of the payload, or move the bytes out so they can be
285 | // returned _outside of the Acker_.
286 | let payload = recv_msg.message.data.drain(..).collect();
287 |
288 | Delivery::new(
289 | payload,
290 | GcpPubSubAcker {
291 | recv_msg,
292 | subscription_id: self.subscription_id.clone(),
293 | },
294 | )
295 | }
296 | }
297 |
298 | impl std::fmt::Debug for GcpPubSubConsumer {
299 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
300 | f.debug_struct("GcpPubSubConsumer")
301 | .field("subscription_id", &self.subscription_id)
302 | .finish()
303 | }
304 | }
305 |
306 | async fn subscription(client: &Client, subscription_id: &str) -> Result<Subscription> {
307 | let subscription = client.subscription(subscription_id);
308 | if !subscription
309 | .exists(None)
310 | .await
311 | .map_err(QueueError::generic)?
312 | {
313 | return Err(QueueError::Generic(
314 | format!("subscription {} does not exist", &subscription_id).into(),
315 | ));
316 | }
317 | Ok(subscription)
318 | }
319 |
320 | impl crate::QueueConsumer for GcpPubSubConsumer {
321 | type Payload = Payload;
322 | omni_delegate!(receive, receive_all);
323 | }
324 |
325 | struct GcpPubSubAcker {
326 | recv_msg: ReceivedMessage,
327 | subscription_id: Arc,
328 | }
329 |
330 | impl std::fmt::Debug for GcpPubSubAcker {
331 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
332 | f.debug_struct("GcpPubSubAcker")
333 | .field("ack_id", &self.recv_msg.ack_id())
334 | .field("message_id", &self.recv_msg.message.message_id)
335 | .field("subscription_id", &self.subscription_id)
336 | .finish()
337 | }
338 | }
339 |
340 | impl Acker for GcpPubSubAcker {
341 | async fn ack(&mut self) -> Result<()> {
342 | self.recv_msg.ack().await.map_err(QueueError::generic)
343 | }
344 |
345 | async fn nack(&mut self) -> Result<()> {
346 | self.recv_msg.nack().await.map_err(QueueError::generic)
347 | }
348 |
349 | async fn set_ack_deadline(&mut self, duration: Duration) -> Result<()> {
350 | let duration_secs = duration.as_secs().try_into().map_err(|e| {
351 |             QueueError::Generic(Box::<dyn std::error::Error + Send + Sync>::from(format!(
352 | "set_ack_deadline duration {duration:?} is too large: {e:?}"
353 | )))
354 | })?;
355 |
356 | self.recv_msg
357 | .modify_ack_deadline(duration_secs)
358 | .await
359 | .map_err(QueueError::generic)
360 | }
361 | }
362 |
--------------------------------------------------------------------------------
/omniqueue/src/backends/in_memory.rs:
--------------------------------------------------------------------------------
1 | use std::time::{Duration, Instant};
2 |
3 | use serde::Serialize;
4 | use tokio::sync::mpsc;
5 |
6 | #[allow(deprecated)]
7 | use crate::{
8 | builder::{QueueBuilder, Static},
9 | queue::{Acker, Delivery, QueueBackend},
10 | QueueError, Result,
11 | };
12 |
13 | pub struct InMemoryBackend;
14 |
15 | impl InMemoryBackend {
16 | /// Creates a new in-memory queue builder.
17 |     pub fn builder() -> QueueBuilder<Self> {
18 | #[allow(deprecated)]
19 | QueueBuilder::new(())
20 | }
21 | }
22 |
23 | #[allow(deprecated)]
24 | impl QueueBackend for InMemoryBackend {
25 |     type PayloadIn = Vec<u8>;
26 |     type PayloadOut = Vec<u8>;
27 |
28 | type Producer = InMemoryProducer;
29 | type Consumer = InMemoryConsumer;
30 |
31 | type Config = ();
32 |
33 | async fn new_pair(_config: ()) -> Result<(InMemoryProducer, InMemoryConsumer)> {
34 | let (tx, rx) = mpsc::unbounded_channel();
35 |
36 | Ok((
37 | InMemoryProducer { tx: tx.clone() },
38 | InMemoryConsumer { tx, rx },
39 | ))
40 | }
41 |
42 |     async fn producing_half(_config: ()) -> Result<InMemoryProducer> {
43 | Err(QueueError::CannotCreateHalf)
44 | }
45 |
46 |     async fn consuming_half(_config: ()) -> Result<InMemoryConsumer> {
47 | Err(QueueError::CannotCreateHalf)
48 | }
49 | }
50 |
51 | pub struct InMemoryProducer {
52 |     tx: mpsc::UnboundedSender<Vec<u8>>,
53 | }
54 |
55 | impl InMemoryProducer {
56 | pub async fn send_raw(&self, payload: &[u8]) -> Result<()> {
57 | self.tx.send(payload.to_vec()).map_err(QueueError::generic)
58 | }
59 |
60 |     pub async fn send_serde_json<P: Serialize>(&self, payload: &P) -> Result<()> {
61 | let payload = serde_json::to_vec(payload)?;
62 | self.send_raw(&payload).await
63 | }
64 |
65 | pub async fn send_raw_scheduled(&self, payload: &[u8], delay: Duration) -> Result<()> {
66 | let tx = self.tx.clone();
67 | let payload = payload.to_vec();
68 | tokio::spawn(async move {
69 | tracing::trace!("MemoryQueue: event sent > (delay: {:?})", delay);
70 | tokio::time::sleep(delay).await;
71 | if tx.send(payload).is_err() {
72 | tracing::error!("Receiver dropped");
73 | }
74 | });
75 | Ok(())
76 | }
77 |
78 |     pub async fn send_serde_json_scheduled<P: Serialize>(
79 | &self,
80 | payload: &P,
81 | delay: Duration,
82 | ) -> Result<()> {
83 | let payload = serde_json::to_vec(payload)?;
84 | self.send_raw_scheduled(&payload, delay).await
85 | }
86 |
87 | pub async fn redrive_dlq(&self) -> Result<()> {
88 | Err(QueueError::Unsupported(
89 | "redrive_dlq is not supported by InMemoryBackend",
90 | ))
91 | }
92 | }
93 |
94 | impl crate::QueueProducer for InMemoryProducer {
95 |     type Payload = Vec<u8>;
96 | omni_delegate!(send_raw, send_serde_json, redrive_dlq);
97 | }
98 | impl crate::ScheduledQueueProducer for InMemoryProducer {
99 | omni_delegate!(send_raw_scheduled, send_serde_json_scheduled);
100 | }
101 |
102 | pub struct InMemoryConsumer {
103 |     rx: mpsc::UnboundedReceiver<Vec<u8>>,
104 |     tx: mpsc::UnboundedSender<Vec<u8>>,
105 | }
106 |
107 | impl InMemoryConsumer {
108 |     fn wrap_payload(&self, payload: Vec<u8>) -> Delivery {
109 | Delivery::new(
110 | payload.clone(),
111 | InMemoryAcker {
112 | tx: self.tx.clone(),
113 | payload_copy: Some(payload),
114 | already_acked_or_nacked: false,
115 | },
116 | )
117 | }
118 |
119 |     pub async fn receive(&mut self) -> Result<Delivery> {
120 | let payload = self
121 | .rx
122 | .recv()
123 | .await
124 | .ok_or_else(|| QueueError::Generic("recv failed".into()))?;
125 | Ok(self.wrap_payload(payload))
126 | }
127 |
128 | pub async fn receive_all(
129 | &mut self,
130 | max_messages: usize,
131 | deadline: Duration,
132 |     ) -> Result<Vec<Delivery>> {
133 | let mut out = Vec::with_capacity(max_messages);
134 | let start = Instant::now();
135 | match tokio::time::timeout(deadline, self.rx.recv()).await {
136 | Ok(Some(x)) => out.push(self.wrap_payload(x)),
137 | // Timeouts and stream termination
138 | Err(_) | Ok(None) => return Ok(out),
139 | }
140 |
141 | if max_messages > 1 {
142 | // `try_recv` will break the loop if no ready items are already
143 | // buffered in the channel. This should allow us to
144 | // opportunistically fill up the buffer in the remaining time.
145 | while let Ok(x) = self.rx.try_recv() {
146 | out.push(self.wrap_payload(x));
147 | if out.len() >= max_messages || start.elapsed() >= deadline {
148 | break;
149 | }
150 | }
151 | }
152 | Ok(out)
153 | }
154 | }
155 |
156 | impl crate::QueueConsumer for InMemoryConsumer {
157 |     type Payload = Vec<u8>;
158 | omni_delegate!(receive, receive_all);
159 | }
160 |
161 | struct InMemoryAcker {
162 |     tx: mpsc::UnboundedSender<Vec<u8>>,
163 |     payload_copy: Option<Vec<u8>>,
164 | already_acked_or_nacked: bool,
165 | }
166 |
167 | impl Acker for InMemoryAcker {
168 | async fn ack(&mut self) -> Result<()> {
169 | if self.already_acked_or_nacked {
170 | Err(QueueError::CannotAckOrNackTwice)
171 | } else {
172 | self.already_acked_or_nacked = true;
173 | Ok(())
174 | }
175 | }
176 |
177 | async fn nack(&mut self) -> Result<()> {
178 | if self.already_acked_or_nacked {
179 | Err(QueueError::CannotAckOrNackTwice)
180 | } else {
181 | self.already_acked_or_nacked = true;
182 | self.tx
183 | .send(
184 | self.payload_copy
185 | .take()
186 | .ok_or(QueueError::CannotAckOrNackTwice)?,
187 | )
188 | .map_err(QueueError::generic)
189 | }
190 | }
191 |
192 | async fn set_ack_deadline(&mut self, _duration: Duration) -> Result<()> {
193 | Err(QueueError::Unsupported(
194 | "set_ack_deadline is not yet supported by InMemoryBackend",
195 | ))
196 | }
197 | }
198 |
199 | #[cfg(test)]
200 | mod tests {
201 | use std::time::{Duration, Instant};
202 |
203 | use serde::{Deserialize, Serialize};
204 |
205 | use super::InMemoryBackend;
206 | use crate::QueueProducer;
207 |
208 | #[derive(Clone, Copy, Debug, Eq, Deserialize, PartialEq, Serialize)]
209 | struct TypeA {
210 | a: i32,
211 | }
212 |
213 | #[tokio::test]
214 | async fn simple_queue_test() {
215 | let (p, mut c) = InMemoryBackend::builder().build_pair().await.unwrap();
216 |
217 | p.send_serde_json(&TypeA { a: 13 }).await.unwrap();
218 | assert_eq!(
219 | c.receive()
220 | .await
221 | .unwrap()
222 |                 .payload_serde_json::<TypeA>()
223 | .unwrap()
224 | .unwrap(),
225 | TypeA { a: 13 },
226 | );
227 |
228 | p.send_bytes(&serde_json::to_vec(&TypeA { a: 14 }).unwrap())
229 | .await
230 | .unwrap();
231 | assert_eq!(
232 |             serde_json::from_slice::<TypeA>(c.receive().await.unwrap().borrow_payload().unwrap())
233 | .unwrap(),
234 | TypeA { a: 14 },
235 | );
236 | }
237 |
238 | #[derive(Debug, Deserialize, Serialize, PartialEq)]
239 | struct ExType {
240 | a: u8,
241 | }
242 |
243 | /// Consumer will return immediately if there are fewer than max messages to
244 | /// start with.
245 | #[tokio::test]
246 | async fn test_send_recv_all_partial() {
247 | let payload = ExType { a: 2 };
248 |
249 | let (p, mut c) = InMemoryBackend::builder().build_pair().await.unwrap();
250 |
251 | p.send_serde_json(&payload).await.unwrap();
252 | let deadline = Duration::from_secs(1);
253 |
254 | let now = Instant::now();
255 | let mut xs = c.receive_all(2, deadline).await.unwrap();
256 | assert_eq!(xs.len(), 1);
257 | let d = xs.remove(0);
258 |         assert_eq!(d.payload_serde_json::<ExType>().unwrap().unwrap(), payload);
259 | d.ack().await.unwrap();
260 | assert!(now.elapsed() <= deadline);
261 | }
262 |
263 | /// Consumer should yield items immediately if there's a full batch ready on
264 | /// the first poll.
265 | #[tokio::test]
266 | async fn test_send_recv_all_full() {
267 | let payload1 = ExType { a: 1 };
268 | let payload2 = ExType { a: 2 };
269 |
270 | let (p, mut c) = InMemoryBackend::builder().build_pair().await.unwrap();
271 |
272 | p.send_serde_json(&payload1).await.unwrap();
273 | p.send_serde_json(&payload2).await.unwrap();
274 | let deadline = Duration::from_secs(1);
275 |
276 | let now = Instant::now();
277 | let mut xs = c.receive_all(2, deadline).await.unwrap();
278 | assert_eq!(xs.len(), 2);
279 | let d1 = xs.remove(0);
280 | assert_eq!(
281 |             d1.payload_serde_json::<ExType>().unwrap().unwrap(),
282 | payload1
283 | );
284 | d1.ack().await.unwrap();
285 |
286 | let d2 = xs.remove(0);
287 | assert_eq!(
288 |             d2.payload_serde_json::<ExType>().unwrap().unwrap(),
289 | payload2
290 | );
291 | d2.ack().await.unwrap();
292 | // N.b. it's still possible this could turn up false if the test runs
293 | // too slow.
294 | assert!(now.elapsed() < deadline);
295 | }
296 |
297 | /// Consumer will return the full batch immediately, but also return
298 | /// immediately if a partial batch is ready.
299 | #[tokio::test]
300 | async fn test_send_recv_all_full_then_partial() {
301 | let payload1 = ExType { a: 1 };
302 | let payload2 = ExType { a: 2 };
303 | let payload3 = ExType { a: 3 };
304 |
305 | let (p, mut c) = InMemoryBackend::builder().build_pair().await.unwrap();
306 |
307 | p.send_serde_json(&payload1).await.unwrap();
308 | p.send_serde_json(&payload2).await.unwrap();
309 | p.send_serde_json(&payload3).await.unwrap();
310 |
311 | let deadline = Duration::from_secs(1);
312 | let now1 = Instant::now();
313 | let mut xs = c.receive_all(2, deadline).await.unwrap();
314 | assert_eq!(xs.len(), 2);
315 | let d1 = xs.remove(0);
316 | assert_eq!(
317 |             d1.payload_serde_json::<ExType>().unwrap().unwrap(),
318 | payload1
319 | );
320 | d1.ack().await.unwrap();
321 |
322 | let d2 = xs.remove(0);
323 | assert_eq!(
324 |             d2.payload_serde_json::<ExType>().unwrap().unwrap(),
325 | payload2
326 | );
327 | d2.ack().await.unwrap();
328 | assert!(now1.elapsed() < deadline);
329 |
330 | // 2nd call
331 | let now2 = Instant::now();
332 | let mut ys = c.receive_all(2, deadline).await.unwrap();
333 | assert_eq!(ys.len(), 1);
334 | let d3 = ys.remove(0);
335 | assert_eq!(
336 |             d3.payload_serde_json::<ExType>().unwrap().unwrap(),
337 | payload3
338 | );
339 | d3.ack().await.unwrap();
340 | assert!(now2.elapsed() <= deadline);
341 | }
342 |
343 | /// Consumer will NOT wait indefinitely for at least one item.
344 | #[tokio::test]
345 | async fn test_send_recv_all_late_arriving_items() {
346 | let (_p, mut c) = InMemoryBackend::builder().build_pair().await.unwrap();
347 |
348 | let deadline = Duration::from_secs(1);
349 | let now = Instant::now();
350 | let xs = c.receive_all(2, deadline).await.unwrap();
351 | let elapsed = now.elapsed();
352 |
353 | assert_eq!(xs.len(), 0);
354 | // Elapsed should be around the deadline, ballpark
355 | assert!(elapsed >= deadline);
356 | assert!(elapsed <= deadline + Duration::from_millis(200));
357 | }
358 |
359 | #[tokio::test]
360 | async fn test_scheduled() {
361 | let payload1 = ExType { a: 1 };
362 |
363 | let (p, mut c) = InMemoryBackend::builder().build_pair().await.unwrap();
364 |
365 | let delay = Duration::from_millis(100);
366 | let now = Instant::now();
367 | p.send_serde_json_scheduled(&payload1, delay).await.unwrap();
368 | let delivery = c
369 | .receive_all(1, delay * 2)
370 | .await
371 | .unwrap()
372 | .into_iter()
373 | .next()
374 | .unwrap();
375 | assert!(now.elapsed() >= delay);
376 | assert!(now.elapsed() < delay * 2);
377 | assert_eq!(Some(payload1), delivery.payload_serde_json().unwrap());
378 | }
379 | }
380 |
--------------------------------------------------------------------------------
/omniqueue/src/backends/mod.rs:
--------------------------------------------------------------------------------
1 | #[cfg(feature = "azure_queue_storage")]
2 | pub mod azure_queue_storage;
3 | #[cfg(feature = "gcp_pubsub")]
4 | pub mod gcp_pubsub;
5 | #[cfg(feature = "in_memory")]
6 | pub mod in_memory;
7 | #[cfg(feature = "rabbitmq")]
8 | pub mod rabbitmq;
9 | #[cfg(feature = "redis")]
10 | pub mod redis;
11 | #[cfg(feature = "sqs")]
12 | pub mod sqs;
13 |
14 | #[cfg(feature = "azure_queue_storage")]
15 | pub use azure_queue_storage::{AqsBackend, AqsConfig, AqsConsumer, AqsProducer};
16 | #[cfg(feature = "gcp_pubsub")]
17 | pub use gcp_pubsub::{GcpPubSubBackend, GcpPubSubConfig, GcpPubSubConsumer, GcpPubSubProducer};
18 | #[cfg(feature = "in_memory")]
19 | pub use in_memory::{InMemoryBackend, InMemoryConsumer, InMemoryProducer};
20 | #[cfg(feature = "rabbitmq")]
21 | pub use rabbitmq::{RabbitMqBackend, RabbitMqConfig, RabbitMqConsumer, RabbitMqProducer};
22 | #[cfg(feature = "redis")]
23 | pub use redis::{RedisBackend, RedisBackendBuilder, RedisConfig, RedisConsumer, RedisProducer};
24 | #[cfg(feature = "redis_cluster")]
25 | pub use redis::{RedisClusterBackend, RedisClusterBackendBuilder};
26 | #[cfg(feature = "sqs")]
27 | pub use sqs::{SqsBackend, SqsConfig, SqsConsumer, SqsProducer};
28 |
--------------------------------------------------------------------------------
/omniqueue/src/backends/rabbitmq.rs:
--------------------------------------------------------------------------------
1 | use std::time::{Duration, Instant};
2 |
3 | use futures_util::{FutureExt, StreamExt};
4 | use lapin::types::AMQPValue;
5 | pub use lapin::{
6 | acker::Acker as LapinAcker,
7 | options::{
8 | BasicAckOptions, BasicConsumeOptions, BasicNackOptions, BasicPublishOptions,
9 | BasicQosOptions,
10 | },
11 | types::FieldTable,
12 | BasicProperties, Channel, Connection, ConnectionProperties, Consumer,
13 | };
14 | use serde::Serialize;
15 |
16 | #[allow(deprecated)]
17 | use crate::{
18 | builder::{QueueBuilder, Static},
19 | queue::{Acker, Delivery, QueueBackend},
20 | QueueError, Result,
21 | };
22 |
23 | #[derive(Clone)]
24 | pub struct RabbitMqConfig {
25 | pub uri: String,
26 | pub connection_properties: ConnectionProperties,
27 |
28 | pub publish_exchange: String,
29 | pub publish_routing_key: String,
30 | pub publish_options: BasicPublishOptions,
31 | pub publish_properties: BasicProperties,
32 |
33 | pub consume_queue: String,
34 | pub consumer_tag: String,
35 | pub consume_options: BasicConsumeOptions,
36 | pub consume_arguments: FieldTable,
37 |
38 |     pub consume_prefetch_count: Option<u16>,
39 | pub requeue_on_nack: bool,
40 | }
41 |
42 | pub struct RabbitMqBackend;
43 |
44 | impl RabbitMqBackend {
45 | /// Creates a new RabbitMQ queue builder with the given configuration.
46 |     pub fn builder(config: RabbitMqConfig) -> QueueBuilder<Self> {
47 | #[allow(deprecated)]
48 | QueueBuilder::new(config)
49 | }
50 | }
51 |
52 | async fn consumer(conn: &Connection, cfg: RabbitMqConfig) -> Result<RabbitMqConsumer> {
53 | let channel_rx = conn.create_channel().await.map_err(QueueError::generic)?;
54 |
55 | if let Some(n) = cfg.consume_prefetch_count {
56 | channel_rx
57 | .basic_qos(n, BasicQosOptions::default())
58 | .await
59 | .map_err(QueueError::generic)?;
60 | }
61 |
62 | Ok(RabbitMqConsumer {
63 | consumer: channel_rx
64 | .basic_consume(
65 | &cfg.consume_queue,
66 | &cfg.consumer_tag,
67 | cfg.consume_options,
68 | cfg.consume_arguments.clone(),
69 | )
70 | .await
71 | .map_err(QueueError::generic)?,
72 | requeue_on_nack: cfg.requeue_on_nack,
73 | })
74 | }
75 |
76 | async fn producer(conn: &Connection, cfg: RabbitMqConfig) -> Result<RabbitMqProducer> {
77 | let channel_tx = conn.create_channel().await.map_err(QueueError::generic)?;
78 | Ok(RabbitMqProducer {
79 | channel: channel_tx,
80 | exchange: cfg.publish_exchange.clone(),
81 | routing_key: cfg.publish_routing_key.clone(),
82 | options: cfg.publish_options,
83 | properties: cfg.publish_properties.clone(),
84 | })
85 | }
86 |
87 | #[allow(deprecated)]
88 | impl QueueBackend for RabbitMqBackend {
89 |     type PayloadIn = Vec<u8>;
90 |     type PayloadOut = Vec<u8>;
91 |
92 | type Producer = RabbitMqProducer;
93 | type Consumer = RabbitMqConsumer;
94 |
95 | type Config = RabbitMqConfig;
96 |
97 | async fn new_pair(cfg: RabbitMqConfig) -> Result<(RabbitMqProducer, RabbitMqConsumer)> {
98 | let conn = Connection::connect(&cfg.uri, cfg.connection_properties.clone())
99 | .await
100 | .map_err(QueueError::generic)?;
101 |
102 | Ok((
103 | producer(&conn, cfg.clone()).await?,
104 | consumer(&conn, cfg.clone()).await?,
105 | ))
106 | }
107 |
108 |     async fn producing_half(cfg: RabbitMqConfig) -> Result<RabbitMqProducer> {
109 | let conn = Connection::connect(&cfg.uri, cfg.connection_properties.clone())
110 | .await
111 | .map_err(QueueError::generic)?;
112 |
113 | producer(&conn, cfg.clone()).await
114 | }
115 |
116 |     async fn consuming_half(cfg: RabbitMqConfig) -> Result<RabbitMqConsumer> {
117 | let conn = Connection::connect(&cfg.uri, cfg.connection_properties.clone())
118 | .await
119 | .map_err(QueueError::generic)?;
120 |
121 | consumer(&conn, cfg.clone()).await
122 | }
123 | }
124 |
125 | pub struct RabbitMqProducer {
126 | channel: Channel,
127 | exchange: String,
128 | routing_key: String,
129 | options: BasicPublishOptions,
130 | properties: BasicProperties,
131 | }
132 |
133 | impl RabbitMqProducer {
134 | async fn send_raw_with_headers(
135 | &self,
136 | payload: &[u8],
137 |         headers: Option<FieldTable>,
138 | ) -> Result<()> {
139 | let mut properties = self.properties.clone();
140 | #[cfg(feature = "rabbitmq-with-message-ids")]
141 | {
142 | use svix_ksuid::{KsuidLike as _, KsuidMs};
143 | use time::OffsetDateTime;
144 |
145 | let id = &KsuidMs::new(Some(OffsetDateTime::now_utc()), None);
146 | properties = properties.with_message_id(id.to_string().into());
147 | }
148 | if let Some(headers) = headers {
149 | properties = properties.with_headers(headers);
150 | }
151 |
152 | self.channel
153 | .basic_publish(
154 | &self.exchange,
155 | &self.routing_key,
156 | self.options,
157 | payload,
158 | properties,
159 | )
160 | .await
161 | .map_err(QueueError::generic)?;
162 |
163 | Ok(())
164 | }
165 |
166 | #[tracing::instrument(
167 | name = "send",
168 | skip_all,
169 | fields(payload_size = payload.len())
170 | )]
171 | pub async fn send_raw(&self, payload: &[u8]) -> Result<()> {
172 | self.send_raw_with_headers(payload, None).await
173 | }
174 |
175 |     pub async fn send_serde_json<P: Serialize>(&self, payload: &P) -> Result<()> {
176 | let payload = serde_json::to_vec(payload)?;
177 | self.send_raw(&payload).await
178 | }
179 |
180 | #[tracing::instrument(
181 | name = "send",
182 | skip_all,
183 | fields(payload_size = payload.len(), delay)
184 | )]
185 | pub async fn send_raw_scheduled(&self, payload: &[u8], delay: Duration) -> Result<()> {
186 | let mut headers = FieldTable::default();
187 |
188 | let delay_ms: u32 = delay
189 | .as_millis()
190 | .try_into()
191 | .map_err(|_| QueueError::Generic("delay is too large".into()))?;
192 | headers.insert("x-delay".into(), AMQPValue::LongUInt(delay_ms));
193 |
194 | self.send_raw_with_headers(payload, Some(headers)).await
195 | }
196 |
197 |     pub async fn send_serde_json_scheduled<P: Serialize>(
198 | &self,
199 | payload: &P,
200 | delay: Duration,
201 | ) -> Result<()> {
202 | let payload = serde_json::to_vec(payload)?;
203 | self.send_raw_scheduled(&payload, delay).await
204 | }
205 |
206 | pub async fn redrive_dlq(&self) -> Result<()> {
207 | Err(QueueError::Unsupported(
208 | "redrive_dlq is not supported by RabbitMqBackend",
209 | ))
210 | }
211 | }
212 |
213 | impl crate::QueueProducer for RabbitMqProducer {
214 |     type Payload = Vec<u8>;
215 | omni_delegate!(send_raw, send_serde_json, redrive_dlq);
216 | }
217 | impl crate::ScheduledQueueProducer for RabbitMqProducer {
218 | omni_delegate!(send_raw_scheduled, send_serde_json_scheduled);
219 | }
220 |
221 | pub struct RabbitMqConsumer {
222 | consumer: Consumer,
223 | requeue_on_nack: bool,
224 | }
225 |
226 | impl RabbitMqConsumer {
227 | fn wrap_delivery(&self, delivery: lapin::message::Delivery) -> Delivery {
228 | Delivery::new(
229 | delivery.data,
230 | RabbitMqAcker {
231 | acker: Some(delivery.acker),
232 | requeue_on_nack: self.requeue_on_nack,
233 | },
234 | )
235 | }
236 |
237 |     pub async fn receive(&mut self) -> Result<Delivery> {
238 | let mut stream =
239 | self.consumer
240 | .clone()
241 |                 .map(|l: Result<lapin::message::Delivery, lapin::Error>| {
242 | let l = l.map_err(QueueError::generic)?;
243 | Ok(self.wrap_delivery(l))
244 | });
245 |
246 | stream.next().await.ok_or(QueueError::NoData)?
247 | }
248 |
249 | pub async fn receive_all(
250 | &mut self,
251 | max_messages: usize,
252 | deadline: Duration,
253 |     ) -> Result<Vec<Delivery>> {
254 | let mut stream = self.consumer.clone().map(
255 |             |l: Result<lapin::message::Delivery, lapin::Error>| -> Result<Delivery> {
256 | let l = l.map_err(QueueError::generic)?;
257 | Ok(self.wrap_delivery(l))
258 | },
259 | );
260 | let start = Instant::now();
261 | let mut out = Vec::with_capacity(max_messages);
262 | match tokio::time::timeout(deadline, stream.next()).await {
263 | Ok(Some(x)) => out.push(x?),
264 | // Timeouts and stream termination
265 | Err(_) | Ok(None) => return Ok(out),
266 | }
267 |
268 | if max_messages > 1 {
269 | // `now_or_never` will break the loop if no ready items are already
270 | // buffered in the stream. This should allow us to opportunistically
271 | // fill up the buffer in the remaining time.
272 | while let Some(Some(x)) = stream.next().now_or_never() {
273 | out.push(x?);
274 | if out.len() >= max_messages || start.elapsed() >= deadline {
275 | break;
276 | }
277 | }
278 | }
279 | Ok(out)
280 | }
281 | }
282 |
283 | impl crate::QueueConsumer for RabbitMqConsumer {
284 | type Payload = Vec;
285 | omni_delegate!(receive, receive_all);
286 | }
287 |
288 | struct RabbitMqAcker {
289 |     acker: Option<LapinAcker>,
290 | requeue_on_nack: bool,
291 | }
292 |
293 | impl Acker for RabbitMqAcker {
294 | async fn ack(&mut self) -> Result<()> {
295 | self.acker
296 | .take()
297 | .ok_or(QueueError::CannotAckOrNackTwice)?
298 | .ack(BasicAckOptions { multiple: false })
299 | .await
300 | .map(|_| ())
301 | .map_err(QueueError::generic)
302 | }
303 |
304 | async fn nack(&mut self) -> Result<()> {
305 | self.acker
306 | .take()
307 | .ok_or(QueueError::CannotAckOrNackTwice)?
308 | .nack(BasicNackOptions {
309 | requeue: self.requeue_on_nack,
310 | multiple: false,
311 | })
312 | .await
313 | .map(|_| ())
314 | .map_err(QueueError::generic)
315 | }
316 |
317 | async fn set_ack_deadline(&mut self, _duration: Duration) -> Result<()> {
318 | Err(QueueError::Unsupported(
319 | "set_ack_deadline is not supported by RabbitMQ",
320 | ))
321 | }
322 | }
323 |
--------------------------------------------------------------------------------
/omniqueue/src/backends/redis/cluster.rs:
--------------------------------------------------------------------------------
1 | use redis::{
2 | cluster::{ClusterClient, ClusterClientBuilder},
3 | cluster_routing::{MultipleNodeRoutingInfo, ResponsePolicy, RoutingInfo},
4 | ErrorKind, FromRedisValue, IntoConnectionInfo, RedisError,
5 | };
6 |
7 | /// ConnectionManager that implements `bb8::ManageConnection` and supports
8 | /// asynchronous clustered connections via `redis::cluster::ClusterClient`
9 | #[derive(Clone)]
10 | pub struct RedisClusterConnectionManager {
11 | client: ClusterClient,
12 | }
13 |
14 | impl RedisClusterConnectionManager {
15 |     pub fn new<T: IntoConnectionInfo>(
16 |         info: T,
17 |     ) -> Result<Self, RedisError> {
18 | Ok(RedisClusterConnectionManager {
19 | client: ClusterClientBuilder::new(vec![info]).build()?,
20 | })
21 | }
22 | }
23 |
24 | impl bb8::ManageConnection for RedisClusterConnectionManager {
25 | type Connection = redis::cluster_async::ClusterConnection;
26 | type Error = RedisError;
27 |
28 |     async fn connect(&self) -> Result<Self::Connection, Self::Error> {
29 | self.client.get_async_connection().await
30 | }
31 |
32 | async fn is_valid(&self, conn: &mut Self::Connection) -> Result<(), Self::Error> {
33 | let pong = conn
34 | .route_command(
35 | &redis::cmd("PING"),
36 | RoutingInfo::MultiNode((
37 | MultipleNodeRoutingInfo::AllMasters,
38 | Some(ResponsePolicy::OneSucceeded),
39 | )),
40 | )
41 | .await
42 | .and_then(|v| String::from_redis_value(&v))?;
43 | match pong.as_str() {
44 | "PONG" => Ok(()),
45 | _ => Err((ErrorKind::ResponseError, "ping request").into()),
46 | }
47 | }
48 |
49 | fn has_broken(&self, _: &mut Self::Connection) -> bool {
50 | false
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/omniqueue/src/backends/redis/fallback.rs:
--------------------------------------------------------------------------------
1 | //! Implementation of the main queue using two lists instead of redis streams,
2 | //! for compatibility with redis versions older than 6.2.0.
3 |
4 | use std::time::Duration;
5 |
6 | use bb8::ManageConnection;
7 | use redis::AsyncCommands;
8 | use svix_ksuid::{KsuidLike as _, KsuidMs};
9 | use time::OffsetDateTime;
10 | use tracing::{error, trace, warn};
11 |
12 | use super::{
13 | internal_from_list, internal_to_list_payload, DeadLetterQueueConfig, InternalPayload,
14 | InternalPayloadOwned, RawPayload, RedisConnection, RedisConsumer, RedisProducer,
15 | };
16 | use crate::{queue::Acker, Delivery, QueueError, Result};
17 |
18 | pub(super) async fn send_raw<R: RedisConnection>(
19 |     producer: &RedisProducer<R>,
20 |     payload: &[u8],
21 | ) -> Result<()> {
22 | producer
23 | .redis
24 | .get()
25 | .await
26 | .map_err(QueueError::generic)?
27 | .lpush(
28 | &producer.queue_key,
29 | internal_to_list_payload(InternalPayload::new(payload)),
30 | )
31 | .await
32 | .map_err(QueueError::generic)
33 | }
34 |
35 | pub(super) async fn receive<R: RedisConnection>(consumer: &RedisConsumer<R>) -> Result<Delivery> {
36 | let res = receive_with_timeout(consumer, Duration::ZERO).await?;
37 | res.ok_or_else(|| QueueError::Generic("No data".into()))
38 | }
39 |
40 | pub(super) async fn receive_all<R: RedisConnection>(
41 |     consumer: &RedisConsumer<R>,
42 |     deadline: Duration,
43 |     _max_messages: usize,
44 | ) -> Result<Vec<Delivery>> {
45 | // FIXME: Run up to max_messages RPOPLPUSH'es until there is a null reply?
46 | let delivery = receive_with_timeout(consumer, deadline).await?;
47 | Ok(delivery.into_iter().collect())
48 | }
49 |
50 | async fn receive_with_timeout<R: RedisConnection>(
51 |     consumer: &RedisConsumer<R>,
52 |     timeout: Duration,
53 | ) -> Result<Option<Delivery>>