├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── config.yml
│   ├── dependabot.yml
│   └── workflows
│       └── rust.yml
├── .gitignore
├── .sqlx
│   ├── query-015acc0b6340889d791aa6b3a022f0fce00b58c416604ef98df32fa16017ef38.json
│   ├── query-03c2ac38e6ec2801eed7e5b19076eafcc589f32e2424460b0c345e6c477a1b28.json
│   ├── query-040f20db39c70255af32875141672da4cc97ae685f948cbd7798dd287b1dabfb.json
│   ├── query-05abadcb1abc6591d8b59bffd7cfe7a40ce6e5d935d34bb377e204acef918ab1.json
│   ├── query-0aabd23ddb629e4b6177789c83104fc8bd6b8d0adf488e86da0d7cc8174bb8c5.json
│   ├── query-0ba3e34a0223cf00d4724fc2502f0565ac0ea1aad6b5bbbf92ea433d0bfe2fa8.json
│   ├── query-0d302d44aacf1e8ee3233b8c4814e7355fc4b57d98550cd43aec5c424ac4cdca.json
│   ├── query-0dece29c8db5fc229c7e3ac6ef549526161e370c453d459ddc717ac8c263ec05.json
│   ├── query-2584080f8ca262aa7526dde00a50ae125a6a8e52f4fc76d05d15f2c6f663d1cd.json
│   ├── query-27475eb9bcb6ae1ebd88ba22d68d7536bb47555e1f5b7c22db8b25552619e6a8.json
│   ├── query-2d6600e95898c5baf3e10780b8110fe0002e6c1056d44cb2534cc47f6e48876e.json
│   ├── query-3aabbeae862994ed4bf90f24d6c7c5992c022ab53683b6a43d86d4ffd294fc79.json
│   ├── query-43c87ad29a6badd238fdf187f8ac5eb350ddb14eb4e115919aed09b33c9a0a4b.json
│   ├── query-451f277986a4d8543187e4266cca5bb8b55246a78583ea7230a264f972f4d0ca.json
│   ├── query-45b1b27f9669db53892c4cfad7d09c0e325cfacbf1c37589300fb48e8d9eac49.json
│   ├── query-4e9a5fc9e05d95f6bec8c7b0fc762e300e5c3843f2bd0613b36f8d8f9ad72e95.json
│   ├── query-54d124a54b2bb28f85b3ee9882f1e103d8e690ea0cb5189411834b9d8b246fc4.json
│   ├── query-56a0f6c70e41d1b89d0b16a4cbbb989aa3da718f76f4f4abf8794e538c4ed2b8.json
│   ├── query-56d62d8d964df1492d1e475e4e4602e811be0a02391b1acb4c687d11e9680795.json
│   ├── query-5948af057d2002149c3864d46a332e133ca32d8d43a2891297424f0215ec42c1.json
│   ├── query-596a1dcc593b0dc55002a47cea8a2e6fa1328977f2a41d01ba092b0854853365.json
│   ├── query-59a613acb1d175d71071b177b3febfb4257711d186adf174b7616545837ca89d.json
│   ├── query-616e1ecc744bf9e743aaed13db27cc77cc74e7f9c164ca0f8dee9091fd955d83.json
│   ├── query-63da920fcf11b0f6fc6cdb39e0dbcc61c3d70196c12097f6609e405ab8a4e38a.json
│   ├── query-6c2de8001f792e42a916f94bbed1894bc00261ccbf6ad59e4ac78a7b0404b953.json
│   ├── query-7513f95eb46920c91e32cf7dac34155bbaac4ee4a5011c110df702eab4c8e03f.json
│   ├── query-75427f58e86684d74b46fedfb5a3612f29270fe025f6d8e4bf9a4445656af703.json
│   ├── query-85a73e45e72618023e0a722bb8a830dd13016316783200f3767114377a3bacfa.json
│   ├── query-8893fcd21ac7b06e9287492e42e56a375b0339d0a19d023c53039e4adcdcef69.json
│   ├── query-8a6beb878ad5d685281a3eb393e7ef4d30b78f14dafe23916ad08b28c4ccc858.json
│   ├── query-8c5868495ee0e66b4b6869f74b06e1e3930ef6ab49a5255816564262943ae5a9.json
│   ├── query-8fc5df7b77183d211aefa0b3209429db4d663fd056e60734ddfd95cb172807a9.json
│   ├── query-9158f9abdc7c3f5c2ffaa876bf3871bbdd3fd31b61869e3ccd9e84aa2e058cfc.json
│   ├── query-9639647f76c40eedcd0649cc5dc0f22809203a0ac8bb32802c7f4db73215a805.json
│   ├── query-96a0895113d5e00a565fe1ad6e9eb1205b4d36e5eef7e5b096fc707ac107c42e.json
│   ├── query-9731fc6ba31c2cc1bd00b8e177c44ca85285a2606f9485a9f38a3585ce48b00b.json
│   ├── query-98aa8d56e8fb6d28480105370b0b6a6d3e68e0e64bdeeed97a04e7597aa43853.json
│   ├── query-98e88f5fc48233b957f267d42a4ea13343d22cfb096ff366d1996cd7894743e8.json
│   ├── query-98f5535ef035447135c267ffb61e3ee1eb5c42a9c3038ccbe7b93c92d393c54f.json
│   ├── query-99f4d5cc8a695112026471d432155221f79ac59531b095076e8867983910571d.json
│   ├── query-a288070a7d1f34fd78d382dc3757557b3f674dd9c46067ed80232cbfbbd56a98.json
│   ├── query-a2e1464d5db4e210aec86ed6c23399eee7ecb84c9b9d4a08e9b3ce1c1b57ec17.json
│   ├── query-a55bfd24f1ca171d27491593b8e96d78ee50e376d08e1a7a873048d7468638d8.json
│   ├── query-a852697a2fe50aa392a54721146ef8dbc6e26843b6e60a737f4d927f34bac1e7.json
│   ├── query-acbcc67ec85d0cf1c80679eb9bf3af6397f5c0ead67a4b1fedfad9f3e1876dd2.json
│   ├── query-b1653fb7a7b49fea06607acc18b775fabc5bb87d0f3eee92b5fc57dc96d2567f.json
│   ├── query-b211278db4a4733dc2a16dfa56ea390053421a45c8254c3cac118f704810f74f.json
│   ├── query-be9089120a2682a52ac8913188f45b904e30185b2afc715ccc7215c150bc756c.json
│   ├── query-c5eed52879c244c536b8b8ad61990e83a881ca89582f53c5edc7f3541c9dc4f6.json
│   ├── query-c82c340f44c57064db3d7ad5742422ced1f6e9628f2f78304f1b1a585e04eaeb.json
│   ├── query-ccf314910c001d4933326838f507380d36a50d63778ed5589d7ffe0fe9f60db3.json
│   ├── query-d1e63e3a8d4d9be55cb5665760152c590e19fba33ccfde199808971b62e9557a.json
│   ├── query-dae0ef841b214a8f0d3981f27aa490d09dd5db2962112316ae892a59fc7f152c.json
│   ├── query-dbcd4dd5bde04b26279a795888276f9369c1322b5ead7cf2f31a3a869afa8e02.json
│   ├── query-de50515666fa9da0b24b0d8db50bd1b03e099ea946c151d821520d1687ad6881.json
│   ├── query-df3288d47a491690460244087d5c0ad37fef221b28efb666cd2bbdcdd849f5b2.json
│   ├── query-e572c3a03a1e78e45550b3d54085cbc4b226df7f0ac392dba81ec76b8e11f26b.json
│   ├── query-f069227d261bdec30ade8d0553bffc2796c3f4914f773ef569526feb0c7062ea.json
│   ├── query-f2bb618b6cad214f8b136bab65a6b56e9e6d909654d2fe7eeb3190a3924fc204.json
│   └── query-f3388664b9d07ca787a450e25b7559c92d8b2ed917ff8313ab75fa411f23ffe2.json
├── CHANGELOG.md
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
├── docker-compose.yaml
├── examples
│   ├── basic
│   │   ├── Cargo.toml
│   │   └── src
│   │       └── main.rs
│   ├── graceful_shutdown
│   │   ├── Cargo.toml
│   │   └── src
│   │       └── main.rs
│   ├── multitask
│   │   ├── Cargo.toml
│   │   └── src
│   │       └── main.rs
│   ├── rag
│   │   ├── Cargo.toml
│   │   ├── src
│   │   │   └── main.rs
│   │   └── system-prompt.llm.txt
│   ├── scheduled
│   │   ├── Cargo.toml
│   │   └── src
│   │       └── main.rs
│   ├── step
│   │   ├── Cargo.toml
│   │   └── src
│   │       └── main.rs
│   └── tracing
│       ├── Cargo.toml
│       └── src
│           └── main.rs
├── migrations
│   ├── 20240921151751_0.sql
│   ├── 20241024174106_1.sql
│   ├── 20241105164503_2.sql
│   ├── 20241110164319_3.sql
│   ├── 20241111174958_4.sql
│   └── 20241126224448_5.sql
├── rustfmt.toml
└── src
    ├── job.rs
    ├── lib.rs
    ├── queue.rs
    ├── scheduler.rs
    ├── task.rs
    ├── task
    │   └── retry_policy.rs
    └── worker.rs
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: [maxcountryman]
4 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: 🐛 Bug Report
3 | about: If something isn't working as expected 🤔.
4 | ---
5 |
6 |
11 |
12 | - [ ] I have looked for existing issues (including closed) about this
13 |
14 | ## Bug Report
15 |
16 | ### Version
17 |
18 |
21 |
22 | ### Platform
23 |
24 |
27 |
28 | ### Crates
29 |
30 |
34 |
35 | ### Description
36 |
37 |
52 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | contact_links:
2 | - name: 🙏 GitHub Discussions
3 | url: https://github.com/maxcountryman/underway/discussions
4 | about: General support, questions-and-answers, feature requests, etc
5 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | updates:
4 | - package-ecosystem: "cargo"
5 | directory: "/"
6 | schedule:
7 | interval: "daily"
8 |
9 | - package-ecosystem: "github-actions"
10 | directory: "/"
11 | schedule:
12 | interval: "daily"
13 |
--------------------------------------------------------------------------------
/.github/workflows/rust.yml:
--------------------------------------------------------------------------------
1 | name: Rust
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request: {}
8 |
9 | env:
10 | CARGO_TERM_COLOR: always
11 |
12 | jobs:
13 | check:
14 | runs-on: ubuntu-latest
15 | env:
16 | SQLX_OFFLINE: true
17 | steps:
18 | - uses: actions/checkout@v4
19 | - run: |
20 | rustup toolchain install nightly --profile minimal --component rustfmt --component clippy
21 | - uses: Swatinem/rust-cache@v2
22 | - name: clippy
23 | run: |
24 | cargo clippy --all --all-targets --all-features -- -Dwarnings
25 | - name: rustfmt
26 | run: |
27 | cargo +nightly fmt --all -- --check
28 |
29 | check-docs:
30 | runs-on: ubuntu-latest
31 | env:
32 | SQLX_OFFLINE: true
33 | steps:
34 | - uses: actions/checkout@v4
35 | - run: |
36 | rustup toolchain install stable --profile minimal
37 | - uses: Swatinem/rust-cache@v2
38 | - name: cargo doc
39 | env:
40 | RUSTDOCFLAGS: "-D rustdoc::broken-intra-doc-links"
41 | run: |
42 | cargo doc --all-features --no-deps
43 |
44 | test-docs:
45 | needs: check
46 | runs-on: ubuntu-latest
47 | env:
48 | SQLX_OFFLINE: true
49 | steps:
50 | - uses: actions/checkout@v4
51 | - run: |
52 | rustup toolchain install nightly --profile minimal
53 | - uses: Swatinem/rust-cache@v2
54 | - name: Run doc tests
55 | run: |
56 | cargo test --all-features --doc
57 |
58 | test-lib:
59 | needs: check
60 | runs-on: ubuntu-latest
61 | container: rust
62 | services:
63 | postgres:
64 | image: postgres
65 | env:
66 | POSTGRES_USER: postgres
67 | POSTGRES_PASSWORD: postgres
68 | POSTGRES_DB: underway
69 | ports:
70 | - 5432:5432
71 | options: >-
72 | --health-cmd pg_isready
73 | --health-interval 10s
74 | --health-timeout 5s
75 | --health-retries 5
76 | env:
77 | DATABASE_URL: postgres://postgres:postgres@postgres:5432/underway
78 | steps:
79 | - uses: actions/checkout@v4
80 | - run: |
81 | rustup toolchain install nightly --profile minimal
82 | cargo install sqlx-cli --no-default-features --features native-tls,postgres
83 | - uses: Swatinem/rust-cache@v2
84 | - name: Run lib tests
85 | run: |
86 | cargo sqlx database setup
87 | cargo test --workspace --all-features --lib
88 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | **/target
2 | Cargo.lock
3 |
--------------------------------------------------------------------------------
/.sqlx/query-015acc0b6340889d791aa6b3a022f0fce00b58c416604ef98df32fa16017ef38.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select task_id, state as \"state: TaskState\", completed_at as \"completed_at: i64\"\n from underway.task_attempt\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "task_id",
9 | "type_info": "Uuid"
10 | },
11 | {
12 | "ordinal": 1,
13 | "name": "state: TaskState",
14 | "type_info": {
15 | "Custom": {
16 | "name": "underway.task_state",
17 | "kind": {
18 | "Enum": [
19 | "pending",
20 | "in_progress",
21 | "succeeded",
22 | "cancelled",
23 | "failed"
24 | ]
25 | }
26 | }
27 | }
28 | },
29 | {
30 | "ordinal": 2,
31 | "name": "completed_at: i64",
32 | "type_info": "Timestamptz"
33 | }
34 | ],
35 | "parameters": {
36 | "Left": []
37 | },
38 | "nullable": [
39 | false,
40 | false,
41 | true
42 | ]
43 | },
44 | "hash": "015acc0b6340889d791aa6b3a022f0fce00b58c416604ef98df32fa16017ef38"
45 | }
46 |
--------------------------------------------------------------------------------
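
Each JSON file under .sqlx is emitted by `cargo sqlx prepare` and caches the compile-time metadata for one sqlx query-macro call site; this cache is what lets the CI jobs above build with SQLX_OFFLINE=true and no live database. As a rough, hypothetical illustration (not the crate's actual code), the entry above would be produced by a call along these lines, assuming a TaskState enum mapped to the underway.task_state Postgres type:

use sqlx::PgPool;

// Hypothetical call site. The `as "state: TaskState"` and `as "completed_at:
// i64"` casts in the SQL override the decoded column types, and `cargo sqlx
// prepare` records the resulting metadata in the JSON file above.
async fn list_attempts(pool: &PgPool) -> sqlx::Result<()> {
    let attempts = sqlx::query!(
        r#"
        select task_id, state as "state: TaskState", completed_at as "completed_at: i64"
        from underway.task_attempt
        "#
    )
    .fetch_all(pool)
    .await?;
    println!("{} attempts recorded", attempts.len());
    Ok(())
}
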
/.sqlx/query-03c2ac38e6ec2801eed7e5b19076eafcc589f32e2424460b0c345e6c477a1b28.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "create schema if not exists underway;",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": []
8 | },
9 | "nullable": []
10 | },
11 | "hash": "03c2ac38e6ec2801eed7e5b19076eafcc589f32e2424460b0c345e6c477a1b28"
12 | }
13 |
--------------------------------------------------------------------------------
/.sqlx/query-040f20db39c70255af32875141672da4cc97ae685f948cbd7798dd287b1dabfb.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select state as \"state: TaskState\", completed_at as \"completed_at: i64\"\n from underway.task where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "state: TaskState",
9 | "type_info": {
10 | "Custom": {
11 | "name": "underway.task_state",
12 | "kind": {
13 | "Enum": [
14 | "pending",
15 | "in_progress",
16 | "succeeded",
17 | "cancelled",
18 | "failed"
19 | ]
20 | }
21 | }
22 | }
23 | },
24 | {
25 | "ordinal": 1,
26 | "name": "completed_at: i64",
27 | "type_info": "Timestamptz"
28 | }
29 | ],
30 | "parameters": {
31 | "Left": [
32 | "Uuid"
33 | ]
34 | },
35 | "nullable": [
36 | false,
37 | true
38 | ]
39 | },
40 | "hash": "040f20db39c70255af32875141672da4cc97ae685f948cbd7798dd287b1dabfb"
41 | }
42 |
--------------------------------------------------------------------------------
/.sqlx/query-05abadcb1abc6591d8b59bffd7cfe7a40ce6e5d935d34bb377e204acef918ab1.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "update underway.task set state = $2 where id = $1",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | {
10 | "Custom": {
11 | "name": "underway.task_state",
12 | "kind": {
13 | "Enum": [
14 | "pending",
15 | "in_progress",
16 | "succeeded",
17 | "cancelled",
18 | "failed"
19 | ]
20 | }
21 | }
22 | }
23 | ]
24 | },
25 | "nullable": []
26 | },
27 | "hash": "05abadcb1abc6591d8b59bffd7cfe7a40ce6e5d935d34bb377e204acef918ab1"
28 | }
29 |
--------------------------------------------------------------------------------
/.sqlx/query-0aabd23ddb629e4b6177789c83104fc8bd6b8d0adf488e86da0d7cc8174bb8c5.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select id, state as \"state: TaskState\" from underway.task where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "id",
9 | "type_info": "Uuid"
10 | },
11 | {
12 | "ordinal": 1,
13 | "name": "state: TaskState",
14 | "type_info": {
15 | "Custom": {
16 | "name": "underway.task_state",
17 | "kind": {
18 | "Enum": [
19 | "pending",
20 | "in_progress",
21 | "succeeded",
22 | "cancelled",
23 | "failed"
24 | ]
25 | }
26 | }
27 | }
28 | }
29 | ],
30 | "parameters": {
31 | "Left": [
32 | "Uuid"
33 | ]
34 | },
35 | "nullable": [
36 | false,
37 | false
38 | ]
39 | },
40 | "hash": "0aabd23ddb629e4b6177789c83104fc8bd6b8d0adf488e86da0d7cc8174bb8c5"
41 | }
42 |
--------------------------------------------------------------------------------
/.sqlx/query-0ba3e34a0223cf00d4724fc2502f0565ac0ea1aad6b5bbbf92ea433d0bfe2fa8.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select last_heartbeat_at as \"last_heartbeat_at: i64\"\n from underway.task\n where id = $1\n and task_queue_name = $2\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "last_heartbeat_at: i64",
9 | "type_info": "Timestamptz"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Uuid",
15 | "Text"
16 | ]
17 | },
18 | "nullable": [
19 | true
20 | ]
21 | },
22 | "hash": "0ba3e34a0223cf00d4724fc2502f0565ac0ea1aad6b5bbbf92ea433d0bfe2fa8"
23 | }
24 |
--------------------------------------------------------------------------------
/.sqlx/query-0d302d44aacf1e8ee3233b8c4814e7355fc4b57d98550cd43aec5c424ac4cdca.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select state as \"state: TaskState\"\n from underway.task\n where input->>'job_id' = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "state: TaskState",
9 | "type_info": {
10 | "Custom": {
11 | "name": "underway.task_state",
12 | "kind": {
13 | "Enum": [
14 | "pending",
15 | "in_progress",
16 | "succeeded",
17 | "cancelled",
18 | "failed"
19 | ]
20 | }
21 | }
22 | }
23 | }
24 | ],
25 | "parameters": {
26 | "Left": [
27 | "Text"
28 | ]
29 | },
30 | "nullable": [
31 | false
32 | ]
33 | },
34 | "hash": "0d302d44aacf1e8ee3233b8c4814e7355fc4b57d98550cd43aec5c424ac4cdca"
35 | }
36 |
--------------------------------------------------------------------------------
/.sqlx/query-0dece29c8db5fc229c7e3ac6ef549526161e370c453d459ddc717ac8c263ec05.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n update underway.task_attempt\n set state = $3,\n updated_at = now(),\n completed_at = now()\n where task_id = $1\n and task_queue_name = $2\n and attempt_number = (\n select attempt_number\n from underway.task_attempt\n where task_id = $1\n and task_queue_name = $2\n and state < $4\n order by attempt_number desc\n limit 1\n )\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | "Text",
10 | {
11 | "Custom": {
12 | "name": "underway.task_state",
13 | "kind": {
14 | "Enum": [
15 | "pending",
16 | "in_progress",
17 | "succeeded",
18 | "cancelled",
19 | "failed"
20 | ]
21 | }
22 | }
23 | },
24 | {
25 | "Custom": {
26 | "name": "underway.task_state",
27 | "kind": {
28 | "Enum": [
29 | "pending",
30 | "in_progress",
31 | "succeeded",
32 | "cancelled",
33 | "failed"
34 | ]
35 | }
36 | }
37 | }
38 | ]
39 | },
40 | "nullable": []
41 | },
42 | "hash": "0dece29c8db5fc229c7e3ac6ef549526161e370c453d459ddc717ac8c263ec05"
43 | }
44 |
--------------------------------------------------------------------------------
/.sqlx/query-2584080f8ca262aa7526dde00a50ae125a6a8e52f4fc76d05d15f2c6f663d1cd.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select exists (\n select 1 from pg_namespace where nspname = 'underway'\n );\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "exists",
9 | "type_info": "Bool"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": []
14 | },
15 | "nullable": [
16 | null
17 | ]
18 | },
19 | "hash": "2584080f8ca262aa7526dde00a50ae125a6a8e52f4fc76d05d15f2c6f663d1cd"
20 | }
21 |
--------------------------------------------------------------------------------
/.sqlx/query-27475eb9bcb6ae1ebd88ba22d68d7536bb47555e1f5b7c22db8b25552619e6a8.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n insert into underway.task_schedule (\n task_queue_name,\n schedule,\n timezone,\n input\n ) values ($1, $2, $3, $4)\n on conflict (task_queue_name) do update\n set\n schedule = excluded.schedule,\n timezone = excluded.timezone,\n input = excluded.input\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Text",
9 | "Text",
10 | "Text",
11 | "Jsonb"
12 | ]
13 | },
14 | "nullable": []
15 | },
16 | "hash": "27475eb9bcb6ae1ebd88ba22d68d7536bb47555e1f5b7c22db8b25552619e6a8"
17 | }
18 |
--------------------------------------------------------------------------------
/.sqlx/query-2d6600e95898c5baf3e10780b8110fe0002e6c1056d44cb2534cc47f6e48876e.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n insert into underway.task (\n id,\n task_queue_name,\n input,\n timeout,\n heartbeat,\n ttl,\n delay,\n retry_policy,\n concurrency_key,\n priority\n )\n select t.id, $1 as task_queue_name, t.input, $2 as timeout, $3 as heartbeat, $4 as ttl, t.delay, $5 as retry_policy, $6 as concurrency_key, $7 as priority\n from unnest($8::uuid[], $9::jsonb[], $10::interval[]) as t(id, input, delay)\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Text",
9 | "Interval",
10 | "Interval",
11 | "Interval",
12 | {
13 | "Custom": {
14 | "name": "underway.task_retry_policy",
15 | "kind": {
16 | "Composite": [
17 | [
18 | "max_attempts",
19 | "Int4"
20 | ],
21 | [
22 | "initial_interval_ms",
23 | "Int4"
24 | ],
25 | [
26 | "max_interval_ms",
27 | "Int4"
28 | ],
29 | [
30 | "backoff_coefficient",
31 | "Float8"
32 | ]
33 | ]
34 | }
35 | }
36 | },
37 | "Text",
38 | "Int4",
39 | "UuidArray",
40 | "JsonbArray",
41 | "IntervalArray"
42 | ]
43 | },
44 | "nullable": []
45 | },
46 | "hash": "2d6600e95898c5baf3e10780b8110fe0002e6c1056d44cb2534cc47f6e48876e"
47 | }
48 |
--------------------------------------------------------------------------------
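
The unnest() insert above is the bulk-enqueue path: per-task values (id, input, delay) are bound once as parallel arrays and zipped into rows server-side, so enqueuing N tasks costs a single round trip. A hedged sketch of the same pattern, simplified to three columns (the real insert also binds timeout, heartbeat, ttl, retry policy, concurrency key, and priority, so this will not run against the actual schema unchanged):

use sqlx::PgPool;
use uuid::Uuid;

// Sketch of the parallel-array bulk insert: `$2::uuid[]` and `$3::jsonb[]`
// are unnested together, pairing ids[i] with inputs[i] into one row each.
async fn enqueue_many(
    pool: &PgPool,
    queue: &str,
    inputs: Vec<serde_json::Value>,
) -> sqlx::Result<()> {
    let ids: Vec<Uuid> = inputs.iter().map(|_| Uuid::new_v4()).collect();
    sqlx::query(
        r#"
        insert into underway.task (id, task_queue_name, input)
        select t.id, $1, t.input
        from unnest($2::uuid[], $3::jsonb[]) as t(id, input)
        "#,
    )
    .bind(queue)
    .bind(&ids)
    .bind(&inputs)
    .execute(pool)
    .await?;
    Ok(())
}
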
/.sqlx/query-3aabbeae862994ed4bf90f24d6c7c5992c022ab53683b6a43d86d4ffd294fc79.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select delay from underway.task\n where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "delay",
9 | "type_info": "Interval"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Uuid"
15 | ]
16 | },
17 | "nullable": [
18 | false
19 | ]
20 | },
21 | "hash": "3aabbeae862994ed4bf90f24d6c7c5992c022ab53683b6a43d86d4ffd294fc79"
22 | }
23 |
--------------------------------------------------------------------------------
/.sqlx/query-43c87ad29a6badd238fdf187f8ac5eb350ddb14eb4e115919aed09b33c9a0a4b.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select timeout\n from underway.task\n where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "timeout",
9 | "type_info": "Interval"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Uuid"
15 | ]
16 | },
17 | "nullable": [
18 | false
19 | ]
20 | },
21 | "hash": "43c87ad29a6badd238fdf187f8ac5eb350ddb14eb4e115919aed09b33c9a0a4b"
22 | }
23 |
--------------------------------------------------------------------------------
/.sqlx/query-451f277986a4d8543187e4266cca5bb8b55246a78583ea7230a264f972f4d0ca.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select retry_policy as \"retry_policy: RetryPolicy\"\n from underway.task\n where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "retry_policy: RetryPolicy",
9 | "type_info": {
10 | "Custom": {
11 | "name": "underway.task_retry_policy",
12 | "kind": {
13 | "Composite": [
14 | [
15 | "max_attempts",
16 | "Int4"
17 | ],
18 | [
19 | "initial_interval_ms",
20 | "Int4"
21 | ],
22 | [
23 | "max_interval_ms",
24 | "Int4"
25 | ],
26 | [
27 | "backoff_coefficient",
28 | "Float8"
29 | ]
30 | ]
31 | }
32 | }
33 | }
34 | }
35 | ],
36 | "parameters": {
37 | "Left": [
38 | "Uuid"
39 | ]
40 | },
41 | "nullable": [
42 | false
43 | ]
44 | },
45 | "hash": "451f277986a4d8543187e4266cca5bb8b55246a78583ea7230a264f972f4d0ca"
46 | }
47 |
--------------------------------------------------------------------------------
/.sqlx/query-45b1b27f9669db53892c4cfad7d09c0e325cfacbf1c37589300fb48e8d9eac49.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select count(*)\n from underway.task\n where state = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "count",
9 | "type_info": "Int8"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | {
15 | "Custom": {
16 | "name": "underway.task_state",
17 | "kind": {
18 | "Enum": [
19 | "pending",
20 | "in_progress",
21 | "succeeded",
22 | "cancelled",
23 | "failed"
24 | ]
25 | }
26 | }
27 | }
28 | ]
29 | },
30 | "nullable": [
31 | null
32 | ]
33 | },
34 | "hash": "45b1b27f9669db53892c4cfad7d09c0e325cfacbf1c37589300fb48e8d9eac49"
35 | }
36 |
--------------------------------------------------------------------------------
/.sqlx/query-4e9a5fc9e05d95f6bec8c7b0fc762e300e5c3843f2bd0613b36f8d8f9ad72e95.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select delay\n from underway.task\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "delay",
9 | "type_info": "Interval"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": []
14 | },
15 | "nullable": [
16 | false
17 | ]
18 | },
19 | "hash": "4e9a5fc9e05d95f6bec8c7b0fc762e300e5c3843f2bd0613b36f8d8f9ad72e95"
20 | }
21 |
--------------------------------------------------------------------------------
/.sqlx/query-54d124a54b2bb28f85b3ee9882f1e103d8e690ea0cb5189411834b9d8b246fc4.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "select pg_notify($1, $2)",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "pg_notify",
9 | "type_info": "Void"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Text",
15 | "Text"
16 | ]
17 | },
18 | "nullable": [
19 | null
20 | ]
21 | },
22 | "hash": "54d124a54b2bb28f85b3ee9882f1e103d8e690ea0cb5189411834b9d8b246fc4"
23 | }
24 |
--------------------------------------------------------------------------------
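
pg_notify is the push half of Postgres LISTEN/NOTIFY: enqueue paths can fire it so that idle workers wake immediately instead of sleeping out a poll interval. A hedged sketch of the listening side using sqlx's PgListener; the channel name is made up for illustration:

use sqlx::postgres::PgListener;

// Block until something calls pg_notify on the watched channel, then return.
// "task_change" is a hypothetical channel name, not necessarily the one this
// crate uses.
async fn wait_for_work(database_url: &str) -> sqlx::Result<()> {
    let mut listener = PgListener::connect(database_url).await?;
    listener.listen("task_change").await?;
    let notification = listener.recv().await?;
    println!("woken by notification: {}", notification.payload());
    Ok(())
}
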
/.sqlx/query-56a0f6c70e41d1b89d0b16a4cbbb989aa3da718f76f4f4abf8794e538c4ed2b8.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n update underway.task_attempt\n set state = $3\n where task_id = $1\n and task_queue_name = $2\n and state = $4\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | "Text",
10 | {
11 | "Custom": {
12 | "name": "underway.task_state",
13 | "kind": {
14 | "Enum": [
15 | "pending",
16 | "in_progress",
17 | "succeeded",
18 | "cancelled",
19 | "failed"
20 | ]
21 | }
22 | }
23 | },
24 | {
25 | "Custom": {
26 | "name": "underway.task_state",
27 | "kind": {
28 | "Enum": [
29 | "pending",
30 | "in_progress",
31 | "succeeded",
32 | "cancelled",
33 | "failed"
34 | ]
35 | }
36 | }
37 | }
38 | ]
39 | },
40 | "nullable": []
41 | },
42 | "hash": "56a0f6c70e41d1b89d0b16a4cbbb989aa3da718f76f4f4abf8794e538c4ed2b8"
43 | }
44 |
--------------------------------------------------------------------------------
/.sqlx/query-56d62d8d964df1492d1e475e4e4602e811be0a02391b1acb4c687d11e9680795.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select priority\n from underway.task\n where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "priority",
9 | "type_info": "Int4"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Uuid"
15 | ]
16 | },
17 | "nullable": [
18 | false
19 | ]
20 | },
21 | "hash": "56d62d8d964df1492d1e475e4e4602e811be0a02391b1acb4c687d11e9680795"
22 | }
23 |
--------------------------------------------------------------------------------
/.sqlx/query-5948af057d2002149c3864d46a332e133ca32d8d43a2891297424f0215ec42c1.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select id, input, retry_policy as \"retry_policy: RetryPolicy\", concurrency_key, priority\n from underway.task\n where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "id",
9 | "type_info": "Uuid"
10 | },
11 | {
12 | "ordinal": 1,
13 | "name": "input",
14 | "type_info": "Jsonb"
15 | },
16 | {
17 | "ordinal": 2,
18 | "name": "retry_policy: RetryPolicy",
19 | "type_info": {
20 | "Custom": {
21 | "name": "underway.task_retry_policy",
22 | "kind": {
23 | "Composite": [
24 | [
25 | "max_attempts",
26 | "Int4"
27 | ],
28 | [
29 | "initial_interval_ms",
30 | "Int4"
31 | ],
32 | [
33 | "max_interval_ms",
34 | "Int4"
35 | ],
36 | [
37 | "backoff_coefficient",
38 | "Float8"
39 | ]
40 | ]
41 | }
42 | }
43 | }
44 | },
45 | {
46 | "ordinal": 3,
47 | "name": "concurrency_key",
48 | "type_info": "Text"
49 | },
50 | {
51 | "ordinal": 4,
52 | "name": "priority",
53 | "type_info": "Int4"
54 | }
55 | ],
56 | "parameters": {
57 | "Left": [
58 | "Uuid"
59 | ]
60 | },
61 | "nullable": [
62 | false,
63 | false,
64 | false,
65 | true,
66 | false
67 | ]
68 | },
69 | "hash": "5948af057d2002149c3864d46a332e133ca32d8d43a2891297424f0215ec42c1"
70 | }
71 |
--------------------------------------------------------------------------------
/.sqlx/query-596a1dcc593b0dc55002a47cea8a2e6fa1328977f2a41d01ba092b0854853365.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n update underway.task\n set state = $3,\n delay = $2,\n updated_at = now()\n where id = $1\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | "Interval",
10 | {
11 | "Custom": {
12 | "name": "underway.task_state",
13 | "kind": {
14 | "Enum": [
15 | "pending",
16 | "in_progress",
17 | "succeeded",
18 | "cancelled",
19 | "failed"
20 | ]
21 | }
22 | }
23 | }
24 | ]
25 | },
26 | "nullable": []
27 | },
28 | "hash": "596a1dcc593b0dc55002a47cea8a2e6fa1328977f2a41d01ba092b0854853365"
29 | }
30 |
--------------------------------------------------------------------------------
/.sqlx/query-59a613acb1d175d71071b177b3febfb4257711d186adf174b7616545837ca89d.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select task_id\n from underway.task_attempt\n where task_id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "task_id",
9 | "type_info": "Uuid"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Uuid"
15 | ]
16 | },
17 | "nullable": [
18 | false
19 | ]
20 | },
21 | "hash": "59a613acb1d175d71071b177b3febfb4257711d186adf174b7616545837ca89d"
22 | }
23 |
--------------------------------------------------------------------------------
/.sqlx/query-616e1ecc744bf9e743aaed13db27cc77cc74e7f9c164ca0f8dee9091fd955d83.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select count(*)::int as \"count!\"\n from underway.task_attempt\n where task_id = $1\n and task_queue_name = $2\n and state = $3\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "count!",
9 | "type_info": "Int4"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Uuid",
15 | "Text",
16 | {
17 | "Custom": {
18 | "name": "underway.task_state",
19 | "kind": {
20 | "Enum": [
21 | "pending",
22 | "in_progress",
23 | "succeeded",
24 | "cancelled",
25 | "failed"
26 | ]
27 | }
28 | }
29 | }
30 | ]
31 | },
32 | "nullable": [
33 | null
34 | ]
35 | },
36 | "hash": "616e1ecc744bf9e743aaed13db27cc77cc74e7f9c164ca0f8dee9091fd955d83"
37 | }
38 |
--------------------------------------------------------------------------------
/.sqlx/query-63da920fcf11b0f6fc6cdb39e0dbcc61c3d70196c12097f6609e405ab8a4e38a.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n delete from underway.task\n where state != $1 and created_at + ttl < now()\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | {
9 | "Custom": {
10 | "name": "underway.task_state",
11 | "kind": {
12 | "Enum": [
13 | "pending",
14 | "in_progress",
15 | "succeeded",
16 | "cancelled",
17 | "failed"
18 | ]
19 | }
20 | }
21 | }
22 | ]
23 | },
24 | "nullable": []
25 | },
26 | "hash": "63da920fcf11b0f6fc6cdb39e0dbcc61c3d70196c12097f6609e405ab8a4e38a"
27 | }
28 |
--------------------------------------------------------------------------------
/.sqlx/query-6c2de8001f792e42a916f94bbed1894bc00261ccbf6ad59e4ac78a7b0404b953.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select schedule, timezone, input from underway.task_schedule where task_queue_name = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "schedule",
9 | "type_info": "Text"
10 | },
11 | {
12 | "ordinal": 1,
13 | "name": "timezone",
14 | "type_info": "Text"
15 | },
16 | {
17 | "ordinal": 2,
18 | "name": "input",
19 | "type_info": "Jsonb"
20 | }
21 | ],
22 | "parameters": {
23 | "Left": [
24 | "Text"
25 | ]
26 | },
27 | "nullable": [
28 | false,
29 | false,
30 | false
31 | ]
32 | },
33 | "hash": "6c2de8001f792e42a916f94bbed1894bc00261ccbf6ad59e4ac78a7b0404b953"
34 | }
35 |
--------------------------------------------------------------------------------
/.sqlx/query-7513f95eb46920c91e32cf7dac34155bbaac4ee4a5011c110df702eab4c8e03f.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select\n id as \"id: TaskId\",\n task_queue_name as \"queue_name\",\n input,\n retry_policy as \"retry_policy: RetryPolicy\",\n timeout,\n heartbeat,\n concurrency_key\n from underway.task\n where input->>'job_id' = $1\n and state = $2\n for update skip locked\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "id: TaskId",
9 | "type_info": "Uuid"
10 | },
11 | {
12 | "ordinal": 1,
13 | "name": "queue_name",
14 | "type_info": "Text"
15 | },
16 | {
17 | "ordinal": 2,
18 | "name": "input",
19 | "type_info": "Jsonb"
20 | },
21 | {
22 | "ordinal": 3,
23 | "name": "retry_policy: RetryPolicy",
24 | "type_info": {
25 | "Custom": {
26 | "name": "underway.task_retry_policy",
27 | "kind": {
28 | "Composite": [
29 | [
30 | "max_attempts",
31 | "Int4"
32 | ],
33 | [
34 | "initial_interval_ms",
35 | "Int4"
36 | ],
37 | [
38 | "max_interval_ms",
39 | "Int4"
40 | ],
41 | [
42 | "backoff_coefficient",
43 | "Float8"
44 | ]
45 | ]
46 | }
47 | }
48 | }
49 | },
50 | {
51 | "ordinal": 4,
52 | "name": "timeout",
53 | "type_info": "Interval"
54 | },
55 | {
56 | "ordinal": 5,
57 | "name": "heartbeat",
58 | "type_info": "Interval"
59 | },
60 | {
61 | "ordinal": 6,
62 | "name": "concurrency_key",
63 | "type_info": "Text"
64 | }
65 | ],
66 | "parameters": {
67 | "Left": [
68 | "Text",
69 | {
70 | "Custom": {
71 | "name": "underway.task_state",
72 | "kind": {
73 | "Enum": [
74 | "pending",
75 | "in_progress",
76 | "succeeded",
77 | "cancelled",
78 | "failed"
79 | ]
80 | }
81 | }
82 | }
83 | ]
84 | },
85 | "nullable": [
86 | false,
87 | false,
88 | false,
89 | false,
90 | false,
91 | false,
92 | true
93 | ]
94 | },
95 | "hash": "7513f95eb46920c91e32cf7dac34155bbaac4ee4a5011c110df702eab4c8e03f"
96 | }
97 |
--------------------------------------------------------------------------------
/.sqlx/query-75427f58e86684d74b46fedfb5a3612f29270fe025f6d8e4bf9a4445656af703.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select concurrency_key\n from underway.task\n where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "concurrency_key",
9 | "type_info": "Text"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Uuid"
15 | ]
16 | },
17 | "nullable": [
18 | true
19 | ]
20 | },
21 | "hash": "75427f58e86684d74b46fedfb5a3612f29270fe025f6d8e4bf9a4445656af703"
22 | }
23 |
--------------------------------------------------------------------------------
/.sqlx/query-85a73e45e72618023e0a722bb8a830dd13016316783200f3767114377a3bacfa.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select id, state as \"state: TaskState\"\n from underway.task\n where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "id",
9 | "type_info": "Uuid"
10 | },
11 | {
12 | "ordinal": 1,
13 | "name": "state: TaskState",
14 | "type_info": {
15 | "Custom": {
16 | "name": "underway.task_state",
17 | "kind": {
18 | "Enum": [
19 | "pending",
20 | "in_progress",
21 | "succeeded",
22 | "cancelled",
23 | "failed"
24 | ]
25 | }
26 | }
27 | }
28 | }
29 | ],
30 | "parameters": {
31 | "Left": [
32 | "Uuid"
33 | ]
34 | },
35 | "nullable": [
36 | false,
37 | false
38 | ]
39 | },
40 | "hash": "85a73e45e72618023e0a722bb8a830dd13016316783200f3767114377a3bacfa"
41 | }
42 |
--------------------------------------------------------------------------------
/.sqlx/query-8893fcd21ac7b06e9287492e42e56a375b0339d0a19d023c53039e4adcdcef69.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select error_message from underway.task_attempt where task_id = $1 order by attempt_number desc\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "error_message",
9 | "type_info": "Text"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Uuid"
15 | ]
16 | },
17 | "nullable": [
18 | true
19 | ]
20 | },
21 | "hash": "8893fcd21ac7b06e9287492e42e56a375b0339d0a19d023c53039e4adcdcef69"
22 | }
23 |
--------------------------------------------------------------------------------
/.sqlx/query-8a6beb878ad5d685281a3eb393e7ef4d30b78f14dafe23916ad08b28c4ccc858.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n delete from underway.task_schedule\n where task_queue_name = $1\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Text"
9 | ]
10 | },
11 | "nullable": []
12 | },
13 | "hash": "8a6beb878ad5d685281a3eb393e7ef4d30b78f14dafe23916ad08b28c4ccc858"
14 | }
15 |
--------------------------------------------------------------------------------
/.sqlx/query-8c5868495ee0e66b4b6869f74b06e1e3930ef6ab49a5255816564262943ae5a9.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n update underway.task\n set state = $2,\n updated_at = now(),\n completed_at = now()\n where id = $1 and state < $3\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | {
10 | "Custom": {
11 | "name": "underway.task_state",
12 | "kind": {
13 | "Enum": [
14 | "pending",
15 | "in_progress",
16 | "succeeded",
17 | "cancelled",
18 | "failed"
19 | ]
20 | }
21 | }
22 | },
23 | {
24 | "Custom": {
25 | "name": "underway.task_state",
26 | "kind": {
27 | "Enum": [
28 | "pending",
29 | "in_progress",
30 | "succeeded",
31 | "cancelled",
32 | "failed"
33 | ]
34 | }
35 | }
36 | }
37 | ]
38 | },
39 | "nullable": []
40 | },
41 | "hash": "8c5868495ee0e66b4b6869f74b06e1e3930ef6ab49a5255816564262943ae5a9"
42 | }
43 |
--------------------------------------------------------------------------------
/.sqlx/query-8fc5df7b77183d211aefa0b3209429db4d663fd056e60734ddfd95cb172807a9.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "select state as \"state: TaskState\" from underway.task where id = $1",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "state: TaskState",
9 | "type_info": {
10 | "Custom": {
11 | "name": "underway.task_state",
12 | "kind": {
13 | "Enum": [
14 | "pending",
15 | "in_progress",
16 | "succeeded",
17 | "cancelled",
18 | "failed"
19 | ]
20 | }
21 | }
22 | }
23 | }
24 | ],
25 | "parameters": {
26 | "Left": [
27 | "Uuid"
28 | ]
29 | },
30 | "nullable": [
31 | false
32 | ]
33 | },
34 | "hash": "8fc5df7b77183d211aefa0b3209429db4d663fd056e60734ddfd95cb172807a9"
35 | }
36 |
--------------------------------------------------------------------------------
/.sqlx/query-9158f9abdc7c3f5c2ffaa876bf3871bbdd3fd31b61869e3ccd9e84aa2e058cfc.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n with next_attempt as (\n select coalesce(max(attempt_number) + 1, 1) as attempt_number\n from underway.task_attempt\n where task_id = $1\n and task_queue_name = $2\n )\n insert into underway.task_attempt (\n task_id,\n task_queue_name,\n state,\n attempt_number\n )\n values (\n $1,\n $2,\n $3,\n (select attempt_number from next_attempt)\n )\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | "Text",
10 | {
11 | "Custom": {
12 | "name": "underway.task_state",
13 | "kind": {
14 | "Enum": [
15 | "pending",
16 | "in_progress",
17 | "succeeded",
18 | "cancelled",
19 | "failed"
20 | ]
21 | }
22 | }
23 | }
24 | ]
25 | },
26 | "nullable": []
27 | },
28 | "hash": "9158f9abdc7c3f5c2ffaa876bf3871bbdd3fd31b61869e3ccd9e84aa2e058cfc"
29 | }
30 |
--------------------------------------------------------------------------------
/.sqlx/query-9639647f76c40eedcd0649cc5dc0f22809203a0ac8bb32802c7f4db73215a805.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select schedule, timezone, input from underway.task_schedule where task_queue_name = $1\n limit 1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "schedule",
9 | "type_info": "Text"
10 | },
11 | {
12 | "ordinal": 1,
13 | "name": "timezone",
14 | "type_info": "Text"
15 | },
16 | {
17 | "ordinal": 2,
18 | "name": "input",
19 | "type_info": "Jsonb"
20 | }
21 | ],
22 | "parameters": {
23 | "Left": [
24 | "Text"
25 | ]
26 | },
27 | "nullable": [
28 | false,
29 | false,
30 | false
31 | ]
32 | },
33 | "hash": "9639647f76c40eedcd0649cc5dc0f22809203a0ac8bb32802c7f4db73215a805"
34 | }
35 |
--------------------------------------------------------------------------------
/.sqlx/query-96a0895113d5e00a565fe1ad6e9eb1205b4d36e5eef7e5b096fc707ac107c42e.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n update underway.task\n set task_queue_name = $2\n where id = $1\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | "Text"
10 | ]
11 | },
12 | "nullable": []
13 | },
14 | "hash": "96a0895113d5e00a565fe1ad6e9eb1205b4d36e5eef7e5b096fc707ac107c42e"
15 | }
16 |
--------------------------------------------------------------------------------
/.sqlx/query-9731fc6ba31c2cc1bd00b8e177c44ca85285a2606f9485a9f38a3585ce48b00b.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n update underway.task\n set updated_at = now(),\n last_heartbeat_at = now()\n where id = $1\n and task_queue_name = $2\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | "Text"
10 | ]
11 | },
12 | "nullable": []
13 | },
14 | "hash": "9731fc6ba31c2cc1bd00b8e177c44ca85285a2606f9485a9f38a3585ce48b00b"
15 | }
16 |
--------------------------------------------------------------------------------
/.sqlx/query-98aa8d56e8fb6d28480105370b0b6a6d3e68e0e64bdeeed97a04e7597aa43853.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "select pg_try_advisory_xact_lock(hashtext($1))",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "pg_try_advisory_xact_lock",
9 | "type_info": "Bool"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Text"
15 | ]
16 | },
17 | "nullable": [
18 | null
19 | ]
20 | },
21 | "hash": "98aa8d56e8fb6d28480105370b0b6a6d3e68e0e64bdeeed97a04e7597aa43853"
22 | }
23 |
--------------------------------------------------------------------------------
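
pg_try_advisory_xact_lock(hashtext($1)) turns a string key into a best-effort, transaction-scoped mutex: the first transaction to ask gets true, later callers get false immediately rather than blocking, and the lock is released automatically at commit or rollback. A small sketch of how such a guard is typically used (the key and surrounding logic are illustrative):

use sqlx::PgPool;

// Try to become the exclusive holder for `key` within one transaction. If
// `acquired` is false, another process already holds the lock and we simply
// skip the work rather than waiting on it.
async fn with_advisory_lock(pool: &PgPool, key: &str) -> sqlx::Result<bool> {
    let mut tx = pool.begin().await?;
    let acquired: bool =
        sqlx::query_scalar("select pg_try_advisory_xact_lock(hashtext($1))")
            .bind(key)
            .fetch_one(&mut *tx)
            .await?;
    if acquired {
        // ...do the work that must not run concurrently...
    }
    tx.commit().await?; // the advisory lock is released here
    Ok(acquired)
}
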
/.sqlx/query-98e88f5fc48233b957f267d42a4ea13343d22cfb096ff366d1996cd7894743e8.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n update underway.task_attempt\n set state = $3,\n updated_at = now(),\n error_message = $4\n where task_id = $1\n and task_queue_name = $2\n and attempt_number = (\n select attempt_number\n from underway.task_attempt\n where task_id = $1\n and task_queue_name = $2\n order by attempt_number desc\n limit 1\n )\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | "Text",
10 | {
11 | "Custom": {
12 | "name": "underway.task_state",
13 | "kind": {
14 | "Enum": [
15 | "pending",
16 | "in_progress",
17 | "succeeded",
18 | "cancelled",
19 | "failed"
20 | ]
21 | }
22 | }
23 | },
24 | "Text"
25 | ]
26 | },
27 | "nullable": []
28 | },
29 | "hash": "98e88f5fc48233b957f267d42a4ea13343d22cfb096ff366d1996cd7894743e8"
30 | }
31 |
--------------------------------------------------------------------------------
/.sqlx/query-98f5535ef035447135c267ffb61e3ee1eb5c42a9c3038ccbe7b93c92d393c54f.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n update underway.task\n set state = $2,\n updated_at = now(),\n completed_at = now()\n where id = $1\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | {
10 | "Custom": {
11 | "name": "underway.task_state",
12 | "kind": {
13 | "Enum": [
14 | "pending",
15 | "in_progress",
16 | "succeeded",
17 | "cancelled",
18 | "failed"
19 | ]
20 | }
21 | }
22 | }
23 | ]
24 | },
25 | "nullable": []
26 | },
27 | "hash": "98f5535ef035447135c267ffb61e3ee1eb5c42a9c3038ccbe7b93c92d393c54f"
28 | }
29 |
--------------------------------------------------------------------------------
/.sqlx/query-99f4d5cc8a695112026471d432155221f79ac59531b095076e8867983910571d.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select delay\n from underway.task\n where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "delay",
9 | "type_info": "Interval"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Uuid"
15 | ]
16 | },
17 | "nullable": [
18 | false
19 | ]
20 | },
21 | "hash": "99f4d5cc8a695112026471d432155221f79ac59531b095076e8867983910571d"
22 | }
23 |
--------------------------------------------------------------------------------
/.sqlx/query-a288070a7d1f34fd78d382dc3757557b3f674dd9c46067ed80232cbfbbd56a98.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select state as \"state: TaskState\"\n from underway.task_attempt\n where task_id = $1 and state = $2\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "state: TaskState",
9 | "type_info": {
10 | "Custom": {
11 | "name": "underway.task_state",
12 | "kind": {
13 | "Enum": [
14 | "pending",
15 | "in_progress",
16 | "succeeded",
17 | "cancelled",
18 | "failed"
19 | ]
20 | }
21 | }
22 | }
23 | }
24 | ],
25 | "parameters": {
26 | "Left": [
27 | "Uuid",
28 | {
29 | "Custom": {
30 | "name": "underway.task_state",
31 | "kind": {
32 | "Enum": [
33 | "pending",
34 | "in_progress",
35 | "succeeded",
36 | "cancelled",
37 | "failed"
38 | ]
39 | }
40 | }
41 | }
42 | ]
43 | },
44 | "nullable": [
45 | false
46 | ]
47 | },
48 | "hash": "a288070a7d1f34fd78d382dc3757557b3f674dd9c46067ed80232cbfbbd56a98"
49 | }
50 |
--------------------------------------------------------------------------------
/.sqlx/query-a2e1464d5db4e210aec86ed6c23399eee7ecb84c9b9d4a08e9b3ce1c1b57ec17.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select state as \"state: TaskState\", delay from underway.task where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "state: TaskState",
9 | "type_info": {
10 | "Custom": {
11 | "name": "underway.task_state",
12 | "kind": {
13 | "Enum": [
14 | "pending",
15 | "in_progress",
16 | "succeeded",
17 | "cancelled",
18 | "failed"
19 | ]
20 | }
21 | }
22 | }
23 | },
24 | {
25 | "ordinal": 1,
26 | "name": "delay",
27 | "type_info": "Interval"
28 | }
29 | ],
30 | "parameters": {
31 | "Left": [
32 | "Uuid"
33 | ]
34 | },
35 | "nullable": [
36 | false,
37 | false
38 | ]
39 | },
40 | "hash": "a2e1464d5db4e210aec86ed6c23399eee7ecb84c9b9d4a08e9b3ce1c1b57ec17"
41 | }
42 |
--------------------------------------------------------------------------------
/.sqlx/query-a55bfd24f1ca171d27491593b8e96d78ee50e376d08e1a7a873048d7468638d8.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select state as \"state: TaskState\" from underway.task where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "state: TaskState",
9 | "type_info": {
10 | "Custom": {
11 | "name": "underway.task_state",
12 | "kind": {
13 | "Enum": [
14 | "pending",
15 | "in_progress",
16 | "succeeded",
17 | "cancelled",
18 | "failed"
19 | ]
20 | }
21 | }
22 | }
23 | }
24 | ],
25 | "parameters": {
26 | "Left": [
27 | "Uuid"
28 | ]
29 | },
30 | "nullable": [
31 | false
32 | ]
33 | },
34 | "hash": "a55bfd24f1ca171d27491593b8e96d78ee50e376d08e1a7a873048d7468638d8"
35 | }
36 |
--------------------------------------------------------------------------------
/.sqlx/query-a852697a2fe50aa392a54721146ef8dbc6e26843b6e60a737f4d927f34bac1e7.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select schedule\n from underway.task_schedule\n where task_queue_name = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "schedule",
9 | "type_info": "Text"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Text"
15 | ]
16 | },
17 | "nullable": [
18 | false
19 | ]
20 | },
21 | "hash": "a852697a2fe50aa392a54721146ef8dbc6e26843b6e60a737f4d927f34bac1e7"
22 | }
23 |
--------------------------------------------------------------------------------
/.sqlx/query-acbcc67ec85d0cf1c80679eb9bf3af6397f5c0ead67a4b1fedfad9f3e1876dd2.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select ttl\n from underway.task\n where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "ttl",
9 | "type_info": "Interval"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Uuid"
15 | ]
16 | },
17 | "nullable": [
18 | false
19 | ]
20 | },
21 | "hash": "acbcc67ec85d0cf1c80679eb9bf3af6397f5c0ead67a4b1fedfad9f3e1876dd2"
22 | }
23 |
--------------------------------------------------------------------------------
/.sqlx/query-b1653fb7a7b49fea06607acc18b775fabc5bb87d0f3eee92b5fc57dc96d2567f.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select\n task_id,\n state as \"state: TaskState\",\n completed_at as \"completed_at: i64\"\n from underway.task_attempt\n where task_id = $1\n order by started_at\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "task_id",
9 | "type_info": "Uuid"
10 | },
11 | {
12 | "ordinal": 1,
13 | "name": "state: TaskState",
14 | "type_info": {
15 | "Custom": {
16 | "name": "underway.task_state",
17 | "kind": {
18 | "Enum": [
19 | "pending",
20 | "in_progress",
21 | "succeeded",
22 | "cancelled",
23 | "failed"
24 | ]
25 | }
26 | }
27 | }
28 | },
29 | {
30 | "ordinal": 2,
31 | "name": "completed_at: i64",
32 | "type_info": "Timestamptz"
33 | }
34 | ],
35 | "parameters": {
36 | "Left": [
37 | "Uuid"
38 | ]
39 | },
40 | "nullable": [
41 | false,
42 | false,
43 | true
44 | ]
45 | },
46 | "hash": "b1653fb7a7b49fea06607acc18b775fabc5bb87d0f3eee92b5fc57dc96d2567f"
47 | }
48 |
--------------------------------------------------------------------------------
/.sqlx/query-b211278db4a4733dc2a16dfa56ea390053421a45c8254c3cac118f704810f74f.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n update underway.task\n set updated_at = now()\n where id = $1\n and task_queue_name = $2\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | "Text"
10 | ]
11 | },
12 | "nullable": []
13 | },
14 | "hash": "b211278db4a4733dc2a16dfa56ea390053421a45c8254c3cac118f704810f74f"
15 | }
16 |
--------------------------------------------------------------------------------
/.sqlx/query-be9089120a2682a52ac8913188f45b904e30185b2afc715ccc7215c150bc756c.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select heartbeat\n from underway.task\n where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "heartbeat",
9 | "type_info": "Interval"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Uuid"
15 | ]
16 | },
17 | "nullable": [
18 | false
19 | ]
20 | },
21 | "hash": "be9089120a2682a52ac8913188f45b904e30185b2afc715ccc7215c150bc756c"
22 | }
23 |
--------------------------------------------------------------------------------
/.sqlx/query-c5eed52879c244c536b8b8ad61990e83a881ca89582f53c5edc7f3541c9dc4f6.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n update underway.task\n set last_heartbeat_at = now() - interval '30 seconds'\n where id = $1\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid"
9 | ]
10 | },
11 | "nullable": []
12 | },
13 | "hash": "c5eed52879c244c536b8b8ad61990e83a881ca89582f53c5edc7f3541c9dc4f6"
14 | }
15 |
--------------------------------------------------------------------------------
/.sqlx/query-c82c340f44c57064db3d7ad5742422ced1f6e9628f2f78304f1b1a585e04eaeb.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select schedule, timezone, input\n from underway.task_schedule\n where task_queue_name = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "schedule",
9 | "type_info": "Text"
10 | },
11 | {
12 | "ordinal": 1,
13 | "name": "timezone",
14 | "type_info": "Text"
15 | },
16 | {
17 | "ordinal": 2,
18 | "name": "input",
19 | "type_info": "Jsonb"
20 | }
21 | ],
22 | "parameters": {
23 | "Left": [
24 | "Text"
25 | ]
26 | },
27 | "nullable": [
28 | false,
29 | false,
30 | false
31 | ]
32 | },
33 | "hash": "c82c340f44c57064db3d7ad5742422ced1f6e9628f2f78304f1b1a585e04eaeb"
34 | }
35 |
--------------------------------------------------------------------------------
/.sqlx/query-ccf314910c001d4933326838f507380d36a50d63778ed5589d7ffe0fe9f60db3.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "set local search_path to underway;",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": []
8 | },
9 | "nullable": []
10 | },
11 | "hash": "ccf314910c001d4933326838f507380d36a50d63778ed5589d7ffe0fe9f60db3"
12 | }
13 |
--------------------------------------------------------------------------------
/.sqlx/query-d1e63e3a8d4d9be55cb5665760152c590e19fba33ccfde199808971b62e9557a.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n with available_task as (\n select id\n from underway.task\n where task_queue_name = $1\n and (\n -- Find pending tasks...\n state = $2\n -- ...Or look for stalled tasks.\n or (\n state = $3\n -- Has heartbeat stalled?\n and last_heartbeat_at < now() - heartbeat\n -- Are there remaining retries?\n and (retry_policy).max_attempts > (\n select count(*)\n from underway.task_attempt\n where task_queue_name = $1\n and task_id = id\n )\n )\n )\n and created_at + delay <= now()\n order by\n priority desc,\n created_at,\n id\n limit 1\n for update skip locked\n )\n update underway.task t\n set state = $3,\n last_attempt_at = now(),\n last_heartbeat_at = now()\n from available_task\n where t.task_queue_name = $1\n and t.id = available_task.id\n returning\n t.id as \"id: TaskId\",\n t.task_queue_name as \"queue_name\",\n t.input,\n t.timeout,\n t.heartbeat,\n t.retry_policy as \"retry_policy: RetryPolicy\",\n t.concurrency_key\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "id: TaskId",
9 | "type_info": "Uuid"
10 | },
11 | {
12 | "ordinal": 1,
13 | "name": "queue_name",
14 | "type_info": "Text"
15 | },
16 | {
17 | "ordinal": 2,
18 | "name": "input",
19 | "type_info": "Jsonb"
20 | },
21 | {
22 | "ordinal": 3,
23 | "name": "timeout",
24 | "type_info": "Interval"
25 | },
26 | {
27 | "ordinal": 4,
28 | "name": "heartbeat",
29 | "type_info": "Interval"
30 | },
31 | {
32 | "ordinal": 5,
33 | "name": "retry_policy: RetryPolicy",
34 | "type_info": {
35 | "Custom": {
36 | "name": "underway.task_retry_policy",
37 | "kind": {
38 | "Composite": [
39 | [
40 | "max_attempts",
41 | "Int4"
42 | ],
43 | [
44 | "initial_interval_ms",
45 | "Int4"
46 | ],
47 | [
48 | "max_interval_ms",
49 | "Int4"
50 | ],
51 | [
52 | "backoff_coefficient",
53 | "Float8"
54 | ]
55 | ]
56 | }
57 | }
58 | }
59 | },
60 | {
61 | "ordinal": 6,
62 | "name": "concurrency_key",
63 | "type_info": "Text"
64 | }
65 | ],
66 | "parameters": {
67 | "Left": [
68 | "Text",
69 | {
70 | "Custom": {
71 | "name": "underway.task_state",
72 | "kind": {
73 | "Enum": [
74 | "pending",
75 | "in_progress",
76 | "succeeded",
77 | "cancelled",
78 | "failed"
79 | ]
80 | }
81 | }
82 | },
83 | {
84 | "Custom": {
85 | "name": "underway.task_state",
86 | "kind": {
87 | "Enum": [
88 | "pending",
89 | "in_progress",
90 | "succeeded",
91 | "cancelled",
92 | "failed"
93 | ]
94 | }
95 | }
96 | }
97 | ]
98 | },
99 | "nullable": [
100 | false,
101 | false,
102 | false,
103 | false,
104 | false,
105 | false,
106 | true
107 | ]
108 | },
109 | "hash": "d1e63e3a8d4d9be55cb5665760152c590e19fba33ccfde199808971b62e9557a"
110 | }
111 |
--------------------------------------------------------------------------------
/.sqlx/query-dae0ef841b214a8f0d3981f27aa490d09dd5db2962112316ae892a59fc7f152c.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n insert into underway.task (\n id,\n task_queue_name,\n input,\n timeout,\n heartbeat,\n ttl,\n delay,\n retry_policy,\n concurrency_key,\n priority\n ) values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | "Text",
10 | "Jsonb",
11 | "Interval",
12 | "Interval",
13 | "Interval",
14 | "Interval",
15 | {
16 | "Custom": {
17 | "name": "underway.task_retry_policy",
18 | "kind": {
19 | "Composite": [
20 | [
21 | "max_attempts",
22 | "Int4"
23 | ],
24 | [
25 | "initial_interval_ms",
26 | "Int4"
27 | ],
28 | [
29 | "max_interval_ms",
30 | "Int4"
31 | ],
32 | [
33 | "backoff_coefficient",
34 | "Float8"
35 | ]
36 | ]
37 | }
38 | }
39 | },
40 | "Text",
41 | "Int4"
42 | ]
43 | },
44 | "nullable": []
45 | },
46 | "hash": "dae0ef841b214a8f0d3981f27aa490d09dd5db2962112316ae892a59fc7f152c"
47 | }
48 |
--------------------------------------------------------------------------------
/.sqlx/query-dbcd4dd5bde04b26279a795888276f9369c1322b5ead7cf2f31a3a869afa8e02.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select last_heartbeat_at as \"last_heartbeat_at: i64\"\n from underway.task\n where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "last_heartbeat_at: i64",
9 | "type_info": "Timestamptz"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": [
14 | "Uuid"
15 | ]
16 | },
17 | "nullable": [
18 | true
19 | ]
20 | },
21 | "hash": "dbcd4dd5bde04b26279a795888276f9369c1322b5ead7cf2f31a3a869afa8e02"
22 | }
23 |
--------------------------------------------------------------------------------
/.sqlx/query-de50515666fa9da0b24b0d8db50bd1b03e099ea946c151d821520d1687ad6881.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n update underway.task_attempt\n set state = $3,\n updated_at = now(),\n completed_at = now()\n where task_id = $1\n and task_queue_name = $2\n and attempt_number = (\n select attempt_number\n from underway.task_attempt\n where task_id = $1\n and task_queue_name = $2\n order by attempt_number desc\n limit 1\n )\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Uuid",
9 | "Text",
10 | {
11 | "Custom": {
12 | "name": "underway.task_state",
13 | "kind": {
14 | "Enum": [
15 | "pending",
16 | "in_progress",
17 | "succeeded",
18 | "cancelled",
19 | "failed"
20 | ]
21 | }
22 | }
23 | }
24 | ]
25 | },
26 | "nullable": []
27 | },
28 | "hash": "de50515666fa9da0b24b0d8db50bd1b03e099ea946c151d821520d1687ad6881"
29 | }
30 |
--------------------------------------------------------------------------------
/.sqlx/query-df3288d47a491690460244087d5c0ad37fef221b28efb666cd2bbdcdd849f5b2.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select id, task_queue_name from underway.task where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "id",
9 | "type_info": "Uuid"
10 | },
11 | {
12 | "ordinal": 1,
13 | "name": "task_queue_name",
14 | "type_info": "Text"
15 | }
16 | ],
17 | "parameters": {
18 | "Left": [
19 | "Uuid"
20 | ]
21 | },
22 | "nullable": [
23 | false,
24 | false
25 | ]
26 | },
27 | "hash": "df3288d47a491690460244087d5c0ad37fef221b28efb666cd2bbdcdd849f5b2"
28 | }
29 |
--------------------------------------------------------------------------------
/.sqlx/query-e572c3a03a1e78e45550b3d54085cbc4b226df7f0ac392dba81ec76b8e11f26b.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "select state as \"state: TaskState\", completed_at as \"completed_at: i64\"\n from underway.task where id = $1",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "state: TaskState",
9 | "type_info": {
10 | "Custom": {
11 | "name": "underway.task_state",
12 | "kind": {
13 | "Enum": [
14 | "pending",
15 | "in_progress",
16 | "succeeded",
17 | "cancelled",
18 | "failed"
19 | ]
20 | }
21 | }
22 | }
23 | },
24 | {
25 | "ordinal": 1,
26 | "name": "completed_at: i64",
27 | "type_info": "Timestamptz"
28 | }
29 | ],
30 | "parameters": {
31 | "Left": [
32 | "Uuid"
33 | ]
34 | },
35 | "nullable": [
36 | false,
37 | true
38 | ]
39 | },
40 | "hash": "e572c3a03a1e78e45550b3d54085cbc4b226df7f0ac392dba81ec76b8e11f26b"
41 | }
42 |
--------------------------------------------------------------------------------
/.sqlx/query-f069227d261bdec30ade8d0553bffc2796c3f4914f773ef569526feb0c7062ea.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select exists (\n select 1 from information_schema.tables\n where table_schema = 'underway' and \n table_name = '_sqlx_migrations'\n );\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "exists",
9 | "type_info": "Bool"
10 | }
11 | ],
12 | "parameters": {
13 | "Left": []
14 | },
15 | "nullable": [
16 | null
17 | ]
18 | },
19 | "hash": "f069227d261bdec30ade8d0553bffc2796c3f4914f773ef569526feb0c7062ea"
20 | }
21 |
--------------------------------------------------------------------------------
/.sqlx/query-f2bb618b6cad214f8b136bab65a6b56e9e6d909654d2fe7eeb3190a3924fc204.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n select state as \"state: TaskState\"\n from underway.task\n where id = $1\n ",
4 | "describe": {
5 | "columns": [
6 | {
7 | "ordinal": 0,
8 | "name": "state: TaskState",
9 | "type_info": {
10 | "Custom": {
11 | "name": "underway.task_state",
12 | "kind": {
13 | "Enum": [
14 | "pending",
15 | "in_progress",
16 | "succeeded",
17 | "cancelled",
18 | "failed"
19 | ]
20 | }
21 | }
22 | }
23 | }
24 | ],
25 | "parameters": {
26 | "Left": [
27 | "Uuid"
28 | ]
29 | },
30 | "nullable": [
31 | false
32 | ]
33 | },
34 | "hash": "f2bb618b6cad214f8b136bab65a6b56e9e6d909654d2fe7eeb3190a3924fc204"
35 | }
36 |
--------------------------------------------------------------------------------
/.sqlx/query-f3388664b9d07ca787a450e25b7559c92d8b2ed917ff8313ab75fa411f23ffe2.json:
--------------------------------------------------------------------------------
1 | {
2 | "db_name": "PostgreSQL",
3 | "query": "\n insert into underway.task_queue (name) values ($1)\n on conflict do nothing\n ",
4 | "describe": {
5 | "columns": [],
6 | "parameters": {
7 | "Left": [
8 | "Text"
9 | ]
10 | },
11 | "nullable": []
12 | },
13 | "hash": "f3388664b9d07ca787a450e25b7559c92d8b2ed917ff8313ab75fa411f23ffe2"
14 | }
15 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Unreleased
2 |
3 |
4 | # 0.1.2
5 |
6 | - Provide additional worker and scheduler tracing instrumentation
7 |
8 | # 0.1.1
9 |
10 | - Fix: Job cancellation should lock rows to be cancelled #67
11 |
12 | # 0.1.0
13 |
14 | - Breaking: Worker and scheduler setters have been renamed #42
15 | - Breaking: Migrations have been reworked to compensate for features that will land in `sqlx` 0.9.0 #44
16 | - Breaking: Job enqueues now return an `EnqueuedJob` type #46
17 | - Breaking: Task ID is now a newtype #47
18 | - Breaking: Task dequeues are now encapsulated such that they are visible as they're being processed #55, #59, #60, #64, #65
19 | - Breaking: Task schedule "name" column renamed to "task_queue_name" for consistency #58
20 | - Breaking: Scheduler iterator has been refactored to use `Zoned` directly #62
21 | - Task attempts are now recorded in a separate table, providing a complete log of task execution history #50
22 |
23 | # 0.0.6
24 |
25 | - Breaking: Queue methods now take input by reference
26 | - Breaking: Job methods also take input by reference
27 | - Breaking: The scheduler `run_every` method is removed
28 | - Pending tasks are now processed upon change via a notify channel #25
29 | - An `unschedule` method is provided on `Queue` and `Job` #40
30 | - Graceful shutdown is now available on `JobHandle` returned from `start` #37
31 |
32 | # 0.0.5
33 |
34 | - Breaking: Tasks require an associated type Output
35 | - Breaking: Tasks require a transaction as their first execute argument
36 | - Breaking: Database locking methods are now free functions
37 | - Breaking: Job interface rewritten for step functions #24
38 |
39 | # 0.0.4
40 |
41 | - Breaking: Renamed builders to `Builder` #15
42 | - Breaking: Made task deletion routine a free function #13
43 | - Breaking: `Job::run` now runs both the worker and scheduler #12
44 | - Ensure scheduler singleton behavior
45 |
46 | # 0.0.3
47 |
48 | - Added `ToTaskResult` trait for better task result ergonomics #10
49 |
50 | # 0.0.2
51 |
52 | - Jobs may provide state #9
53 | - Breaking: `queue` must now be defined after `execute`
54 | - Workers may be gracefully shut down via `graceful_shutdown` #8
55 | - Jobs and queue are provided `enqueue_after` #7
56 |
57 | # 0.0.1
58 |
59 | - Pre-release: baseline feature completion
60 |
61 | # 0.0.0
62 |
63 | - Pre-release :tada:
64 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "underway"
3 | description = "⏳ Durable step functions via Postgres"
4 | version = "0.1.2"
5 | edition = "2021"
6 | license = "MIT OR Apache-2.0"
7 | keywords = ["background-jobs", "job-queue", "work-queue", "sqlx", "web"]
8 | categories = ["asynchronous", "database", "web-programming"]
9 | repository = "https://github.com/maxcountryman/underway"
10 | documentation = "https://docs.rs/underway"
11 |
12 | [dependencies]
13 | jiff-cron = "0.1.0"
14 | jiff = { version = "0.1.13", features = ["serde"] }
15 | serde = "1.0.210"
16 | serde_json = "1.0.128"
17 | sqlx = { version = "0.8.2", features = [
18 | "postgres",
19 | "runtime-tokio-rustls",
20 | "uuid",
21 | "json",
22 | ] }
23 | thiserror = "2.0.0"
24 | tokio = { version = "1.40.0", features = [
25 | "full",
26 | ] } # TODO: "full" shouldn't be required
27 | tracing = { version = "0.1.40", features = ["log"] }
28 | ulid = { version = "1.1.3", features = ["uuid"] }
29 | uuid = { version = "1.10.0", features = ["v4", "serde"] }
30 | num_cpus = "1.16.0"
31 | tokio-util = "0.7.12"
32 |
33 | [dev-dependencies]
34 | futures = "0.3.30"
35 | tokio = { version = "1.40.0", features = ["test-util"] }
36 |
--------------------------------------------------------------------------------
/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Max Countryman
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # underway
2 |
3 | ⏳ Durable step functions via Postgres.
4 |
21 | ## 🎨 Overview
22 |
23 | **Underway** provides durable background jobs over Postgres. Jobs are composed of a sequence of one or more steps. Each step takes the output of the previous step as its input. These simple workflows provide a powerful interface to common deferred work use cases.
24 |
25 | Key Features:
26 |
27 | - **PostgreSQL-Backed** Leverages PostgreSQL with `FOR UPDATE SKIP LOCKED`
28 | for reliable task storage and coordination (see the sketch after this list).
29 | - **Atomic Task Management** Enqueue tasks within your transactions and use
30 | the worker's transaction within your tasks for atomic queries (a sketch follows the first example below).
31 | - **Automatic Retries** Configurable retry strategies ensure tasks are
32 | reliably completed, even after transient failures.
33 | - **Cron-Like Scheduling** Schedule recurring tasks with cron-like
34 | expressions for automated, time-based job execution.
35 | - **Scalable and Flexible** Easily scales from a single worker to many,
36 | enabling seamless background job processing with minimal setup.
37 |
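To make the locking pattern concrete, here is a minimal sketch of a
`FOR UPDATE SKIP LOCKED` dequeue, loosely adapted from the dequeue query in
this repository's `.sqlx` cache. Treat it as an illustration rather than the
library's exact internals: the real query also recovers stalled tasks,
respects delays, and filters by queue.

```rust
use sqlx::{types::Uuid, PgPool};

// Claim one pending task. `for update skip locked` lets many workers
// poll the same table concurrently without blocking on rows a peer
// has already locked.
async fn claim_one(pool: &PgPool) -> sqlx::Result<Option<Uuid>> {
    let claimed: Option<(Uuid,)> = sqlx::query_as(
        r#"
        with available_task as (
            select id
            from underway.task
            where state = 'pending'
            order by priority desc, created_at, id
            limit 1
            for update skip locked
        )
        update underway.task t
        set state = 'in_progress', last_attempt_at = now()
        from available_task
        where t.id = available_task.id
        returning t.id
        "#,
    )
    .fetch_optional(pool)
    .await?;
    Ok(claimed.map(|(id,)| id))
}
```

In the real query, tasks whose heartbeat has stalled are likewise eligible to
be claimed again, which is how work from crashed workers is recovered.
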
38 | ## 🤸 Usage
39 |
40 | Underway is suitable for many different use cases, ranging from simple
41 | single-step jobs to more sophisticated multi-step jobs, where dependencies
42 | are built up between steps.
43 |
44 | ### Welcome emails
45 |
46 | A common use case is deferring work that can be processed later. For
47 | instance, during user registration, we might want to send a welcome email to
48 | new users. Rather than handling this within the registration process (e.g.,
49 | form validation, database insertion), we can offload it to run "out-of-band"
50 | using Underway. By defining a job for sending the welcome email, Underway
51 | ensures it gets processed in the background, without slowing down the user
52 | registration flow.
53 |
54 | ```rust
55 | use std::env;
56 |
57 | use serde::{Deserialize, Serialize};
58 | use sqlx::PgPool;
59 | use underway::{Job, To};
60 |
61 | // This is the input we'll provide to the job when we enqueue it.
62 | #[derive(Deserialize, Serialize)]
63 | struct WelcomeEmail {
64 | user_id: i32,
65 | email: String,
66 | name: String,
67 | }
68 |
69 | #[tokio::main]
70 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
71 | // Set up the database connection pool.
72 | let database_url = &env::var("DATABASE_URL").expect("DATABASE_URL should be set");
73 | let pool = PgPool::connect(database_url).await?;
74 |
75 | // Run migrations.
76 | underway::run_migrations(&pool).await?;
77 |
78 | // Build the job.
79 | let job = Job::builder()
80 | .step(
81 | |_cx,
82 | WelcomeEmail {
83 | user_id,
84 | email,
85 | name,
86 | }| async move {
87 | // Simulate sending an email.
88 | println!("Sending welcome email to {name} <{email}> (user_id: {user_id})");
89 | // Returning this indicates this is the final step.
90 | To::done()
91 | },
92 | )
93 | .name("welcome-email")
94 | .pool(pool)
95 | .build()
96 | .await?;
97 |
98 | // Here we enqueue a new job to be processed later.
99 | job.enqueue(&WelcomeEmail {
100 | user_id: 42,
101 | email: "ferris@example.com".to_string(),
102 | name: "Ferris".to_string(),
103 | })
104 | .await?;
105 |
106 | // Start processing enqueued jobs.
107 | job.start().await??;
108 |
109 | Ok(())
110 | }
111 | ```
112 |
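Steps like the one above run inside the worker's own database transaction.
Jobs are built on the lower-level `Task` trait, whose `execute` method
receives that transaction directly, as the multitask example in this
repository shows. Below is a minimal sketch under stated assumptions:
`MarkUserWelcomed` and the `users` table are hypothetical, and `.retryable()`
is the `ToTaskResult` helper used in the RAG example.

```rust
use sqlx::{Postgres, Transaction};
use underway::{task::Result as TaskResult, Task, ToTaskResult};

// Hypothetical task that flips a flag on a (hypothetical) users table.
struct MarkUserWelcomed;

impl Task for MarkUserWelcomed {
    type Input = i32; // A user ID.
    type Output = ();

    // The worker hands us its own transaction, so this write commits
    // atomically with the task's state bookkeeping.
    async fn execute(
        &self,
        mut tx: Transaction<'_, Postgres>,
        user_id: Self::Input,
    ) -> TaskResult<()> {
        sqlx::query("update users set welcomed = true where id = $1")
            .bind(user_id)
            .execute(&mut *tx)
            .await
            .retryable()?; // Surface database errors as retryable.
        Ok(())
    }
}
```
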
113 | ### Order receipts
114 |
115 | Another common use case is defining dependencies between discrete steps of a
116 | job. For instance, we might generate PDF receipts for orders and then email
117 | these to customers. With Underway, each step is handled separately, making
118 | it easy to create a job that first generates the PDF and, once
119 | completed, proceeds to send the email.
120 |
121 | This separation provides significant value: if the email sending service
122 | is temporarily unavailable, we can retry the email step without having to
123 | regenerate the PDF, avoiding unnecessary repeated work.
124 |
125 | ```rust
126 | use std::env;
127 |
128 | use serde::{Deserialize, Serialize};
129 | use sqlx::PgPool;
130 | use underway::{Job, To};
131 |
132 | #[derive(Deserialize, Serialize)]
133 | struct GenerateReceipt {
134 | // An order we want to generate a receipt for.
135 | order_id: i32,
136 | }
137 |
138 | #[derive(Deserialize, Serialize)]
139 | struct EmailReceipt {
140 | // An object store key to our receipt PDF.
141 | receipt_key: String,
142 | }
143 |
144 | #[tokio::main]
145 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
146 | // Set up the database connection pool.
147 | let database_url = &env::var("DATABASE_URL").expect("DATABASE_URL should be set");
148 | let pool = PgPool::connect(database_url).await?;
149 |
150 | // Run migrations.
151 | underway::run_migrations(&pool).await?;
152 |
153 | // Build the job.
154 | let job = Job::builder()
155 | .step(|_cx, GenerateReceipt { order_id }| async move {
156 | // Use the order ID to build a receipt PDF...
157 | let receipt_key = format!("receipts_bucket/{order_id}-receipt.pdf");
158 | // ...store the PDF in an object store.
159 |
160 | // We proceed to the next step with the receipt_key as its input.
161 | To::next(EmailReceipt { receipt_key })
162 | })
163 | .step(|_cx, EmailReceipt { receipt_key }| async move {
164 | // Retrieve the PDF from the object store, and send the email.
165 | println!("Emailing receipt for {receipt_key}");
166 | To::done()
167 | })
168 | .name("order-receipt")
169 | .pool(pool)
170 | .build()
171 | .await?;
172 |
173 | // Enqueue the job for the given order.
174 | job.enqueue(&GenerateReceipt { order_id: 42 }).await?;
175 |
176 | // Start processing enqueued jobs.
177 | job.start().await??;
178 |
179 | Ok(())
180 | }
181 | ```
182 |
183 | With this setup, if the email service is down, the `EmailReceipt` step can
184 | be retried without redoing the PDF generation, saving time and resources by
185 | not repeating the expensive step of generating the PDF.
186 |
187 | ### Daily reports
188 |
189 | Jobs may also be run on a schedule. This makes them useful for work on a
190 | regular cadence, such as creating a daily business report. Schedules are
191 | cron-like expressions with a bracketed IANA time zone suffix.
192 |
193 | ```rust
194 | use std::env;
195 |
196 | use serde::{Deserialize, Serialize};
197 | use sqlx::PgPool;
198 | use underway::{Job, To};
199 |
200 | #[derive(Deserialize, Serialize)]
201 | struct DailyReport;
202 |
203 | #[tokio::main]
204 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
205 | // Set up the database connection pool.
206 | let database_url = &env::var("DATABASE_URL").expect("DATABASE_URL should be set");
207 | let pool = PgPool::connect(database_url).await?;
208 |
209 | // Run migrations.
210 | underway::run_migrations(&pool).await?;
211 |
212 | // Build the job.
213 | let job = Job::builder()
214 | .step(|_cx, _| async move {
215 | // Here we would generate and store the report.
216 | To::done()
217 | })
218 | .name("daily-report")
219 | .pool(pool)
220 | .build()
221 | .await?;
222 |
223 | // Set a daily schedule with the given input.
224 | let daily = "@daily[America/Los_Angeles]".parse()?;
225 | job.schedule(&daily, &DailyReport).await?;
226 |
227 | // Start processing enqueued jobs.
228 | job.start().await??;
229 |
230 | Ok(())
231 | }
232 | ```
233 |
234 | ## 🛟 Getting Help
235 |
236 | We've put together a number of [examples][examples] to help get you started. You're also welcome to [open a discussion](https://github.com/maxcountryman/underway/discussions/new?category=q-a) and ask additional questions you might have.
237 |
238 | ## 👯 Contributing
239 |
240 | We appreciate all kinds of contributions, thank you!
241 |
242 | [examples]: https://github.com/maxcountryman/underway/tree/main/examples
243 | [docs]: https://docs.rs/underway
244 |
--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 |
3 | services:
4 | postgres:
5 | image: postgres:latest
6 | environment:
7 | POSTGRES_USER: postgres
8 | POSTGRES_PASSWORD: postgres
9 | POSTGRES_DB: underway
10 | ports:
11 | - "5432:5432"
12 |
--------------------------------------------------------------------------------
/examples/basic/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "example-basic"
3 | version = "0.1.0"
4 | edition = "2021"
5 | publish = false
6 |
7 | [dependencies]
8 | serde = { version = "1.0.210", features = ["derive"] }
9 | sqlx = { version = "0.8.2", features = ["postgres", "runtime-tokio-rustls"] }
10 | tokio = { version = "1.34.0", features = ["full"] }
11 | underway = { path = "../../" }
12 | uuid = { version = "1.10.0", features = ["serde", "v4"] }
13 |
--------------------------------------------------------------------------------
/examples/basic/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 |
3 | use serde::{Deserialize, Serialize};
4 | use sqlx::PgPool;
5 | use underway::{Job, To};
6 |
7 | const QUEUE_NAME: &str = "example-basic";
8 |
9 | #[derive(Clone, Deserialize, Serialize)]
10 | struct WelcomeEmail {
11 | user_id: i32,
12 | email: String,
13 | name: String,
14 | }
15 |
16 | #[tokio::main]
17 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
18 | // Set up the database connection pool.
19 | let database_url = &env::var("DATABASE_URL").expect("DATABASE_URL should be set");
20 | let pool = PgPool::connect(database_url).await?;
21 |
22 | // Run migrations.
23 | underway::run_migrations(&pool).await?;
24 |
25 | // Build the job.
26 | let job = Job::builder()
27 | .step(
28 | |_ctx,
29 | WelcomeEmail {
30 | user_id,
31 | email,
32 | name,
33 | }| async move {
34 | // Simulate sending an email.
35 | println!("Sending welcome email to {name} <{email}> (user_id: {user_id})");
36 | To::done()
37 | },
38 | )
39 | .name(QUEUE_NAME)
40 | .pool(pool)
41 | .build()
42 | .await?;
43 |
44 | // Enqueue a job task.
45 | job.enqueue(&WelcomeEmail {
46 | user_id: 42,
47 | email: "ferris@example.com".to_string(),
48 | name: "Ferris".to_string(),
49 | })
50 | .await?;
51 |
52 | // Start the worker to process tasks.
53 | job.run().await?;
54 |
55 | Ok(())
56 | }
57 |
--------------------------------------------------------------------------------
/examples/graceful_shutdown/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "example-graceful-shutdown"
3 | version = "0.1.0"
4 | edition = "2021"
5 | publish = false
6 |
7 | [dependencies]
8 | sqlx = { version = "0.8.2", features = ["postgres", "runtime-tokio-rustls"] }
9 | tokio = { version = "1.34.0", features = ["full"] }
10 | tracing = "0.1.40"
11 | tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
12 | underway = { path = "../../" }
13 |
--------------------------------------------------------------------------------
/examples/graceful_shutdown/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 |
3 | use sqlx::{postgres::PgPoolOptions, PgPool};
4 | use tokio::{signal, task::JoinSet};
5 | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
6 | use underway::{Job, To};
7 |
8 | const QUEUE_NAME: &str = "graceful-shutdown";
9 |
10 | async fn shutdown_signal(pool: &PgPool) {
11 | let ctrl_c = async {
12 | signal::ctrl_c().await.unwrap();
13 | };
14 |
15 | #[cfg(unix)]
16 | let terminate = async {
17 | signal::unix::signal(signal::unix::SignalKind::terminate())
18 | .unwrap()
19 | .recv()
20 | .await;
21 | };
22 |
23 | #[cfg(not(unix))]
24 | let terminate = std::future::pending::<()>();
25 |
26 | tokio::select! {
27 | _ = ctrl_c => {
28 | underway::queue::graceful_shutdown(pool).await.unwrap();
29 | },
30 | _ = terminate => {
31 | underway::queue::graceful_shutdown(pool).await.unwrap();
32 | },
33 | }
34 | }
35 |
36 | #[tokio::main]
37 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
38 | // Initialize the tracing subscriber.
39 | tracing_subscriber::registry()
40 | .with(EnvFilter::new(
41 | env::var("RUST_LOG").unwrap_or_else(|_| "info,underway=debug,sqlx=warn".into()),
42 | ))
43 | .with(tracing_subscriber::fmt::layer())
44 | .try_init()?;
45 |
46 | // Set up the database connection pool.
47 | let database_url = &env::var("DATABASE_URL").expect("DATABASE_URL should be set");
48 | let pool = PgPoolOptions::new()
49 | .max_connections(25)
50 | .connect(database_url)
51 | .await?;
52 |
53 | // Run migrations.
54 | underway::run_migrations(&pool).await?;
55 |
56 | // Build the job.
57 | let job = Job::builder()
58 | .step(|_ctx, _input| async move {
59 | let sleep_duration = std::time::Duration::from_secs(10);
60 |
61 | tracing::info!(?sleep_duration, "Hello from a long-running task");
62 |
63 | // Artificial delay to simulate a long-running job.
64 | tokio::time::sleep(sleep_duration).await;
65 |
66 | To::done()
67 | })
68 | .name(QUEUE_NAME)
69 | .pool(pool.clone())
70 | .build()
71 | .await?;
72 |
73 | let every_second = "* * * * * *[America/Los_Angeles]".parse()?;
74 | job.schedule(&every_second, &()).await?;
75 |
76 | // Await the shutdown signal handler in its own task.
77 | tokio::spawn(async move { shutdown_signal(&pool).await });
78 |
79 | // All jobs will run until the queue signals shutdown.
80 | let mut jobs = JoinSet::new();
81 | for _ in 0..2 {
82 | jobs.spawn({
83 | let job = job.clone();
84 | async move { job.run().await }
85 | });
86 | }
87 | jobs.join_all().await;
88 |
89 | Ok(())
90 | }
91 |
--------------------------------------------------------------------------------
/examples/multitask/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "example-multitask"
3 | version = "0.1.0"
4 | edition = "2021"
5 | publish = false
6 |
7 | [dependencies]
8 | serde = { version = "1.0.210", features = ["derive"] }
9 | serde_json = "1"
10 | sqlx = { version = "0.8.2", features = ["postgres", "runtime-tokio-rustls"] }
11 | tokio = { version = "1.34.0", features = ["full"] }
12 | tracing = "0.1.40"
13 | tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
14 | underway = { path = "../../" }
15 |
--------------------------------------------------------------------------------
/examples/multitask/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 |
3 | use serde::{Deserialize, Serialize};
4 | use sqlx::{PgPool, Postgres, Transaction};
5 | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
6 | use underway::{
7 | queue::Error as QueueError,
8 | task::{Result as TaskResult, TaskId},
9 | Queue, Task, Worker,
10 | };
11 |
12 | const QUEUE_NAME: &str = "example-multitask";
13 |
14 | #[derive(Debug, Clone, Deserialize, Serialize)]
15 | struct WelcomeEmail {
16 | user_id: i32,
17 | email: String,
18 | name: String,
19 | }
20 |
21 | struct WelcomeEmailTask;
22 |
23 | impl WelcomeEmailTask {
24 | async fn enqueue(
25 | &self,
26 | pool: &PgPool,
27 | queue: &Queue<Multitask>,
28 | input: WelcomeEmail,
29 | ) -> Result<TaskId, QueueError> {
30 | // This ensures our task-specific configuration is applied.
31 | let welcome_email_task = self.into();
32 | queue
33 | .enqueue(pool, &welcome_email_task, &TaskInput::WelcomeEmail(input))
34 | .await
35 | }
36 | }
37 |
38 | impl Task for WelcomeEmailTask {
39 | type Input = WelcomeEmail;
40 | type Output = ();
41 |
42 | async fn execute(&self, _tx: Transaction<'_, Postgres>, input: Self::Input) -> TaskResult<()> {
43 | tracing::info!(?input, "Simulate sending a welcome email");
44 | Ok(())
45 | }
46 | }
47 |
48 | #[derive(Debug, Clone, Deserialize, Serialize)]
49 | struct Order {
50 | user_id: i32,
51 | sku: String,
52 | }
53 |
54 | struct OrderTask;
55 |
56 | impl OrderTask {
57 | async fn enqueue(
58 | &self,
59 | pool: &PgPool,
60 | queue: &Queue<Multitask>,
61 | input: Order,
62 | ) -> Result<TaskId, QueueError> {
63 | // This ensures our task-specific configuration is applied.
64 | let order_task = self.into();
65 | queue
66 | .enqueue(pool, &order_task, &TaskInput::Order(input))
67 | .await
68 | }
69 | }
70 |
71 | impl Task for OrderTask {
72 | type Input = Order;
73 | type Output = ();
74 |
75 | async fn execute(&self, _tx: Transaction<'_, Postgres>, input: Self::Input) -> TaskResult<()> {
76 | tracing::info!(?input, "Simulate order processing");
77 | Ok(())
78 | }
79 |
80 | fn priority(&self) -> i32 {
81 | 10 // We'll make Order tasks higher priority.
82 | }
83 | }
84 |
85 | #[derive(Clone, Deserialize, Serialize)]
86 | enum TaskInput {
87 | WelcomeEmail(WelcomeEmail),
88 | Order(Order),
89 | }
90 |
91 | struct Multitask {
92 | welcome_email: WelcomeEmailTask,
93 | order: OrderTask,
94 | priority: i32,
95 | }
96 |
97 | impl Multitask {
98 | fn new() -> Self {
99 | Self {
100 | welcome_email: WelcomeEmailTask,
101 | order: OrderTask,
102 | priority: 0, // This is set when we convert from one of our tasks.
103 | }
104 | }
105 | }
106 |
107 | impl From<&WelcomeEmailTask> for Multitask {
108 | fn from(welcome_email_task: &WelcomeEmailTask) -> Self {
109 | Self {
110 | welcome_email: WelcomeEmailTask,
111 | order: OrderTask,
112 | priority: welcome_email_task.priority(), // Proxy task-specific configuration.
113 | }
114 | }
115 | }
116 |
117 | impl From<&OrderTask> for Multitask {
118 | fn from(order_task: &OrderTask) -> Self {
119 | Self {
120 | welcome_email: WelcomeEmailTask,
121 | order: OrderTask,
122 | priority: order_task.priority(), // Proxy task-specific configuration.
123 | }
124 | }
125 | }
126 |
127 | impl Task for Multitask {
128 | type Input = TaskInput;
129 | type Output = ();
130 |
131 | async fn execute(&self, tx: Transaction<'_, Postgres>, input: Self::Input) -> TaskResult<()> {
132 | match input {
133 | TaskInput::WelcomeEmail(input) => self.welcome_email.execute(tx, input).await,
134 | TaskInput::Order(input) => self.order.execute(tx, input).await,
135 | }
136 | }
137 |
138 | fn priority(&self) -> i32 {
139 | self.priority
140 | }
141 | }
142 |
143 | #[tokio::main]
144 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
145 | // Initialize the tracing subscriber.
146 | tracing_subscriber::registry()
147 | .with(EnvFilter::new(
148 | env::var("RUST_LOG").unwrap_or_else(|_| "debug,underway=info,sqlx=warn".into()),
149 | ))
150 | .with(tracing_subscriber::fmt::layer())
151 | .try_init()?;
152 |
153 | // Set up the database connection pool.
154 | let database_url = &env::var("DATABASE_URL").expect("DATABASE_URL should be set");
155 | let pool = PgPool::connect(database_url).await?;
156 |
157 | // Run migrations.
158 | underway::run_migrations(&pool).await?;
159 |
160 | // Create the task queue.
161 | let queue = Queue::builder()
162 | .name(QUEUE_NAME)
163 | .pool(pool.clone())
164 | .build()
165 | .await?;
166 |
167 | // Enqueue a welcome email task.
168 | let welcome_email_task = WelcomeEmailTask;
169 | let task_id = welcome_email_task
170 | .enqueue(
171 | &pool,
172 | &queue,
173 | WelcomeEmail {
174 | user_id: 42,
175 | email: "ferris@example.com".to_string(),
176 | name: "Ferris".to_string(),
177 | },
178 | )
179 | .await?;
180 |
181 | tracing::info!(task.id = %task_id.as_hyphenated(), "Enqueued welcome email task");
182 |
183 | // Enqueue an order task.
184 | let order_task = OrderTask;
185 | let task_id = order_task
186 | .enqueue(
187 | &pool,
188 | &queue,
189 | Order {
190 | user_id: 42,
191 | sku: "SKU0-0042".to_string(),
192 | },
193 | )
194 | .await?;
195 |
196 | tracing::info!(task.id = %task_id.as_hyphenated(), "Enqueued order task");
197 |
198 | // Run a worker that processes all tasks.
199 | let multitask = Multitask::new();
200 | Worker::new(queue, multitask).run().await?;
201 |
202 | Ok(())
203 | }
204 |
--------------------------------------------------------------------------------
/examples/rag/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "example-rag"
3 | version = "0.1.0"
4 | edition = "2021"
5 | publish = false
6 |
7 | [dependencies]
8 | async-openai = "0.25.0"
9 | jiff = "0.1.13"
10 | reqwest = { version = "0.12.8", features = ["json"] }
11 | serde = { version = "1.0.210", features = ["derive"] }
12 | sqlx = { version = "0.8.2", features = ["postgres", "runtime-tokio-rustls"] }
13 | tokio = { version = "1.34.0", features = ["full"] }
14 | tracing = "0.1.40"
15 | tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
16 | underway = { path = "../../" }
17 |
--------------------------------------------------------------------------------
/examples/rag/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 |
3 | use async_openai::{
4 | config::OpenAIConfig,
5 | types::{ChatCompletionRequestMessage, CreateChatCompletionRequestArgs},
6 | Client,
7 | };
8 | use serde::{de::DeserializeOwned, Deserialize, Serialize};
9 | use sqlx::PgPool;
10 | use tokio::task::JoinSet;
11 | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
12 | use underway::{job::Context, Job, To, ToTaskResult};
13 |
14 | const SYSTEM_PROMPT: &str = include_str!("../system-prompt.llm.txt");
15 |
16 | const HN_API_BASE: &str = "https://hacker-news.firebaseio.com/v0/";
17 |
18 | #[derive(Deserialize, Serialize)]
19 | struct Summarize {
20 | top_stories: Vec<(Story, Vec<Comment>)>,
21 | }
22 |
23 | #[derive(Clone)]
24 | struct State {
25 | openai_client: Client<OpenAIConfig>,
26 | }
27 |
28 | #[derive(Debug, Clone, Deserialize, Serialize)]
29 | struct Story {
30 | title: String,
31 | score: u32,
32 | kids: Option<Vec<u32>>,
33 | }
34 |
35 | #[derive(Debug, Deserialize, Serialize)]
36 | struct Comment {
37 | id: u32,
38 | text: Option<String>,
39 | }
40 |
41 | fn get_item_uri(item_id: u32) -> String {
42 | format!("{HN_API_BASE}item/{}.json", item_id)
43 | }
44 |
45 | async fn fetch_item_by_id<T>(id: u32) -> Result<T, reqwest::Error>
46 | where
47 | T: DeserializeOwned,
48 | {
49 | let story_url = get_item_uri(id);
50 | let item: T = reqwest::get(&story_url).await?.json().await?;
51 | Ok(item)
52 | }
53 |
54 | async fn fetch_comments(comment_ids: &[u32], max_comments: usize) -> Vec<Comment> {
55 | let mut tasks = JoinSet::new();
56 | for &id in comment_ids.iter().take(max_comments) {
57 | tasks.spawn(async move { fetch_item_by_id::<Comment>(id).await });
58 | }
59 |
60 | let mut comments = Vec::new();
61 | while let Some(task) = tasks.join_next().await {
62 | if let Ok(Ok(comment)) = task {
63 | comments.push(comment);
64 | }
65 | }
66 |
67 | comments
68 | }
69 |
70 | async fn fetch_top_stories() -> Result<Vec<(Story, Vec<Comment>)>, reqwest::Error> {
71 | let top_stories_url = format!("{HN_API_BASE}topstories.json");
72 | let story_ids: Vec<u32> = reqwest::get(top_stories_url).await?.json().await?;
73 |
74 | let story_ids = &story_ids[..20];
75 |
76 | let mut tasks = JoinSet::new();
77 | for &id in story_ids {
78 | tasks.spawn(async move { fetch_item_by_id::<Story>(id).await });
79 | }
80 |
81 | let mut stories = Vec::new();
82 | while let Some(task) = tasks.join_next().await {
83 | if let Ok(Ok(story)) = task {
84 | stories.push(story);
85 | }
86 | }
87 |
88 | stories.sort_by(|a, b| b.score.cmp(&a.score));
89 | let top_stories = stories.into_iter().collect::<Vec<_>>();
90 |
91 | let mut top_stories_with_comments = Vec::new();
92 | for story in top_stories {
93 | let comment_ids = story.kids.as_deref().unwrap_or_default();
94 | let comments = fetch_comments(comment_ids, 5).await; // Fetch up to 5 comments per story
95 | top_stories_with_comments.push((story, comments));
96 | }
97 |
98 | Ok(top_stories_with_comments)
99 | }
100 |
101 | fn user_prompt(top_stories: &[(Story, Vec<Comment>)]) -> String {
102 | let mut prompt = String::new();
103 | for (story, comments) in top_stories {
104 | let comments_text = comments
105 | .iter()
106 | .filter_map(|c| c.text.as_deref())
107 | .collect::<Vec<_>>()
108 | .join("\n");
109 |
110 | let formatted_story = format!("title: {}\ncomments:\n{}\n\n", story.title, comments_text);
111 | prompt.push_str(&formatted_story);
112 | }
113 |
114 | prompt
115 | }
116 |
117 | async fn summarize(
118 | client: &Client<OpenAIConfig>,
119 | top_stories: &[(Story, Vec<Comment>)],
120 | ) -> Result<String, Box<dyn std::error::Error>> {
121 | let system_message = ChatCompletionRequestMessage::Assistant(SYSTEM_PROMPT.into());
122 |
123 | let input_prompt = user_prompt(top_stories);
124 | let user_message = ChatCompletionRequestMessage::User(input_prompt.into());
125 |
126 | let request = CreateChatCompletionRequestArgs::default()
127 | .model("gpt-3.5-turbo")
128 | .messages(vec![system_message, user_message])
129 | .max_tokens(800_u32)
130 | .build()?;
131 |
132 | let response = client.chat().create(request).await?;
133 | let summary = response
134 | .choices
135 | .first()
136 | .and_then(|choice| choice.message.content.clone())
137 | .unwrap_or_else(|| "No summary available.".to_string());
138 |
139 | Ok(summary)
140 | }
141 |
142 | #[tokio::main]
143 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
144 | // Initialize the tracing subscriber.
145 | tracing_subscriber::registry()
146 | .with(EnvFilter::new(
147 | env::var("RUST_LOG").unwrap_or_else(|_| "info,underway=info,sqlx=warn".into()),
148 | ))
149 | .with(tracing_subscriber::fmt::layer())
150 | .try_init()?;
151 |
152 | let database_url = &env::var("DATABASE_URL").expect("DATABASE_URL should be set");
153 | let pool = PgPool::connect(database_url).await?;
154 |
155 | underway::run_migrations(&pool).await?;
156 |
157 | let openai_client = Client::new();
158 |
159 | let job = Job::builder()
160 | .state(State { openai_client })
161 | .step(|_cx, _| async move {
162 | tracing::info!("Retrieving the top five stories from Hacker News");
163 |
164 | let top_stories = fetch_top_stories().await.retryable()?;
165 | let top_five = top_stories.into_iter().take(5).collect::<Vec<_>>();
166 |
167 | To::next(Summarize {
168 | top_stories: top_five,
169 | })
170 | })
171 | .step(
172 | |Context {
173 | state: State { openai_client },
174 | ..
175 | },
176 | Summarize { top_stories }| async move {
177 | tracing::info!("Summarizing top five story discussions");
178 |
179 | let summary = summarize(&openai_client, &top_stories).await.retryable()?;
180 | println!("{}", summary);
181 |
182 | To::done()
183 | },
184 | )
185 | .name("example-rag")
186 | .pool(pool)
187 | .build()
188 | .await?;
189 |
190 | job.enqueue(&()).await?;
191 |
192 | job.start().await??;
193 |
194 | Ok(())
195 | }
196 |
--------------------------------------------------------------------------------
/examples/rag/system-prompt.llm.txt:
--------------------------------------------------------------------------------
1 | You are a summarization assistant. Your task is to summarize user comments about the top five stories from Hacker News. Please ensure the output is clear, concise, and easy to read.
2 |
3 | Follow this structure for each story:
4 |
5 | Title: [Story Title]
6 |
7 | Discussion: Provide an overview of story and how commenters are responding to it.
8 |
9 | Repeat this structure for each of the five stories.
10 |
--------------------------------------------------------------------------------
/examples/scheduled/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "example-scheduled"
3 | version = "0.1.0"
4 | edition = "2021"
5 | publish = false
6 |
7 | [dependencies]
8 | sqlx = { version = "0.8.2", features = ["postgres", "runtime-tokio-rustls"] }
9 | tokio = { version = "1.34.0", features = ["full"] }
10 | underway = { path = "../../" }
11 |
--------------------------------------------------------------------------------
/examples/scheduled/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 |
3 | use sqlx::PgPool;
4 | use underway::{Job, To};
5 |
6 | const QUEUE_NAME: &str = "example-scheduled";
7 |
8 | #[tokio::main]
9 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 | // Set up the database connection pool.
11 | let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
12 | let pool = PgPool::connect(&database_url).await?;
13 |
14 | // Run migrations.
15 | underway::run_migrations(&pool).await?;
16 |
17 | // Build the job.
18 | let job = Job::builder()
19 | .step(|_ctx, _input| async move {
20 | println!("Hello, World!");
21 | To::done()
22 | })
23 | .name(QUEUE_NAME)
24 | .pool(pool)
25 | .build()
26 | .await?;
27 |
28 | // Schedule the job to run every minute in the given time zone.
29 | let every_minute = "0 * * * * *[America/Los_Angeles]".parse()?;
30 | job.schedule(&every_minute, &()).await?;
31 |
32 | job.run().await?;
33 |
34 | Ok(())
35 | }
36 |
--------------------------------------------------------------------------------
/examples/step/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "example-step"
3 | version = "0.1.0"
4 | edition = "2021"
5 | publish = false
6 |
7 | [dependencies]
8 | jiff = "0.1.13"
9 | serde = { version = "1.0.210", features = ["derive"] }
10 | sqlx = { version = "0.8.2", features = ["postgres", "runtime-tokio-rustls"] }
11 | tokio = { version = "1.34.0", features = ["full"] }
12 | tracing = "0.1.40"
13 | tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
14 | underway = { path = "../../" }
15 |
--------------------------------------------------------------------------------
/examples/step/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 |
3 | use serde::{Deserialize, Serialize};
4 | use sqlx::PgPool;
5 | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
6 | use underway::{Job, To};
7 |
8 | #[derive(Serialize, Deserialize)]
9 | struct Start {
10 | n: usize,
11 | }
12 |
13 | #[derive(Serialize, Deserialize)]
14 | struct Power {
15 | n: usize,
16 | }
17 |
18 | #[derive(Serialize, Deserialize)]
19 | struct Modulo {
20 | n: usize,
21 | }
22 |
23 | #[tokio::main]
24 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
25 | // Initialize the tracing subscriber.
26 | tracing_subscriber::registry()
27 | .with(EnvFilter::new(
28 | env::var("RUST_LOG").unwrap_or_else(|_| "debug,underway=info,sqlx=warn".into()),
29 | ))
30 | .with(tracing_subscriber::fmt::layer())
31 | .try_init()?;
32 |
33 | // Set up the database connection pool.
34 | let database_url = &env::var("DATABASE_URL").expect("DATABASE_URL should be set");
35 | let pool = PgPool::connect(database_url).await?;
36 |
37 | // Run migrations.
38 | underway::run_migrations(&pool).await?;
39 |
40 | // Create our job.
41 | let job = Job::builder()
42 | // Step 1: Start with an initial number `n`.
43 | .step(|_ctx, Start { n }| async move {
44 | tracing::info!("Starting computation with n = {n}");
45 | // Proceed to the next step, passing the current state.
46 | To::next(Power { n })
47 | })
48 | // Step 2: Compute the power of `n`.
49 | .step(|_ctx, Power { n }| async move {
50 | let squared = n.pow(2);
51 | tracing::info!("Squared value: {n}^2 = {squared}");
52 | // Proceed to the next step with the new state.
53 | To::next(Modulo { n: squared })
54 | })
55 | // Step 3: Compute modulo of the result.
56 | .step(|_ctx, Modulo { n }| async move {
57 | let modulo_result = n % 10;
58 | tracing::info!("Modulo 10 of {n} is {modulo_result}");
59 | // Mark the job as done.
60 | To::done()
61 | })
62 | .name("example-step")
63 | .pool(pool)
64 | .build()
65 | .await?;
66 |
67 | // Enqueue the first step.
68 | job.enqueue(&Start { n: 42 }).await?;
69 |
70 | // Run the job worker.
71 | job.run().await?;
72 |
73 | Ok(())
74 | }
75 |
--------------------------------------------------------------------------------
/examples/tracing/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "example-tracing"
3 | version = "0.1.0"
4 | edition = "2021"
5 | publish = false
6 |
7 | [dependencies]
8 | serde = { version = "1.0.210", features = ["derive"] }
9 | sqlx = { version = "0.8.2", features = ["postgres", "runtime-tokio-rustls"] }
10 | tokio = { version = "1.34.0", features = ["full"] }
11 | tracing = "0.1.40"
12 | tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
13 | underway = { path = "../../" }
14 |
--------------------------------------------------------------------------------
/examples/tracing/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 |
3 | use serde::{Deserialize, Serialize};
4 | use sqlx::PgPool;
5 | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter};
6 | use underway::{Job, To};
7 |
8 | const QUEUE_NAME: &str = "example-tracing";
9 |
10 | #[derive(Clone, Deserialize, Serialize)]
11 | struct WelcomeEmail {
12 | user_id: i32,
13 | email: String,
14 | name: String,
15 | }
16 |
17 | #[tokio::main]
18 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
19 | // Initialize the tracing subscriber.
20 | tracing_subscriber::registry()
21 | .with(EnvFilter::new(
22 | env::var("RUST_LOG").unwrap_or_else(|_| "info,underway=info,sqlx=warn".into()),
23 | ))
24 | .with(tracing_subscriber::fmt::layer())
25 | .try_init()?;
26 |
27 | // Set up the database connection pool.
28 | let database_url = &env::var("DATABASE_URL").expect("DATABASE_URL should be set");
29 | let pool = PgPool::connect(database_url).await?;
30 |
31 | // Run migrations.
32 | underway::run_migrations(&pool).await?;
33 |
34 | // Build the job.
35 | let job = Job::builder()
36 | .step(
37 | |_ctx,
38 | WelcomeEmail {
39 | user_id,
40 | email,
41 | name,
42 | }| async move {
43 | // Simulate sending an email.
44 | tracing::info!("Sending welcome email to {name} <{email}> (user_id: {user_id})");
45 | To::done()
46 | },
47 | )
48 | .name(QUEUE_NAME)
49 | .pool(pool)
50 | .build()
51 | .await?;
52 |
53 | // Enqueue a job task.
54 | let task_id = job
55 | .enqueue(&WelcomeEmail {
56 | user_id: 42,
57 | email: "ferris@example.com".to_string(),
58 | name: "Ferris".to_string(),
59 | })
60 | .await?;
61 |
62 | tracing::info!(%task_id, "Enqueued job");
63 |
64 | // Start the worker to process tasks.
65 | job.run().await?;
66 |
67 | Ok(())
68 | }
69 |
--------------------------------------------------------------------------------
/migrations/20240921151751_0.sql:
--------------------------------------------------------------------------------
1 | create schema if not exists underway;
2 |
3 | -- Force anything running this migration to use the right search path.
4 | set local search_path to underway;
5 |
6 | -- Manage Underway migrations within the Underway schema.
7 | create table if not exists underway._sqlx_migrations
8 | (like public._sqlx_migrations including all);
9 |
10 | create table underway.task_queue (
11 | name text not null,
12 | dlq_name text,
13 | created_at timestamp with time zone not null default now(),
14 | updated_at timestamp with time zone not null default now(),
15 | primary key (name),
16 | foreign key (dlq_name) references underway.task_queue(name)
17 | );
18 |
19 | create table underway.task_schedule (
20 | name text references underway.task_queue on delete cascade,
21 | schedule text not null,
22 | timezone text not null,
23 | input jsonb not null,
24 | created_at timestamp with time zone not null default now(),
25 | updated_at timestamp with time zone not null default now(),
26 | primary key (name)
27 | );
28 |
29 | create type underway.task_state as enum (
30 | 'pending',
31 | 'in_progress',
32 | 'succeeded',
33 | 'cancelled',
34 | 'failed'
35 | );
36 |
37 | create table underway.task (
38 | id uuid not null,
39 | task_queue_name text not null,
40 | input jsonb not null,
41 | state underway.task_state not null default 'pending',
42 | retry_count integer not null default 0,
43 | max_attempts integer not null default 5,
44 | initial_interval_ms integer not null default 1000,
45 | max_interval_ms integer not null default 60000,
46 | backoff_coefficient real not null default 2.0,
47 | error_message text,
48 | timeout interval not null default interval '15 minutes',
49 | ttl interval not null default interval '14 days',
50 | delay interval not null default interval '0',
51 | concurrency_key text,
52 | priority integer not null default 0,
53 | created_at timestamp with time zone not null default now(),
54 | updated_at timestamp with time zone not null default now(),
55 | started_at timestamp with time zone,
56 | succeeded_at timestamp with time zone,
57 | last_failed_at timestamp with time zone,
58 | primary key (id, task_queue_name),
59 | foreign key (task_queue_name) references underway.task_queue(name)
60 | );
61 |
62 | create unique index idx_task_concurrency_key_unique
63 | on underway.task (task_queue_name, concurrency_key)
64 | where concurrency_key is not null and state in ('pending', 'in_progress');
65 |
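The partial unique index at the end of this migration is what enforces concurrency keys: at most one `pending` or `in_progress` task may exist per `(task_queue_name, concurrency_key)` pair. A sketch of the effect using raw `sqlx` (hypothetical; assumes a queue row named "example" already exists and abbreviates the column list the crate's own enqueue path would set):

```rust
use sqlx::PgPool;
use uuid::Uuid;

async fn demo(pool: &PgPool) -> Result<(), sqlx::Error> {
    let insert = "insert into underway.task (id, task_queue_name, input, concurrency_key)
                  values ($1, $2, $3, $4)";
    sqlx::query(insert)
        .bind(Uuid::new_v4())
        .bind("example")
        .bind(serde_json::json!({}))
        .bind("user-42")
        .execute(pool)
        .await?;

    // A second pending task with the same key violates
    // idx_task_concurrency_key_unique and errors out.
    let duplicate = sqlx::query(insert)
        .bind(Uuid::new_v4())
        .bind("example")
        .bind(serde_json::json!({}))
        .bind("user-42")
        .execute(pool)
        .await;
    assert!(duplicate.is_err());

    Ok(())
}
```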
--------------------------------------------------------------------------------
/migrations/20241024174106_1.sql:
--------------------------------------------------------------------------------
1 | -- Force anything running this migration to use the right search path.
2 | set local search_path to underway;
3 |
4 | -- function to notify about task changes
5 | create or replace function underway.task_change_notify()
6 | returns trigger as $$
7 | begin
8 | if (new.state = 'pending') then
9 | perform pg_notify('task_change', json_build_object(
10 | 'task_queue_name', new.task_queue_name
11 | )::text);
12 | end if;
13 |
14 | return new;
15 | end;
16 | $$ language plpgsql;
17 |
18 | -- trigger that calls the function after task changes
19 | create trigger task_changed
20 | after insert or update on underway.task
21 | for each row
22 | execute procedure underway.task_change_notify();
23 |
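This trigger is what lets consumers react to newly pending tasks without polling. A minimal sketch of listening on the channel, assuming a connected `PgPool` (the crate wires this up internally; the channel name and payload come from the function above):

```rust
use sqlx::postgres::PgListener;

async fn watch_task_changes(pool: &sqlx::PgPool) -> Result<(), sqlx::Error> {
    let mut listener = PgListener::connect_with(pool).await?;
    listener.listen("task_change").await?;
    loop {
        // The payload is the JSON built by task_change_notify(), e.g.
        // {"task_queue_name": "example"}.
        let notification = listener.recv().await?;
        println!("task change: {}", notification.payload());
    }
}
```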
--------------------------------------------------------------------------------
/migrations/20241105164503_2.sql:
--------------------------------------------------------------------------------
1 | -- Force anything running this migration to use the right search path.
2 | set local search_path to underway;
3 |
4 | -- Remove attempt-specific columns from `task`
5 | alter table underway.task
6 | drop column if exists retry_count,
7 | drop column if exists max_attempts,
8 | drop column if exists initial_interval_ms,
9 | drop column if exists max_interval_ms,
10 | drop column if exists backoff_coefficient,
11 | drop column if exists error_message,
12 | drop column if exists started_at,
13 | drop column if exists succeeded_at,
14 | drop column if exists last_failed_at;
15 |
16 | -- Define retry policies as their own type
17 | create type underway.task_retry_policy as (
18 | max_attempts int,
19 | initial_interval_ms int,
20 | max_interval_ms int,
21 | backoff_coefficient float
22 | );
23 |
24 | alter table underway.task
25 | add column if not exists retry_policy underway.task_retry_policy not null
26 | default row(5, 1000, 60000, 2.0)::underway.task_retry_policy;
27 |
28 | alter table underway.task
29 | add column if not exists completed_at timestamp with time zone;
30 |
31 | alter table underway.task
32 | add column if not exists last_attempt_at timestamp with time zone;
33 |
34 | create table underway.task_attempt (
35 | task_id uuid not null,
36 | task_queue_name text not null,
37 | attempt_number integer not null,
38 |
39 | -- Task state.
40 | state underway.task_state not null default 'in_progress',
41 |
42 | -- Error metadata.
43 | error_message text,
44 |
45 | started_at timestamp with time zone not null default now(),
46 | updated_at timestamp with time zone not null default now(),
47 | completed_at timestamp with time zone,
48 |
49 | primary key (task_id, task_queue_name, attempt_number),
50 | foreign key (task_id, task_queue_name) references underway.task(id, task_queue_name) on delete cascade
51 | );
52 |
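Because `retry_policy` is a composite type, individual fields are read with Postgres field-access syntax, i.e. `(retry_policy).max_attempts`. A hypothetical query (not part of the crate) illustrating this:

```rust
use sqlx::PgPool;
use uuid::Uuid;

// Read one field of the composite retry_policy column for a given task.
async fn max_attempts(pool: &PgPool, id: Uuid) -> Result<i32, sqlx::Error> {
    sqlx::query_scalar(
        "select (retry_policy).max_attempts
         from underway.task
         where task_queue_name = $1 and id = $2",
    )
    .bind("example")
    .bind(id)
    .fetch_one(pool)
    .await
}
```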
--------------------------------------------------------------------------------
/migrations/20241110164319_3.sql:
--------------------------------------------------------------------------------
1 | -- Force anything running this migration to use the right search path.
2 | set local search_path to underway;
3 |
4 | alter table underway.task
5 | add column if not exists heartbeat interval not null default interval '30 seconds';
6 |
7 | alter table underway.task
8 | add column if not exists last_heartbeat_at timestamp with time zone;
9 |
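These columns make stalled tasks detectable: an in-progress task whose last heartbeat is older than its own `heartbeat` interval has presumably lost its worker. One way a supervisor might query for such tasks (a sketch; the crate's actual recovery logic may differ):

```rust
use sqlx::PgPool;
use uuid::Uuid;

async fn stalled_task_ids(pool: &PgPool) -> Result<Vec<Uuid>, sqlx::Error> {
    sqlx::query_scalar(
        "select id
         from underway.task
         where state = 'in_progress'
           and last_heartbeat_at < now() - heartbeat",
    )
    .fetch_all(pool)
    .await
}
```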
--------------------------------------------------------------------------------
/migrations/20241111174958_4.sql:
--------------------------------------------------------------------------------
1 | -- Force anything running this migration to use the right search path.
2 | set local search_path to underway;
3 |
4 | -- Rename the 'name' column in 'task_schedule' to 'task_queue_name' for consistency.
5 | alter table underway.task_schedule
6 | rename column name to task_queue_name;
7 |
8 | -- Update the primary key constraint to use the new column name.
9 | alter table underway.task_schedule
10 | drop constraint task_schedule_pkey,
11 | add primary key (task_queue_name);
12 |
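After the rename, schedules are keyed by `task_queue_name` like the other tables. A hypothetical lookup against the renamed column:

```rust
use sqlx::PgPool;

// Fetch a queue's schedule and time zone, if one is configured.
async fn schedule_for(
    pool: &PgPool,
    queue: &str,
) -> Result<Option<(String, String)>, sqlx::Error> {
    sqlx::query_as(
        "select schedule, timezone
         from underway.task_schedule
         where task_queue_name = $1",
    )
    .bind(queue)
    .fetch_optional(pool)
    .await
}
```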
--------------------------------------------------------------------------------
/migrations/20241126224448_5.sql:
--------------------------------------------------------------------------------
1 | -- Force anything running this migration to use the right search path.
2 | set local search_path to underway;
3 |
4 | alter table underway.task_attempt drop constraint task_attempt_task_id_task_queue_name_fkey;
5 |
6 | alter table underway.task drop constraint task_pkey;
7 | alter table underway.task add constraint task_pkey
8 | primary key (task_queue_name, id);
9 |
10 | alter table underway.task_attempt drop constraint task_attempt_pkey;
11 | alter table underway.task_attempt add constraint task_attempt_pkey
12 | primary key (task_queue_name, task_id, attempt_number);
13 |
14 | alter table underway.task_attempt add constraint task_attempt_task_queue_name_task_id_fkey
15 | foreign key (task_queue_name, task_id) references underway.task(task_queue_name, id) on delete cascade;
16 |
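With both primary keys now leading with `task_queue_name`, queries that filter by queue first align with the key order. A hypothetical join over the reordered composite key:

```rust
use sqlx::PgPool;
use uuid::Uuid;

// Count attempts per task within one queue.
async fn attempts_per_task(
    pool: &PgPool,
    queue: &str,
) -> Result<Vec<(Uuid, i64)>, sqlx::Error> {
    sqlx::query_as(
        "select t.id, count(a.attempt_number)
         from underway.task t
         left join underway.task_attempt a
           on a.task_queue_name = t.task_queue_name and a.task_id = t.id
         where t.task_queue_name = $1
         group by t.id",
    )
    .bind(queue)
    .fetch_all(pool)
    .await
}
```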
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | format_code_in_doc_comments = true
2 | format_strings = true
3 | imports_granularity = "Crate"
4 | group_imports = "StdExternalCrate"
5 | wrap_comments = true
6 |
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! # Underway
2 | //!
3 | //! ⏳ Durable step functions via Postgres.
4 | //!
5 | //! # Overview
6 | //!
7 | //! **Underway** provides durable background jobs over Postgres. Jobs are
8 | //! composed of a sequence of one or more steps. Each step takes the output of
9 | //! the previous step as its input. These simple workflows provide a powerful
10 | //! interface to common deferred work use cases.
11 | //!
12 | //! Key Features:
13 | //!
14 | //! - **PostgreSQL-Backed**: Leverages PostgreSQL with `FOR UPDATE SKIP LOCKED`
15 | //! for reliable task storage and coordination.
16 | //! - **Atomic Task Management**: Enqueue tasks within your transactions and use
17 | //! the worker's transaction within your tasks for atomic queries.
18 | //! - **Automatic Retries**: Configurable retry strategies ensure tasks are
19 | //! reliably completed, even after transient failures.
20 | //! - **Cron-Like Scheduling**: Schedule recurring tasks with cron-like
21 | //! expressions for automated, time-based job execution.
22 | //! - **Scalable and Flexible**: Easily scales from a single worker to many,
23 | //! enabling seamless background job processing with minimal setup.
24 | //!
25 | //! # Examples
26 | //!
27 | //! Underway is suitable for many different use cases, ranging from simple
28 | //! single-step jobs to more sophisticated multi-step jobs, where dependencies
29 | //! are built up between steps.
30 | //!
31 | //! ## Welcome emails
32 | //!
33 | //! A common use case is deferring work that can be processed later. For
34 | //! instance, during user registration, we might want to send a welcome email to
35 | //! new users. Rather than handling this within the registration process (e.g.,
36 | //! form validation, database insertion), we can offload it to run "out-of-band"
37 | //! using Underway. By defining a job for sending the welcome email, Underway
38 | //! ensures it gets processed in the background, without slowing down the user
39 | //! registration flow.
40 | //!
41 | //! ```rust,no_run
42 | //! use std::env;
43 | //!
44 | //! use serde::{Deserialize, Serialize};
45 | //! use sqlx::PgPool;
46 | //! use underway::{Job, To};
47 | //!
48 | //! // This is the input we'll provide to the job when we enqueue it.
49 | //! #[derive(Deserialize, Serialize)]
50 | //! struct WelcomeEmail {
51 | //! user_id: i32,
52 | //! email: String,
53 | //! name: String,
54 | //! }
55 | //!
56 | //! #[tokio::main]
57 | //! async fn main() -> Result<(), Box<dyn std::error::Error>> {
58 | //! // Set up the database connection pool.
59 | //! let database_url = &env::var("DATABASE_URL").expect("DATABASE_URL should be set");
60 | //! let pool = PgPool::connect(database_url).await?;
61 | //!
62 | //! // Run migrations.
63 | //! underway::run_migrations(&pool).await?;
64 | //!
65 | //! // Build the job.
66 | //! let job = Job::builder()
67 | //! .step(
68 | //! |_cx,
69 | //! WelcomeEmail {
70 | //! user_id,
71 | //! email,
72 | //! name,
73 | //! }| async move {
74 | //! // Simulate sending an email.
75 | //! println!("Sending welcome email to {name} <{email}> (user_id: {user_id})");
76 | //! // Returning this indicates this is the final step.
77 | //! To::done()
78 | //! },
79 | //! )
80 | //! .name("welcome-email")
81 | //! .pool(pool)
82 | //! .build()
83 | //! .await?;
84 | //!
85 | //! // Here we enqueue a new job to be processed later.
86 | //! job.enqueue(&WelcomeEmail {
87 | //! user_id: 42,
88 | //! email: "ferris@example.com".to_string(),
89 | //! name: "Ferris".to_string(),
90 | //! })
91 | //! .await?;
92 | //!
93 | //! // Start processing enqueued jobs.
94 | //! job.start().await??;
95 | //!
96 | //! Ok(())
97 | //! }
98 | //! ```
99 | //!
100 | //! ## Order receipts
101 | //!
102 | //! Another common use case is defining dependencies between discrete steps of a
103 | //! job. For instance, we might generate PDF receipts for orders and then email
104 | //! these to customers. With Underway, each step is handled separately, making
105 | //! it easy to create a job that first generates the PDF and, once
106 | //! completed, proceeds to send the email.
107 | //!
108 | //! This separation provides significant value: if the email sending service
109 | //! is temporarily unavailable, we can retry the email step without having to
110 | //! regenerate the PDF, avoiding unnecessary repeated work.
111 | //!
112 | //! ```rust,no_run
113 | //! use std::env;
114 | //!
115 | //! use serde::{Deserialize, Serialize};
116 | //! use sqlx::PgPool;
117 | //! use underway::{Job, To};
118 | //!
119 | //! #[derive(Deserialize, Serialize)]
120 | //! struct GenerateReceipt {
121 | //! // An order we want to generate a receipt for.
122 | //! order_id: i32,
123 | //! }
124 | //!
125 | //! #[derive(Deserialize, Serialize)]
126 | //! struct EmailReceipt {
127 | //! // An object store key to our receipt PDF.
128 | //! receipt_key: String,
129 | //! }
130 | //!
131 | //! #[tokio::main]
132 | //! async fn main() -> Result<(), Box<dyn std::error::Error>> {
133 | //! // Set up the database connection pool.
134 | //! let database_url = &env::var("DATABASE_URL").expect("DATABASE_URL should be set");
135 | //! let pool = PgPool::connect(database_url).await?;
136 | //!
137 | //! // Run migrations.
138 | //! underway::run_migrations(&pool).await?;
139 | //!
140 | //! // Build the job.
141 | //! let job = Job::builder()
142 | //! .step(|_cx, GenerateReceipt { order_id }| async move {
143 | //! // Use the order ID to build a receipt PDF...
144 | //! let receipt_key = format!("receipts_bucket/{order_id}-receipt.pdf");
145 | //! // ...store the PDF in an object store.
146 | //!
147 | //! // We proceed to the next step with the receipt_key as its input.
148 | //! To::next(EmailReceipt { receipt_key })
149 | //! })
150 | //! .step(|_cx, EmailReceipt { receipt_key }| async move {
151 | //! // Retrieve the PDF from the object store, and send the email.
152 | //! println!("Emailing receipt for {receipt_key}");
153 | //! To::done()
154 | //! })
155 | //! .name("order-receipt")
156 | //! .pool(pool)
157 | //! .build()
158 | //! .await?;
159 | //!
160 | //! // Enqueue the job for the given order.
161 | //! job.enqueue(&GenerateReceipt { order_id: 42 }).await?;
162 | //!
163 | //! // Start processing enqueued jobs.
164 | //! job.start().await??;
165 | //!
166 | //! Ok(())
167 | //! }
168 | //! ```
169 | //!
170 | //! With this setup, if the email service is down, the `EmailReceipt` step can
171 | //! be retried without redoing the PDF generation, saving time and resources by
172 | //! not repeating the expensive step of generating the PDF.
173 | //!
174 | //! ## Daily reports
175 | //!
176 | //! Jobs may also be run on a schedule. This makes them useful for situations
177 | //! where we want to do things on a regular cadence, such as creating a daily
178 | //! business report.
179 | //!
180 | //! ```rust,no_run
181 | //! use std::env;
182 | //!
183 | //! use serde::{Deserialize, Serialize};
184 | //! use sqlx::PgPool;
185 | //! use underway::{Job, To};
186 | //!
187 | //! #[derive(Deserialize, Serialize)]
188 | //! struct DailyReport;
189 | //!
190 | //! #[tokio::main]
191 | //! async fn main() -> Result<(), Box<dyn std::error::Error>> {
192 | //! // Set up the database connection pool.
193 | //! let database_url = &env::var("DATABASE_URL").expect("DATABASE_URL should be set");
194 | //! let pool = PgPool::connect(database_url).await?;
195 | //!
196 | //! // Run migrations.
197 | //! underway::run_migrations(&pool).await?;
198 | //!
199 | //! // Build the job.
200 | //! let job = Job::builder()
201 | //! .step(|_cx, _| async move {
202 | //! // Here we would generate and store the report.
203 | //! To::done()
204 | //! })
205 | //! .name("daily-report")
206 | //! .pool(pool)
207 | //! .build()
208 | //! .await?;
209 | //!
210 | //! // Set a daily schedule with the given input.
211 | //! let daily = "@daily[America/Los_Angeles]".parse()?;
212 | //! job.schedule(&daily, &DailyReport).await?;
213 | //!
214 | //! // Start processing enqueued jobs.
215 | //! job.start().await??;
216 | //!
217 | //! Ok(())
218 | //! }
219 | //! ```
220 | //!
221 | //! # Concepts
222 | //!
223 | //! Underway has been designed around several core concepts, which build on one
224 | //! another to deliver a robust background-job framework:
225 | //!
226 | //! - [Tasks](#tasks) represent a well-structured unit of work.
227 | //! - [Jobs](#jobs) are a series of sequential steps, where each step is a
228 | //! [`Task`].
229 | //! - [Queues](#queues) provide an interface for managing task lifecycle.
230 | //! - [Workers](#workers) interface with queues to execute tasks.
231 | //!
232 | //! ## Tasks
233 | //!
234 | //! Tasks are units of work to be executed, with clearly defined behavior and
235 | //! input.
236 | //!
237 | //! This is the lowest-level concept in our design, with everything else being
238 | //! built on top of or around this idea.
239 | //!
240 | //! See [`task`] for more details about tasks.
241 | //!
242 | //! ## Jobs
243 | //!
244 | //! Jobs are a series of sequential steps. Each step provides input to the next
245 | //! step in the series.
246 | //!
247 | //! In most cases, applications will use jobs to define tasks instead of using
248 | //! the `Task` trait directly.
249 | //!
250 | //! See [`job`] for more details about jobs.
251 | //!
252 | //! ## Queues
253 | //!
254 | //! Queues manage task lifecycle, including enqueuing and dequeuing them from
255 | //! the database.
256 | //!
257 | //! See [`queue`] for more details about queues.
258 | //!
259 | //! ## Workers
260 | //!
261 | //! Workers are responsible for executing tasks. They poll the queue for new
262 | //! tasks, and when found, try to invoke the task's execute routine.
263 | //!
264 | //! See [`worker`] for more details about workers.
265 | //!
266 | //! ## Strata
267 | //!
268 | //! The Underway system is split into a **lower-level** and a **higher-level**
269 | //! system, where the latter is the **job** abstraction and the former is
270 | //! everything else. More specifically the lower-level components are the
271 | //! **queue**, **worker**, **scheduler**, and **task**. The locus of the
272 | //! composite system is the task, with all components being built with or around
273 | //! it.
274 | //!
275 | //! ```text
276 | //! ╭───────────────╮
277 | //! │ Job │
278 | //! │ (impl Task) │
279 | //! ╰───────────────╯
280 | //! ┆
281 | //! ▼
282 | //! ╭───────────────╮
283 | //! ┏━━│ Queue │◀━┓
284 | //! ┃ ╰───────────────╯ ┃
285 | //! ╭───────────────╮ ┃ ◊ ┃ ╭───────────────╮
286 | //! │ Worker │◀━┩ │ ┡━━│ Scheduler │
287 | //! ╰───────────────╯ │ ╭───────────────╮ │ ╰───────────────╯
288 | //! └─▶│ Task │◀─┘
289 | //! ╰───────────────╯
290 | //! ```
291 | //!
292 | //! These components are designed to promote clear [separation of
293 | //! concerns][SoC], with each having a well-defined purpose and clear boundary
294 | //! in relationship to the other components.
295 | //!
296 | //! For example, queues manage task life cycle, encapsulating state transitions
297 | //! and persisting the task's canonical state in the database, whereas workers
298 | //! and schedulers interface with the queue to process tasks or enqueue tasks
299 | //! for execution, respectively.
300 | //!
301 | //! At the uppermost layer, jobs are built on top of this subsystem, and are an
302 | //! implementation of the `Task` trait. Put another way, the lower-level system
303 | //! is unaware of the concept of a "job" and treats it like any other task.
304 | //!
305 | //! [SoC]: https://en.wikipedia.org/wiki/Separation_of_concerns
306 | #![warn(clippy::all, nonstandard_style, future_incompatible, missing_docs)]
307 |
308 | use sqlx::{migrate::Migrator, Acquire, Postgres};
309 |
310 | pub use crate::{
311 | job::{Job, To},
312 | queue::Queue,
313 | scheduler::{Scheduler, ZonedSchedule},
314 | task::{Task, ToTaskResult},
315 | worker::Worker,
316 | };
317 |
318 | pub mod job;
319 | pub mod queue;
320 | mod scheduler;
321 | pub mod task;
322 | pub mod worker;
323 |
324 | static MIGRATOR: Migrator = sqlx::migrate!();
325 |
326 | /// Runs Underway migrations.
327 | ///
328 | /// These migrations must be applied before queues, tasks, and workers can be
329 | /// run.
330 | ///
331 | /// A transaction is acquired via the provided connection and migrations are run
332 | /// via this transaction.
333 | ///
334 | /// As there is no direct support for specifying the schema under which the
335 | /// migrations table will live, we manually specify this via the search path.
336 | /// This ensures that migrations are isolated to underway._sqlx_migrations.
337 | ///
338 | /// **Note**: Changes are managed within a dedicated schema, called "underway".
339 | ///
340 | /// # Example
341 | ///
342 | /// ```rust,no_run
343 | /// # use tokio::runtime::Runtime;
344 | /// use std::env;
345 | ///
346 | /// use sqlx::PgPool;
347 | ///
348 | /// # fn main() {
349 | /// # let rt = Runtime::new().unwrap();
350 | /// # rt.block_on(async {
351 | /// // Set up the database connection pool.
352 | /// let database_url = &env::var("DATABASE_URL")?;
353 | /// let pool = PgPool::connect(database_url).await?;
354 | ///
355 | /// // Run migrations.
356 | /// underway::run_migrations(&pool).await?;
357 | /// # Ok::<(), Box<dyn std::error::Error>>(())
358 | /// # });
359 | /// # }
360 | /// ```
360 | pub async fn run_migrations<'a, A>(conn: A) -> Result<(), sqlx::Error>
361 | where
362 | A: Acquire<'a, Database = Postgres>,
363 | {
364 | let mut tx = conn.begin().await?;
365 |
366 | // Ensure the 'underway' schema exists
367 | sqlx::query!("create schema if not exists underway;")
368 | .execute(&mut *tx)
369 | .await?;
370 |
371 | // Temporarily set search_path for this transaction
372 | sqlx::query!("set local search_path to underway;")
373 | .execute(&mut *tx)
374 | .await?;
375 |
376 | // Run migrations within the 'underway' schema
377 | MIGRATOR.run(&mut *tx).await?;
378 |
379 | tx.commit().await?;
380 |
381 | Ok(())
382 | }
383 |
384 | #[cfg(test)]
385 | mod tests {
386 | use sqlx::PgPool;
387 |
388 | use super::run_migrations;
389 |
390 | #[sqlx::test(migrations = false)]
391 | async fn sanity_check_run_migrations(pool: PgPool) -> Result<(), sqlx::Error> {
392 | run_migrations(&pool).await?;
393 |
394 | let schema_exists: bool = sqlx::query_scalar!(
395 | r#"
396 | select exists (
397 | select 1 from pg_namespace where nspname = 'underway'
398 | );
399 | "#,
400 | )
401 | .fetch_one(&pool)
402 | .await?
403 | .unwrap();
404 | assert!(
405 | schema_exists,
406 | "Schema 'underway' should exist after migrations."
407 | );
408 |
409 | let migrations_table_exists: bool = sqlx::query_scalar!(
410 | r#"
411 | select exists (
412 | select 1 from information_schema.tables
413 | where table_schema = 'underway' and
414 | table_name = '_sqlx_migrations'
415 | );
416 | "#,
417 | )
418 | .fetch_one(&pool)
419 | .await?
420 | .unwrap();
421 | assert!(
422 | migrations_table_exists,
423 | "Migrations table should exist in 'underway' schema."
424 | );
425 |
426 | let search_path: String = sqlx::query_scalar("show search_path;")
427 | .fetch_one(&pool)
428 | .await?;
429 |
430 | assert!(
431 | !search_path.contains("underway"),
432 | "search_path should not include 'underway' after the transaction."
433 | );
434 |
435 | assert!(
436 | search_path.contains("public"),
437 | "Default search_path should include 'public'."
438 | );
439 |
440 | Ok(())
441 | }
442 | }
443 |
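The "Strata" section above describes how the lower-level components compose without `Job`. A sketch of driving them directly, mirroring the doc examples in `scheduler.rs` below (`ExampleTask` is a stand-in implementer of `Task`):

```rust
use sqlx::{PgPool, Postgres, Transaction};
use underway::{task::Result as TaskResult, Queue, Scheduler, Task};

struct ExampleTask;

impl Task for ExampleTask {
    type Input = ();
    type Output = ();

    async fn execute(
        &self,
        _tx: Transaction<'_, Postgres>,
        _input: Self::Input,
    ) -> TaskResult<Self::Output> {
        Ok(())
    }
}

async fn run_lower_level(pool: PgPool) -> Result<(), Box<dyn std::error::Error>> {
    underway::run_migrations(&pool).await?;

    // A queue plus an explicit scheduler over a raw `Task`.
    let queue = Queue::builder().name("example").pool(pool).build().await?;
    let scheduler = Scheduler::new(queue, ExampleTask);
    tokio::spawn(async move { scheduler.run().await });

    Ok(())
}
```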
--------------------------------------------------------------------------------
/src/scheduler.rs:
--------------------------------------------------------------------------------
1 | use std::{result::Result as StdResult, str::FromStr, sync::Arc, time::Duration as StdDuration};
2 |
3 | use jiff::{tz::TimeZone, Zoned};
4 | use jiff_cron::{Schedule, ScheduleIterator};
5 | use sqlx::postgres::{PgAdvisoryLock, PgListener};
6 | use tokio::time::Instant;
7 | use tokio_util::sync::CancellationToken;
8 | use tracing::instrument;
9 |
10 | use crate::{
11 | queue::{try_acquire_advisory_lock, Error as QueueError, SHUTDOWN_CHANNEL},
12 | Queue, Task,
13 | };
14 |
15 | pub(crate) type Result<T = ()> = std::result::Result<T, Error>;
16 |
17 | #[derive(Debug, thiserror::Error)]
18 | pub enum Error {
19 | #[error(transparent)]
20 | Queue(#[from] QueueError),
21 |
22 | #[error(transparent)]
23 | Database(#[from] sqlx::Error),
24 |
25 | #[error(transparent)]
26 | Jiff(#[from] jiff::Error),
27 |
28 | #[error(transparent)]
29 | Cron(#[from] jiff_cron::error::Error),
30 | }
31 |
32 | /// Scheduler for running task schedules.
33 | ///
34 | /// # Singleton behavior
35 | ///
36 | /// In order to ensure schedules are dispatched at most once, only a single
37 | /// instance of a scheduler is allowed to run per queue. Internally this is
38 | /// managed via an [advisory lock][advisory-lock]. The lock is keyed to the name
39 | /// of the queue the scheduler belongs to.
40 | ///
41 | /// When a scheduler is run it will attempt to acquire its lock. When it can,
42 | /// the run method loops indefinitely. However, when the lock cannot be
43 | /// acquired, e.g. because another scheduler is already running, it will return.
44 | ///
45 | /// [advisory-lock]: https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS
46 | pub struct Scheduler<T: Task> {
47 | queue: Queue<T>,
48 | queue_lock: PgAdvisoryLock,
49 | task: Arc<T>,
50 |
51 | // When this token is cancelled the queue has been shutdown.
52 | shutdown_token: CancellationToken,
53 | }
54 |
55 | impl<T: Task> Scheduler<T> {
56 | /// Creates a new scheduler with the given queue and task.
57 | ///
58 | /// # Example
59 | ///
60 | /// ```rust,no_run
61 | /// # use sqlx::{PgPool, Transaction, Postgres};
62 | /// # use underway::{Task, task::Result as TaskResult, Queue};
63 | /// use underway::Scheduler;
64 | ///
65 | /// # struct ExampleTask;
66 | /// # impl Task for ExampleTask {
67 | /// # type Input = ();
68 | /// # type Output = ();
69 | /// # async fn execute(
70 | /// # &self,
71 | /// # _: Transaction<'_, Postgres>,
72 | /// # _: Self::Input,
73 | /// # ) -> TaskResult<Self::Output> {
74 | /// # Ok(())
75 | /// # }
76 | /// # }
77 | /// # use tokio::runtime::Runtime;
78 | /// # fn main() {
79 | /// # let rt = Runtime::new().unwrap();
80 | /// # rt.block_on(async {
81 | /// # let pool = PgPool::connect(&std::env::var("DATABASE_URL")?).await?;
82 | /// # let queue = Queue::builder()
83 | /// # .name("example")
84 | /// # .pool(pool.clone())
85 | /// # .build()
86 | /// # .await?;
87 | /// # /*
88 | /// let queue = { /* A `Queue`. */ };
89 | /// # */
90 | /// # let task = ExampleTask;
91 | /// # /*
92 | /// let task = { /* An implementer of `Task`. */ };
93 | /// # */
94 | /// #
95 | ///
96 | /// Scheduler::new(queue, task);
97 | /// # Ok::<(), Box<dyn std::error::Error>>(())
98 | /// # });
99 | /// # }
100 | /// ```
101 | pub fn new(queue: Queue, task: T) -> Self {
102 | let queue_lock = queue_scheduler_lock(&queue.name);
103 | Self {
104 | queue,
105 | queue_lock,
106 | task: Arc::new(task),
107 | shutdown_token: CancellationToken::new(),
108 | }
109 | }
110 |
111 | /// Sets the shutdown token.
112 | ///
113 | /// # Example
114 | ///
115 | /// ```rust,no_run
116 | /// # use sqlx::{PgPool, Transaction, Postgres};
117 | /// # use underway::{Task, task::Result as TaskResult, Queue, Scheduler};
118 | /// use tokio_util::sync::CancellationToken;
119 | ///
120 | /// # struct ExampleTask;
121 | /// # impl Task for ExampleTask {
122 | /// # type Input = ();
123 | /// # type Output = ();
124 | /// # async fn execute(
125 | /// # &self,
126 | /// # _: Transaction<'_, Postgres>,
127 | /// # _: Self::Input,
128 | /// # ) -> TaskResult<Self::Output> {
129 | /// # Ok(())
130 | /// # }
131 | /// # }
132 | /// # use tokio::runtime::Runtime;
133 | /// # fn main() {
134 | /// # let rt = Runtime::new().unwrap();
135 | /// # rt.block_on(async {
136 | /// # let pool = PgPool::connect(&std::env::var("DATABASE_URL")?).await?;
137 | /// # let queue = Queue::builder()
138 | /// # .name("example")
139 | /// # .pool(pool.clone())
140 | /// # .build()
141 | /// # .await?;
142 | /// # let task = ExampleTask;
143 | /// # let mut scheduler = Scheduler::new(queue, task);
144 | /// # /*
145 | /// let mut scheduler = { /* A `Scheduler`. */ };
146 | /// # */
147 | /// #
148 | ///
149 | /// // Set a custom cancellation token.
150 | /// let token = CancellationToken::new();
151 | /// scheduler.set_shutdown_token(token);
152 | /// # Ok::<(), Box<dyn std::error::Error>>(())
153 | /// # });
154 | /// # }
155 | /// ```
156 | pub fn set_shutdown_token(&mut self, shutdown_token: CancellationToken) {
157 | self.shutdown_token = shutdown_token;
158 | }
159 |
160 | /// Cancels the shutdown token causing the scheduler to exit.
161 | ///
162 | /// ```rust,no_run
163 | /// # use sqlx::{PgPool, Transaction, Postgres};
164 | /// # use underway::{Task, task::Result as TaskResult, Queue, Scheduler};
165 | /// use tokio_util::sync::CancellationToken;
166 | ///
167 | /// # struct ExampleTask;
168 | /// # impl Task for ExampleTask {
169 | /// # type Input = ();
170 | /// # type Output = ();
171 | /// # async fn execute(
172 | /// # &self,
173 | /// # _: Transaction<'_, Postgres>,
174 | /// # _: Self::Input,
175 | /// # ) -> TaskResult<Self::Output> {
176 | /// # Ok(())
177 | /// # }
178 | /// # }
179 | /// # use tokio::runtime::Runtime;
180 | /// # fn main() {
181 | /// # let rt = Runtime::new().unwrap();
182 | /// # rt.block_on(async {
183 | /// # let pool = PgPool::connect(&std::env::var("DATABASE_URL")?).await?;
184 | /// # let queue = Queue::builder()
185 | /// # .name("example")
186 | /// # .pool(pool.clone())
187 | /// # .build()
188 | /// # .await?;
189 | /// # let task = ExampleTask;
190 | /// # let scheduler = Scheduler::new(queue, task);
191 | /// # /*
192 | /// let scheduler = { /* A `Scheduler`. */ };
193 | /// # */
194 | /// #
195 | ///
196 | /// // Stop the scheduler.
197 | /// scheduler.shutdown();
198 | /// # Ok::<(), Box<dyn std::error::Error>>(())
199 | /// # });
200 | /// # }
201 | /// ```
202 | pub fn shutdown(&self) {
203 | self.shutdown_token.cancel();
204 | }
205 |
206 | /// Loops over the configured schedule, enqueuing tasks as the duration
207 | /// arrives.
208 | ///
209 | /// # Errors
210 | ///
211 | /// This function returns an error if:
212 | ///
213 | /// - It cannot acquire a new connection from the queue's pool.
214 | /// - It fails to listen on the shutdown channel.
215 | /// - The cron expression or time zone IANA name is malformed.
216 | ///
217 | /// # Example
218 | ///
219 | /// ```rust,no_run
220 | /// # use sqlx::{PgPool, Transaction, Postgres};
221 | /// # use underway::{Task, task::Result as TaskResult, Queue, Scheduler};
222 | /// # struct ExampleTask;
223 | /// # impl Task for ExampleTask {
224 | /// # type Input = ();
225 | /// # type Output = ();
226 | /// # async fn execute(
227 | /// # &self,
228 | /// # _: Transaction<'_, Postgres>,
229 | /// # _: Self::Input,
230 | /// # ) -> TaskResult<Self::Output> {
231 | /// # Ok(())
232 | /// # }
233 | /// # }
234 | /// # use tokio::runtime::Runtime;
235 | /// # fn main() {
236 | /// # let rt = Runtime::new().unwrap();
237 | /// # rt.block_on(async {
238 | /// # let pool = PgPool::connect(&std::env::var("DATABASE_URL")?).await?;
239 | /// # let queue = Queue::builder()
240 | /// # .name("example")
241 | /// # .pool(pool)
242 | /// # .build()
243 | /// # .await?;
244 | /// # let task = ExampleTask;
245 | /// # let scheduler = Scheduler::new(queue, task);
246 | /// # /*
247 | /// let scheduler = { /* A `Scheduler`. */ };
248 | /// # */
249 | /// #
250 | ///
251 | /// // Run the scheduler in separate task.
252 | /// tokio::spawn(async move { scheduler.run().await });
253 | /// # Ok::<(), Box<dyn std::error::Error>>(())
254 | /// # });
255 | /// # }
256 | /// ```
257 | #[instrument(skip(self), fields(queue.name = self.queue.name), err)]
258 | pub async fn run(&self) -> Result {
259 | let conn = self.queue.pool.acquire().await?;
260 | let Some(guard) = try_acquire_advisory_lock(conn, &self.queue_lock).await? else {
261 | tracing::trace!("Scheduler could not acquire lock, exiting");
262 | return Ok(());
263 | };
264 |
265 | let Some((zoned_schedule, input)) = self.queue.task_schedule(&self.queue.pool).await?
266 | else {
267 | // No schedule configured, so we'll exit.
268 | return Ok(());
269 | };
270 |
271 | // Set up a listener for shutdown notifications
272 | let mut shutdown_listener = PgListener::connect_with(&self.queue.pool).await?;
273 | shutdown_listener.listen(SHUTDOWN_CHANNEL).await?;
274 |
275 | // TODO: Handle updates to schedules?
276 |
277 | for next in zoned_schedule.iter() {
278 | tracing::debug!(?next, "Waiting until next scheduled task enqueue");
279 |
280 | tokio::select! {
281 | notify_shutdown = shutdown_listener.recv() => {
282 | match notify_shutdown {
283 | Ok(_) => {
284 | self.shutdown_token.cancel();
285 | },
286 | Err(err) => {
287 | tracing::error!(%err, "Postgres shutdown notification error");
288 | }
289 | }
290 | }
291 |
292 | _ = self.shutdown_token.cancelled() => {
293 | guard.release_now().await?;
294 | break
295 | }
296 |
297 | _ = wait_until(&next) => {
298 | self.process_next_schedule(&input).await?
299 | }
300 | }
301 | }
302 |
303 | Ok(())
304 | }
305 |
306 | #[instrument(skip_all, fields(task.id = tracing::field::Empty), err)]
307 | async fn process_next_schedule(&self, input: &T::Input) -> Result {
308 | let task_id = self
309 | .queue
310 | .enqueue(&self.queue.pool, &self.task, input)
311 | .await?;
312 |
313 | tracing::Span::current().record("task.id", task_id.as_hyphenated().to_string());
314 |
315 | Ok(())
316 | }
317 | }
318 |
319 | fn queue_scheduler_lock(queue_name: &str) -> PgAdvisoryLock {
320 | PgAdvisoryLock::new(format!("{queue_name}-scheduler"))
321 | }
322 |
323 | async fn wait_until(next: &Zoned) {
324 | let tz = next.time_zone();
325 | loop {
326 | let now = Zoned::now().with_time_zone(tz.to_owned());
327 | if now >= *next {
328 | break;
329 | }
330 |
331 | let until_next = next.duration_until(&now).unsigned_abs();
332 | if until_next == StdDuration::ZERO {
333 | break;
334 | }
335 |
336 | tokio::time::sleep_until(Instant::now() + until_next).await;
337 | }
338 | }
339 |
340 | /// Schedule paired with its time zone.
341 | #[derive(Debug, PartialEq)]
342 | pub struct ZonedSchedule {
343 | schedule: Schedule,
344 | timezone: TimeZone,
345 | }
346 |
347 | impl ZonedSchedule {
348 | /// Create a new schedule which is associated with a time zone.
349 | pub fn new(cron_expr: &str, time_zone_name: &str) -> StdResult<Self, ZonedScheduleError> {
350 | let schedule = cron_expr.parse()?;
351 | let timezone = TimeZone::get(time_zone_name)?;
352 |
353 | assert!(
354 | timezone.iana_name().is_some(),
355 | "Time zones must use IANA names for now"
356 | );
357 |
358 | Ok(Self { schedule, timezone })
359 | }
360 |
361 | pub(crate) fn cron_expr(&self) -> String {
362 | self.schedule.to_string()
363 | }
364 |
365 | pub(crate) fn iana_name(&self) -> &str {
366 | self.timezone
367 | .iana_name()
368 | .expect("iana_name should always be Some because new ensures valid time zone")
369 | }
370 |
371 | /// Returns an iterator of `Zoned` where each is a time at which the
372 | /// schedule should fire.
373 | pub fn iter(&self) -> ZonedScheduleIterator<'_> {
374 | ZonedScheduleIterator {
375 | upcoming: self.schedule.upcoming(self.timezone.clone()),
376 | }
377 | }
378 | }
379 |
380 | pub struct ZonedScheduleIterator<'a> {
381 | upcoming: ScheduleIterator<'a>,
382 | }
383 |
384 | impl Iterator for ZonedScheduleIterator<'_> {
385 | type Item = Zoned;
386 |
387 | fn next(&mut self) -> Option<Self::Item> {
388 | self.upcoming.next()
389 | }
390 | }
391 |
392 | #[derive(Debug, thiserror::Error)]
393 | pub enum ZonedScheduleError {
394 | #[error(transparent)]
395 | Jiff(#[from] jiff::Error),
396 |
397 | #[error(transparent)]
398 | Cron(#[from] jiff_cron::error::Error),
399 |
400 | #[error("Parsing error: {0}")]
401 | Parse(String),
402 | }
403 |
404 | impl FromStr for ZonedSchedule {
405 | type Err = ZonedScheduleError;
406 |
407 | fn from_str(s: &str) -> StdResult {
408 | // Check if the string ends with a closing bracket ']'
409 | if !s.ends_with(']') {
410 | return Err(ZonedScheduleError::Parse("Missing closing ']'".to_string()));
411 | }
412 |
413 | // Find the position of the opening bracket '['
414 | let open_bracket_pos = s
415 | .find('[')
416 | .ok_or_else(|| ZonedScheduleError::Parse("Missing opening '['".to_string()))?;
417 |
418 | // Extract the cron expression and time zone string
419 | let cron_expr = &s[..open_bracket_pos];
420 | let time_zone_name = &s[open_bracket_pos + 1..s.len() - 1]; // Exclude the closing ']'
421 |
422 | ZonedSchedule::new(cron_expr, time_zone_name)
423 | }
424 | }
425 |
426 | #[cfg(test)]
427 | mod tests {
428 | use std::time::SystemTime;
429 |
430 | use jiff::ToSpan;
431 |
432 | use super::*;
433 |
434 | #[test]
435 | fn zoned_schedule_creation_valid() {
436 | let cron_expr = "0 0 * * * * *"; // Every hour at minute 0
437 | let time_zone_name = "UTC";
438 | let schedule = ZonedSchedule::new(cron_expr, time_zone_name);
439 |
440 | assert!(
441 | schedule.is_ok(),
442 | "Expected ZonedSchedule to be created successfully"
443 | );
444 | }
445 |
446 | #[test]
447 | fn zoned_schedule_creation_invalid_cron() {
448 | let cron_expr = "invalid cron";
449 | let time_zone_name = "UTC";
450 | let schedule = ZonedSchedule::new(cron_expr, time_zone_name);
451 |
452 | assert!(
453 | schedule.is_err(),
454 | "Expected error due to invalid cron expression"
455 | );
456 | }
457 |
458 | #[test]
459 | fn zoned_schedule_creation_invalid_time_zone() {
460 | let cron_expr = "0 0 * * * * *";
461 | let time_zone_name = "Invalid/TimeZone";
462 | let schedule = ZonedSchedule::new(cron_expr, time_zone_name);
463 |
464 | assert!(schedule.is_err(), "Expected error due to invalid time zone");
465 | }
466 |
467 | #[test]
468 | fn zoned_schedule_parses() {
469 | "0 0 * * * *[America/Los_Angeles]"
470 | .parse::<ZonedSchedule>()
471 | .expect("A schedule should be parsed");
472 | }
473 |
474 | #[tokio::test]
475 | async fn wait_until_past_time() {
476 | let tz = TimeZone::UTC;
477 | let next = Zoned::now()
478 | .with_time_zone(tz.to_owned())
479 | .saturating_sub(10.seconds());
480 |
481 | let start = SystemTime::now();
482 | wait_until(&next).await;
483 | let elapsed = start.elapsed().unwrap();
484 | assert!(
485 | elapsed < StdDuration::from_millis(10),
486 | "Expected immediate return"
487 | );
488 | }
489 |
490 | #[tokio::test]
491 | async fn wait_until_future_time() {
492 | let tz = TimeZone::UTC;
493 | let next = Zoned::now()
494 | .with_time_zone(tz.to_owned())
495 | .saturating_add(5.seconds());
496 |
497 | // Pause and control tokio's time
498 | tokio::time::pause();
499 |
500 | let handle = tokio::spawn({
501 | let next = next.clone();
502 | async move { wait_until(&next).await }
503 | });
504 | tokio::time::advance(StdDuration::from_secs(5)).await;
505 |
506 | handle.await.expect("Failed to run wait_until");
507 | let elapsed: StdDuration = (&Zoned::now().with_time_zone(tz.to_owned()) - &next)
508 | .try_into()
509 | .unwrap();
510 | assert!(
511 | elapsed < StdDuration::from_millis(10),
512 | "Expected precise completion"
513 | );
514 | }
515 |
516 | #[tokio::test]
517 | async fn wait_until_exact_time() {
518 | let tz = TimeZone::UTC;
519 | let next = Zoned::now().with_time_zone(tz.to_owned());
520 |
521 | let start = SystemTime::now();
522 | wait_until(&next).await;
523 | let elapsed = start.elapsed().unwrap();
524 | assert!(
525 | elapsed < StdDuration::from_millis(10),
526 | "Expected immediate return"
527 | );
528 | }
529 | }
530 |
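`ZonedSchedule` both parses the `expr[TimeZone]` form and yields upcoming fire times in that zone. A small usage sketch, grounded in the tests above:

```rust
use underway::ZonedSchedule;

fn preview_schedule() -> Result<(), Box<dyn std::error::Error>> {
    // Six-field cron expression (seconds first), as in the tests above:
    // 09:00:00 every day, interpreted in the given IANA time zone.
    let schedule: ZonedSchedule = "0 0 9 * * *[America/Los_Angeles]".parse()?;
    for next in schedule.iter().take(3) {
        println!("next fire: {next}");
    }
    Ok(())
}
```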
--------------------------------------------------------------------------------
/src/task.rs:
--------------------------------------------------------------------------------
1 | //! Tasks represent a well-structured unit of work.
2 | //!
3 | //! A task is defined by implementing the [`execute`](crate::Task::execute)
4 | //! method and specifying the associated type [`Input`](crate::Task::Input).
5 | //! This provides a strongly-typed interface to execute task invocations.
6 | //!
7 | //! Once a task is implemented, it can be enqueued on a [`Queue`](crate::Queue)
8 | //! for processing. A [`Worker`](crate::Worker) can then dequeue the task and
9 | //! invoke its `execute` method, providing the input that has been deserialized
10 | //! into the specified [`Input`](crate::Task::Input) type.
11 | //!
12 | //! Queues and workers operate over tasks to make them useful in the context of
13 | //! your application.
14 | //!
15 | //! # Implementing `Task`
16 | //!
17 | //! Generally you'll want to use the higher-level [`Job`](crate::Job)
18 | //! abstraction instead of implementing `Task` yourself. Its workflow is more
19 | //! ergonomic and therefore preferred for virtually all cases.
20 | //!
21 | //! However, it's possible to implement the trait directly. This may be useful
22 | //! for building more sophisticated behavior on top of the task concept that
23 | //! isn't already provided by `Job`.
24 | //!
25 | //! ```
26 | //! use serde::{Deserialize, Serialize};
27 | //! use sqlx::{Postgres, Transaction};
28 | //! use underway::{task::Result as TaskResult, Task};
29 | //! # use sqlx::PgPool;
30 | //! # use tokio::runtime::Runtime;
31 | //! # fn main() {
32 | //! # let rt = Runtime::new().unwrap();
33 | //! # rt.block_on(async {
34 | //!
35 | //! // Task input representing the data needed to send a welcome email.
36 | //! #[derive(Debug, Deserialize, Serialize)]
37 | //! struct WelcomeEmail {
38 | //! user_id: i32,
39 | //! email: String,
40 | //! name: String,
41 | //! }
42 | //!
43 | //! // Task that sends a welcome email to a user.
44 | //! struct WelcomeEmailTask;
45 | //!
46 | //! impl Task for WelcomeEmailTask {
47 | //! type Input = WelcomeEmail;
48 | //! type Output = ();
49 | //!
50 | //! /// Simulate sending a welcome email by printing a message to the console.
51 | //! async fn execute(
52 | //! &self,
53 | //! _tx: Transaction<'_, Postgres>,
54 | //! input: Self::Input,
55 | //! ) -> TaskResult<Self::Output> {
56 | //! println!(
57 | //! "Sending welcome email to {} <{}> (user_id: {})",
58 | //! input.name, input.email, input.user_id
59 | //! );
60 | //!
61 | //! // Here you would integrate with an email service.
62 | //! // If email sending fails, you could return an error to trigger retries.
63 | //! Ok(())
64 | //! }
65 | //! }
66 | //! # let pool = PgPool::connect(&std::env::var("DATABASE_URL")?).await?;
67 | //! # let tx = pool.begin().await?;
68 | //! # let task = WelcomeEmailTask;
69 | //! # let input = WelcomeEmail {
70 | //! # user_id: 1,
71 | //! # email: "user@example.com".to_string(),
72 | //! # name: "Alice".to_string(),
73 | //! # };
74 | //! # task.execute(tx, input).await?;
75 | //! # Ok::<(), Box<dyn std::error::Error>>(())
76 | //! # });
77 | //! # }
78 | //! ```
79 | use std::{
80 | fmt::{self, Display},
81 | future::Future,
82 | ops::Deref,
83 | result::Result as StdResult,
84 | };
85 |
86 | use jiff::{SignedDuration, Span, ToSpan};
87 | use serde::{de::DeserializeOwned, Deserialize, Serialize};
88 | use sqlx::{Postgres, Transaction};
89 | use ulid::Ulid;
90 | use uuid::Uuid;
91 |
92 | pub(crate) use self::retry_policy::RetryCount;
93 | pub use self::retry_policy::RetryPolicy;
94 |
95 | mod retry_policy;
96 |
97 | /// A type alias for task identifiers.
98 | ///
99 | /// Task IDs are [ULID][ULID]s which are converted to UUIDs for storage.
100 | ///
101 | /// [ULID]: https://github.com/ulid/spec?tab=readme-ov-file#specification
102 | #[derive(Debug, Clone, Copy, Serialize, Deserialize, Hash, Eq, PartialEq, sqlx::Type)]
103 | #[sqlx(transparent)]
104 | pub struct TaskId(Uuid);
105 |
106 | impl TaskId {
107 | pub(crate) fn new() -> Self {
108 | Self(Ulid::new().into())
109 | }
110 | }
111 |
112 | impl Deref for TaskId {
113 | type Target = Uuid;
114 |
115 | fn deref(&self) -> &Self::Target {
116 | &self.0
117 | }
118 | }
119 |
120 | impl Display for TaskId {
121 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
122 | write!(f, "{}", self.0)
123 | }
124 | }
125 |
126 | /// A type alias for task execution results.
127 | pub type Result<T> = StdResult<T, Error>;
128 |
129 | /// Task errors.
130 | #[derive(Debug, thiserror::Error)]
131 | #[non_exhaustive]
132 | pub enum Error {
133 | /// Error returned by the `sqlx` crate during database operations.
134 | #[error(transparent)]
135 | Database(#[from] sqlx::Error),
136 |
137 | /// Indicates the task timed out during execution.
138 | #[error("Task timed out after {0} during execution")]
139 | TimedOut(SignedDuration),
140 |
141 | /// Error indicating that the task has encountered an unrecoverable error
142 | /// state.
143 | ///
144 | /// **Note:** Returning this error from an execute future will override any
145 | /// remaining retries and set the task to [`State::Failed`].
146 | #[error("{0}")]
147 | Fatal(String),
148 |
149 | /// Error indicating that the task has encountered a recoverable error
150 | /// state.
151 | #[error("{0}")]
152 | Retryable(String),
153 | }
154 |
155 | /// Convenience trait for converting results into task results.
156 | ///
157 | /// This makes it easier to convert execution errors to either
158 | /// [`Retryable`](Error::Retryable) or [`Fatal`](Error::Fatal). These are
159 | /// recoverable and unrecoverable, respectively.
160 | ///
161 | /// # Examples
162 | ///
163 | /// Sometimes errors are retryable:
164 | ///
165 | /// ```rust
166 | /// use tokio::net;
167 | /// use underway::{Job, To, ToTaskResult};
168 | ///
169 | /// Job::<(), ()>::builder().step(|_, _| async {
170 | /// // If we can't resolve DNS the issue may be transient and recoverable.
171 | /// net::lookup_host("example.com:80").await.retryable()?;
172 | ///
173 | /// To::done()
174 | /// });
175 | /// ```
176 | ///
177 | /// And other times they're fatal:
178 | ///
179 | /// ```rust
180 | /// use std::env;
181 | ///
182 | /// use underway::{Job, To, ToTaskResult};
183 | ///
184 | /// Job::<(), ()>::builder().step(|_, _| async {
185 | /// // If the API_KEY environment variable isn't set we can't recover.
186 | /// let api_key = env::var("API_KEY").fatal()?;
187 | ///
188 | /// To::done()
189 | /// });
190 | /// ```
191 | pub trait ToTaskResult<T> {
192 |     /// Converts the error into a [`Retryable`](Error::Retryable) task error.
193 |     fn retryable(self) -> StdResult<T, Error>;
194 |
195 |     /// Converts the error into a [`Fatal`](Error::Fatal) task error.
196 |     fn fatal(self) -> StdResult<T, Error>;
197 | }
198 |
199 | impl<T, E: Display> ToTaskResult<T> for StdResult<T, E> {
200 |     fn retryable(self) -> StdResult<T, Error> {
201 |         self.map_err(|err| Error::Retryable(err.to_string()))
202 |     }
203 |
204 |     fn fatal(self) -> StdResult<T, Error> {
205 | self.map_err(|err| Error::Fatal(err.to_string()))
206 | }
207 | }
208 |
209 | /// Trait for defining tasks.
210 | ///
211 | /// Queues and workers operate over types that implement this trait.
212 | pub trait Task: Send + 'static {
213 | /// The input type that the execute method will take.
214 | ///
215 | /// This type must be serialized to and deserialized from the database.
216 | type Input: DeserializeOwned + Serialize + Send + 'static;
217 |
218 | /// The output type that the execute method will return upon success.
219 | type Output: Serialize + Send + 'static;
220 |
221 | /// Executes the task with the provided input.
222 | ///
223 | /// The core of a task, this method is called when the task is picked up by
224 | /// a worker.
225 | ///
226 | /// Typically this method will do something with the provided input. If no
227 | /// input is needed, then the unit type, `()`, can be used instead and the
228 | /// input ignored.
229 | ///
230 | /// # Example
231 | ///
232 | /// ```
233 | /// use serde::{Deserialize, Serialize};
234 | /// use sqlx::{Postgres, Transaction};
235 | /// use underway::{task::Result as TaskResult, Task};
236 | ///
237 | /// // Task input representing the data needed to send a welcome email.
238 | /// #[derive(Debug, Deserialize, Serialize)]
239 | /// struct WelcomeEmail {
240 | /// user_id: i32,
241 | /// email: String,
242 | /// name: String,
243 | /// }
244 | ///
245 | /// // Task that sends a welcome email to a user.
246 | /// struct WelcomeEmailTask;
247 | ///
248 | /// impl Task for WelcomeEmailTask {
249 | /// type Input = WelcomeEmail;
250 | /// type Output = ();
251 | ///
252 | /// /// Simulate sending a welcome email by printing a message to the console.
253 | /// async fn execute(
254 | /// &self,
255 | /// tx: Transaction<'_, Postgres>,
256 | /// input: Self::Input,
257 | /// ) -> TaskResult<Self::Output> {
258 | /// println!(
259 | /// "Sending welcome email to {} <{}> (user_id: {})",
260 | /// input.name, input.email, input.user_id
261 | /// );
262 | ///
263 | /// // Here you would integrate with an email service.
264 | /// // If email sending fails, you could return an error to trigger retries.
265 | /// Ok(())
266 | /// }
267 | /// }
268 | /// ```
269 | fn execute(
270 | &self,
271 | tx: Transaction<'_, Postgres>,
272 | input: Self::Input,
273 | ) -> impl Future<Output = Result<Self::Output>> + Send;