├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── azure-pipelines.yml ├── code_of_conduct.md ├── diesel.toml ├── integration_tests ├── Cargo.toml └── tests │ ├── codegen.rs │ ├── db.rs │ ├── dummy_jobs.rs │ ├── lib.rs │ ├── runner.rs │ ├── sync.rs │ ├── test_guard.rs │ └── util.rs ├── migrations ├── .gitkeep └── 2018-05-03-150523_create_jobs │ ├── down.sql │ └── up.sql ├── swirl ├── Cargo.toml ├── examples │ └── run_100k_jobs.rs └── src │ ├── db.rs │ ├── errors.rs │ ├── job.rs │ ├── lib.rs │ ├── registry.rs │ ├── runner.rs │ ├── runner │ ├── channel.rs │ └── event.rs │ ├── schema.rs │ └── storage.rs └── swirl_proc_macro ├── Cargo.toml └── src ├── background_job.rs ├── diagnostic_shim.rs └── lib.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "swirl", 4 | "integration_tests", 5 | ] 6 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | Copyright 2019 Sean Griffin 179 | 180 | Licensed under the Apache License, Version 2.0 (the "License"); 181 | you may not use this file except in compliance with the License. 182 | You may obtain a copy of the License at 183 | 184 | http://www.apache.org/licenses/LICENSE-2.0 185 | 186 | Unless required by applicable law or agreed to in writing, software 187 | distributed under the License is distributed on an "AS IS" BASIS, 188 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 189 | See the License for the specific language governing permissions and 190 | limitations under the License. 191 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2019 Sean Griffin 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Swirl 2 | ===== 3 | 4 | A simple, efficient background work queue for Rust 5 | -------------------------------------------------- 6 | 7 | Swirl is a background work queue built on Diesel and PostgreSQL's row locking 8 | features. It was extracted from [crates.io](https://crates.io), which uses it for 9 | updating the registry index off the web server. 10 | 11 | This library is still in its early stages, and has not yet reached 0.1 status. 12 | We're using it successfully in production on crates.io today, but there are 13 | still several things missing that you may want from a job queue. 14 | 15 | ## Getting Started 16 | 17 | Swirl stores background jobs in your PostgreSQL 9.5+ database. As such, it has 18 | migrations which need to be run. At the moment, this should be done by copying 19 | our migrations directory into your own. This will be improved before the crate 20 | is released. 21 | 22 | Jobs in Swirl are defined as functions annotated with 23 | `#[swirl::background_job]`, like so: 24 | 25 | ```rust 26 | #[swirl::background_job] 27 | fn resize_image(file_name: String, dimensions: Size) -> Result<(), swirl::PerformError> { 28 | // Do expensive computation that shouldn't be done on the web server 29 | } 30 | ``` 31 | 32 | All arguments must implement `serde::Serialize` and `serde::DeserializeOwned`. 33 | Jobs can also take a shared "environment" argument. This is a struct you define, 34 | which can contain resources shared between jobs like a connection pool, or 35 | application level configuration. For example: 36 | 37 | ```rust 38 | struct Environment { 39 | file_server_private_key: String, 40 | http_client: http_lib::Client, 41 | } 42 | 43 | #[swirl::background_job] 44 | fn resize_image( 45 | env: &Environment, 46 | file_name: String, 47 | dimensions: Size, 48 | ) -> Result<(), swirl::PerformError> { 49 | // Do expensive computation that shouldn't be done on the web server 50 | } 51 | ``` 52 | 53 | Note that all jobs must use the same type for the environment. 54 | Once a job is defined, it can be enqueued like so: 55 | 56 | ```rust 57 | resize_image(file_name, dimensions).enqueue(&diesel_connection)? 58 | ``` 59 | 60 | You do not pass the environment when enqueuing jobs. 61 | Jobs are run asynchronously by an instance of `swirl::Runner`. To construct 62 | one, you first give it the job environment (this is `()` if your jobs don't 63 | take an environment), and then a Diesel connection pool (from `diesel::r2d2`) or a database URL: 64 | 65 | ```rust 66 | let runner = Runner::builder(environment) 67 | .connection_pool(connection_pool) 68 | .build(); 69 | ``` 70 | 71 | If you provide your own connection pool, make sure it is at least as big as the 72 | thread pool (which defaults to 5 threads), or double that if your jobs take a database 73 | connection. Pools built via `database_url` default to twice the thread count; see the configuration sketch at the end of this README. 74 | 75 | Once the runner is created, calling `run_all_pending_jobs` will continuously 76 | saturate all available threads, attempting to run one job per thread at a time.
77 | It will return `Ok(())` once at least one thread has reported there were no jobs 78 | available to run, or an error if a job fails to start running. Note that this 79 | function does not know or care if a job *completes* successfully, only if we 80 | were successful at starting to do work. Typically this function should be called 81 | in a loop: 82 | 83 | ```rust 84 | loop { 85 | if let Err(e) = runner.run_all_pending_jobs() { 86 | // Something has gone seriously wrong. The database might be down, 87 | // or the thread pool may have died. We could just try again, or 88 | // perhaps rebuild the runner, or crash/restart the process. 89 | } 90 | } 91 | ``` 92 | 93 | In situations where you have low job throughput, you can add a sleep to this 94 | loop to wait some period of time before looking for more jobs (see the polling sketch at the end of this README). 95 | 96 | When a job fails (by returning an error or panicking), it will be retried after 97 | `2 ^ {retry_count}` minutes. If a job fails, or an error occurs while marking a job as 98 | finished/failed, it will be logged to stderr. No output is produced when jobs 99 | run successfully. 100 | 101 | Swirl uses at-least-once semantics. This means that we guarantee all jobs are 102 | eventually run to completion, but we do not guarantee that they run only once, 103 | even when a run successfully returns `Ok(())`. Therefore, it is important 104 | that all jobs are idempotent. 105 | 106 | ## Upcoming features 107 | 108 | Planned features that are not yet implemented are: 109 | 110 | - Automatic configuration of the DB connection pool 111 | - Allowing jobs to take a database connection as an argument 112 | - If your jobs need a DB connection today, put the connection pool on your 113 | environment. 114 | - More robust and configurable logging 115 | - Configurable retry behavior 116 | - Support for multiple queues with priority 117 | - Less boilerplate in the job runner 118 | 119 | ## Code of conduct 120 | 121 | Anyone who interacts with Swirl in any space, including but not limited to 122 | this GitHub repository, must follow our [code of conduct](https://github.com/sgrif/swirl/blob/master/code_of_conduct.md). 123 | 124 | ## License 125 | 126 | Licensed under either of these: 127 | 128 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 129 | https://www.apache.org/licenses/LICENSE-2.0) 130 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or 131 | https://opensource.org/licenses/MIT) 132 | 133 | ### Contributing 134 | 135 | Unless you explicitly state otherwise, any contribution you intentionally submit 136 | for inclusion in the work, as defined in the Apache-2.0 license, shall be 137 | dual-licensed as above, without any additional terms or conditions.
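
## Appendix: runner configuration sketch

A minimal sketch of the builder configuration discussed above, using only methods defined in `swirl/src/runner.rs` (`database_url`, `thread_count`, `connection_count`). The `environment` value and the database URL are placeholders:

```rust
let runner = Runner::builder(environment)
    // Builds an r2d2 pool from the URL (requires the default `r2d2` feature).
    .database_url("postgres://localhost/my_app")
    // One job runs per thread at a time; the thread count defaults to 5.
    .thread_count(4)
    // Maximum size of the pool built from the URL; defaults to 2 * thread_count.
    .connection_count(8)
    .build();
```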
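
## Appendix: polling loop sketch

A sketch of the sleep-between-passes loop suggested above for low-throughput deployments. The one-second interval is an arbitrary choice for illustration, not a recommendation from the Swirl source:

```rust
use std::thread::sleep;
use std::time::Duration;

loop {
    if let Err(e) = runner.run_all_pending_jobs() {
        // Same caveats as the loop above: the database may be down, or the
        // thread pool may have died.
        eprintln!("job runner error: {}", e);
    }
    // With few jobs being enqueued, there is no point polling continuously.
    sleep(Duration::from_secs(1));
}
```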
138 | -------------------------------------------------------------------------------- /azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | - master 3 | 4 | variables: 5 | DATABASE_URL: postgres://postgres:postgres@localhost/swirl_test 6 | TEST_DATABASE_URL: postgres://postgres:postgres@localhost/swirl_test 7 | 8 | pool: 9 | vmImage: Ubuntu-16.04 10 | 11 | steps: 12 | - script: | 13 | curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain stable 14 | source ~/.cargo/env 15 | echo "##vso[task.setvariable variable=PATH;]$PATH" 16 | displayName: Install rustc 17 | 18 | - script: | 19 | sudo apt-get update 20 | sudo apt-get -y install postgresql libpq-dev 21 | sudo -u postgres psql postgres -c "ALTER USER postgres PASSWORD 'postgres'" 22 | displayName: Install PostgreSQL 23 | 24 | - script: cargo install diesel_cli --vers 1.4.0 --debug --no-default-features --features postgres 25 | displayName: Install diesel 26 | 27 | - script: diesel database setup 28 | displayName: Run migrations 29 | 30 | - script: cargo test 31 | displayName: Run tests 32 | -------------------------------------------------------------------------------- /code_of_conduct.md: -------------------------------------------------------------------------------- 1 | # Contributor Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | nationality, personal appearance, race, religion, or sexual identity and 10 | orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. 
Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting a project maintainer at: 59 | 60 | * Sean Griffin 61 | 62 | All complaints will be reviewed and investigated and will result in a response 63 | that is deemed necessary and appropriate to the circumstances. The project team 64 | is obligated to maintain confidentiality with regard to the reporter of an 65 | incident. Further details of specific enforcement policies may be posted 66 | separately. 67 | 68 | Project maintainers who do not follow or enforce the Code of Conduct in good 69 | faith may face temporary or permanent repercussions as determined by other 70 | members of the project's leadership. 71 | 72 | ## Attribution 73 | 74 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 75 | available at [https://contributor-covenant.org/version/1/4][version] 76 | 77 | [homepage]: https://contributor-covenant.org 78 | [version]: https://contributor-covenant.org/version/1/4/ 79 | -------------------------------------------------------------------------------- /diesel.toml: -------------------------------------------------------------------------------- 1 | # For documentation on how to configure this file, 2 | # see diesel.rs/guides/configuring-diesel-cli 3 | 4 | [print_schema] 5 | file = "swirl/src/schema.rs" 6 | -------------------------------------------------------------------------------- /integration_tests/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "integration_tests" 3 | version = "0.1.0" 4 | authors = ["Sean Griffin "] 5 | edition = "2018" 6 | autotests = false 7 | 8 | [dependencies] 9 | diesel = { version = "1.0.0", features = ["postgres", "r2d2"] } 10 | swirl = { path = "../swirl" } 11 | lazy_static = "1.0.0" 12 | dotenv = "0.11" 13 | antidote = "1.0.0" 14 | assert_matches = "1.0.0" 15 | failure = { version = "0.1", features = ["backtrace"] } 16 | 17 | [[test]] 18 | name = "integration_tests" 19 | path = "tests/lib.rs" 20 | harness = true 21 | 22 | [features] 23 | nightly = ["swirl/nightly"] 24 | -------------------------------------------------------------------------------- /integration_tests/tests/codegen.rs: -------------------------------------------------------------------------------- 1 | use crate::dummy_jobs::*; 2 | use crate::test_guard::TestGuard; 3 | use diesel::prelude::*; 4 | use failure::Fallible; 5 | use swirl::db::DieselPoolObj; 6 | use swirl::{JobsFailed, PerformError}; 7 | 8 | #[test] 9 | fn generated_jobs_serialize_all_arguments_except_first() -> Fallible<()> { 10 | #[swirl::background_job] 11 | fn check_arg_equal_to_env(env: &String, arg: String) -> Result<(), PerformError> { 12 | if env == &arg { 13 | Ok(()) 14 | } else { 15 | Err("arg wasn't env!".into()) 16 | } 17 | } 18 | 19 | let runner = TestGuard::runner("a".to_string()); 20 | let conn = runner.connection_pool().get()?; 21 | check_arg_equal_to_env("a".into()).enqueue(&conn)?; 22 | check_arg_equal_to_env("b".into()).enqueue(&conn)?; 23 | 24 | runner.run_all_pending_jobs()?; 25 | assert_eq!(Err(JobsFailed(1)), runner.check_for_failed_jobs()); 26 | Ok(()) 27 |
} 28 | 29 | #[test] 30 | fn jobs_with_args_but_no_env() -> Fallible<()> { 31 | #[swirl::background_job] 32 | fn assert_foo(arg: String) -> Result<(), PerformError> { 33 | if arg == "foo" { 34 | Ok(()) 35 | } else { 36 | Err("arg wasn't foo!".into()) 37 | } 38 | } 39 | 40 | let runner = TestGuard::dummy_runner(); 41 | let conn = runner.connection_pool().get()?; 42 | assert_foo("foo".into()).enqueue(&conn)?; 43 | assert_foo("not foo".into()).enqueue(&conn)?; 44 | 45 | runner.run_all_pending_jobs()?; 46 | assert_eq!(Err(JobsFailed(1)), runner.check_for_failed_jobs()); 47 | Ok(()) 48 | } 49 | 50 | #[test] 51 | fn env_can_have_any_name() -> Fallible<()> { 52 | #[swirl::background_job] 53 | fn env_with_different_name(environment: &String) -> Result<(), swirl::PerformError> { 54 | assert_eq!(environment, "my environment"); 55 | Ok(()) 56 | } 57 | 58 | let runner = TestGuard::runner(String::from("my environment")); 59 | let conn = runner.connection_pool().get()?; 60 | env_with_different_name().enqueue(&conn)?; 61 | 62 | runner.run_all_pending_jobs()?; 63 | runner.check_for_failed_jobs()?; 64 | Ok(()) 65 | } 66 | 67 | #[test] 68 | #[forbid(unused_imports)] 69 | fn test_imports_only_used_in_job_body_are_not_warned_as_unused() -> Fallible<()> { 70 | use std::io::prelude::*; 71 | 72 | #[swirl::background_job] 73 | fn uses_trait_import() -> Result<(), swirl::PerformError> { 74 | let mut buf = Vec::new(); 75 | buf.write_all(b"foo")?; 76 | let s = String::from_utf8(buf)?; 77 | assert_eq!(s, "foo"); 78 | Ok(()) 79 | } 80 | 81 | let runner = TestGuard::dummy_runner(); 82 | let conn = runner.connection_pool().get()?; 83 | uses_trait_import().enqueue(&conn)?; 84 | 85 | runner.run_all_pending_jobs()?; 86 | runner.check_for_failed_jobs()?; 87 | Ok(()) 88 | } 89 | 90 | #[test] 91 | fn jobs_can_take_a_connection_as_an_argument() -> Fallible<()> { 92 | use diesel::sql_query; 93 | 94 | #[swirl::background_job] 95 | fn takes_env_and_conn(_env: &(), conn: &PgConnection) -> Result<(), swirl::PerformError> { 96 | sql_query("SELECT 1").execute(conn)?; 97 | Ok(()) 98 | } 99 | 100 | #[swirl::background_job] 101 | fn takes_only_conn(conn: &PgConnection) -> Result<(), swirl::PerformError> { 102 | sql_query("SELECT 1").execute(conn)?; 103 | Ok(()) 104 | } 105 | 106 | #[swirl::background_job] 107 | fn takes_connection_pool(pool: &dyn DieselPoolObj) -> Result<(), swirl::PerformError> { 108 | let conn1 = pool.get()?; 109 | let conn2 = pool.get()?; 110 | sql_query("SELECT 1").execute(&**conn1)?; 111 | sql_query("SELECT 1").execute(&**conn2)?; 112 | Ok(()) 113 | } 114 | 115 | #[swirl::background_job] 116 | fn takes_fully_qualified_conn(conn: &diesel::PgConnection) -> Result<(), swirl::PerformError> { 117 | sql_query("SELECT 1").execute(conn)?; 118 | Ok(()) 119 | } 120 | 121 | #[swirl::background_job] 122 | fn takes_fully_qualified_pool( 123 | pool: &dyn swirl::db::DieselPoolObj, 124 | ) -> Result<(), swirl::PerformError> { 125 | let conn1 = pool.get()?; 126 | let conn2 = pool.get()?; 127 | sql_query("SELECT 1").execute(&**conn1)?; 128 | sql_query("SELECT 1").execute(&**conn2)?; 129 | Ok(()) 130 | } 131 | 132 | let runner = TestGuard::dummy_runner(); 133 | { 134 | let conn = runner.connection_pool().get()?; 135 | takes_env_and_conn().enqueue(&conn)?; 136 | takes_only_conn().enqueue(&conn)?; 137 | takes_connection_pool().enqueue(&conn)?; 138 | takes_fully_qualified_conn().enqueue(&conn)?; 139 | takes_fully_qualified_pool().enqueue(&conn)?; 140 | } 141 | 142 | runner.run_all_pending_jobs()?; 143 | runner.check_for_failed_jobs()?; 
144 | Ok(()) 145 | } 146 | -------------------------------------------------------------------------------- /integration_tests/tests/db.rs: -------------------------------------------------------------------------------- 1 | use diesel::prelude::*; 2 | use diesel::r2d2; 3 | 4 | pub type DieselPool = r2d2::Pool<r2d2::ConnectionManager<PgConnection>>; 5 | pub type PoolBuilder = swirl::db::R2d2Builder; 6 | 7 | pub fn pool_builder() -> r2d2::Builder<r2d2::ConnectionManager<PgConnection>> { 8 | r2d2::Pool::builder() 9 | .min_idle(Some(0)) 10 | .connection_customizer(Box::new(SetStatementTimeout(1000))) 11 | } 12 | 13 | #[derive(Debug, Clone, Copy)] 14 | struct SetStatementTimeout(u64); 15 | 16 | impl r2d2::CustomizeConnection<PgConnection, r2d2::Error> for SetStatementTimeout { 17 | fn on_acquire(&self, conn: &mut PgConnection) -> Result<(), r2d2::Error> { 18 | use diesel::sql_query; 19 | 20 | sql_query(format!("SET statement_timeout = {}", self.0)) 21 | .execute(conn) 22 | .map_err(r2d2::Error::QueryError)?; 23 | Ok(()) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /integration_tests/tests/dummy_jobs.rs: -------------------------------------------------------------------------------- 1 | pub use swirl::Job; 2 | 3 | use swirl::errors::PerformError; 4 | 5 | use crate::sync::Barrier; 6 | 7 | /// A job which takes a barrier as its environment and calls wait on it before 8 | /// succeeding 9 | #[swirl::background_job] 10 | pub fn barrier_job(env: &Barrier) -> Result<(), PerformError> { 11 | env.wait(); 12 | Ok(()) 13 | } 14 | 15 | /// A job which always fails 16 | #[swirl::background_job] 17 | pub fn failure_job() -> Result<(), PerformError> { 18 | Err("failed".into()) 19 | } 20 | 21 | /// A job which panics 22 | #[swirl::background_job] 23 | pub fn panic_job() -> Result<(), PerformError> { 24 | panic!() 25 | } 26 | -------------------------------------------------------------------------------- /integration_tests/tests/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(warnings)] 2 | 3 | mod db; 4 | mod dummy_jobs; 5 | mod sync; 6 | mod test_guard; 7 | mod util; 8 | 9 | mod codegen; 10 | mod runner; 11 | -------------------------------------------------------------------------------- /integration_tests/tests/runner.rs: -------------------------------------------------------------------------------- 1 | use assert_matches::assert_matches; 2 | use diesel::prelude::*; 3 | use failure::Fallible; 4 | use std::sync::mpsc::sync_channel; 5 | use std::thread; 6 | use std::time::Duration; 7 | use swirl::schema::*; 8 | use swirl::JobsFailed; 9 | 10 | use crate::dummy_jobs::*; 11 | use crate::sync::Barrier; 12 | use crate::test_guard::TestGuard; 13 | 14 | #[test] 15 | fn run_all_pending_jobs_returns_when_all_jobs_enqueued() -> Fallible<()> { 16 | let barrier = Barrier::new(3); 17 | let runner = TestGuard::runner(barrier.clone()); 18 | let conn = runner.connection_pool().get()?; 19 | barrier_job().enqueue(&conn)?; 20 | barrier_job().enqueue(&conn)?; 21 | 22 | runner.run_all_pending_jobs()?; 23 | 24 | let queued_job_count = background_jobs::table.count().get_result(&conn); 25 | let unlocked_job_count = background_jobs::table 26 | .select(background_jobs::id) 27 | .for_update() 28 | .skip_locked() 29 | .load::<i64>(&conn) 30 | .map(|v| v.len()); 31 | 32 | assert_eq!(Ok(2), queued_job_count); 33 | assert_eq!(Ok(0), unlocked_job_count); 34 | 35 | barrier.wait(); 36 | Ok(()) 37 | } 38 | 39 | #[test] 40 | fn check_for_failed_jobs_blocks_until_all_queued_jobs_are_finished() -> Fallible<()> { 41 | let barrier =
Barrier::new(3); 42 | let runner = TestGuard::runner(barrier.clone()); 43 | let conn = runner.connection_pool().get()?; 44 | barrier_job().enqueue(&conn)?; 45 | barrier_job().enqueue(&conn)?; 46 | 47 | runner.run_all_pending_jobs()?; 48 | 49 | let (send, recv) = sync_channel(0); 50 | let handle = thread::spawn(move || { 51 | let wait = Duration::from_millis(100); 52 | assert!( 53 | recv.recv_timeout(wait).is_err(), 54 | "wait_for_jobs returned before jobs finished" 55 | ); 56 | 57 | barrier.wait(); 58 | 59 | assert!(recv.recv().is_ok(), "wait_for_jobs didn't return"); 60 | }); 61 | 62 | runner.check_for_failed_jobs()?; 63 | send.send(1)?; 64 | handle.join().unwrap(); 65 | Ok(()) 66 | } 67 | 68 | #[test] 69 | fn check_for_failed_jobs_panics_if_jobs_failed() -> Fallible<()> { 70 | let runner = TestGuard::dummy_runner(); 71 | let conn = runner.connection_pool().get()?; 72 | failure_job().enqueue(&conn)?; 73 | failure_job().enqueue(&conn)?; 74 | failure_job().enqueue(&conn)?; 75 | 76 | runner.run_all_pending_jobs()?; 77 | assert_eq!(Err(JobsFailed(3)), runner.check_for_failed_jobs()); 78 | Ok(()) 79 | } 80 | 81 | #[test] 82 | fn panicking_jobs_are_caught_and_treated_as_failures() -> Fallible<()> { 83 | let runner = TestGuard::dummy_runner(); 84 | let conn = runner.connection_pool().get()?; 85 | panic_job().enqueue(&conn)?; 86 | failure_job().enqueue(&conn)?; 87 | 88 | runner.run_all_pending_jobs()?; 89 | assert_eq!(Err(JobsFailed(2)), runner.check_for_failed_jobs()); 90 | Ok(()) 91 | } 92 | 93 | #[test] 94 | fn run_all_pending_jobs_errs_if_jobs_dont_start_in_timeout() -> Fallible<()> { 95 | let barrier = Barrier::new(2); 96 | // A runner with 1 thread where all jobs will hang indefinitely. 97 | // The second job will never start. 98 | let runner = TestGuard::builder(barrier.clone()) 99 | .thread_count(1) 100 | .job_start_timeout(Duration::from_millis(50)) 101 | .build(); 102 | let conn = runner.connection_pool().get()?; 103 | barrier_job().enqueue(&conn)?; 104 | barrier_job().enqueue(&conn)?; 105 | 106 | let run_result = runner.run_all_pending_jobs(); 107 | assert_matches!(run_result, Err(swirl::FetchError::NoMessageReceived)); 108 | 109 | // Make sure the jobs actually run so we don't panic on drop 110 | barrier.wait(); 111 | barrier.wait(); 112 | runner.check_for_failed_jobs()?; 113 | Ok(()) 114 | } 115 | 116 | #[test] 117 | fn jobs_failing_to_load_doesnt_panic_threads() -> Fallible<()> { 118 | let runner = TestGuard::builder(()) 119 | .thread_count(1) 120 | .connection_count(1) 121 | .build(); 122 | 123 | { 124 | let conn = runner.connection_pool().get()?; 125 | failure_job().enqueue(&conn)?; 126 | // Since jobs are loaded with `SELECT FOR UPDATE`, it will always fail in 127 | // read-only mode 128 | diesel::sql_query("SET default_transaction_read_only = 't'").execute(&conn)?; 129 | } 130 | 131 | let run_result = runner.run_all_pending_jobs(); 132 | 133 | { 134 | let conn = runner.connection_pool().get()?; 135 | diesel::sql_query("SET default_transaction_read_only = 'f'").execute(&conn)?; 136 | } 137 | 138 | assert_matches!(run_result, Err(swirl::FetchError::FailedLoadingJob(_))); 139 | runner.check_for_failed_jobs()?; 140 | Ok(()) 141 | } 142 | -------------------------------------------------------------------------------- /integration_tests/tests/sync.rs: -------------------------------------------------------------------------------- 1 | use std::panic::{RefUnwindSafe, UnwindSafe}; 2 | use std::sync::{Arc, Barrier as StdBarrier, BarrierWaitResult}; 3 | 4 | #[derive(Clone)] 5 | pub 
struct Barrier { 6 | inner: Arc<StdBarrier>, 7 | } 8 | 9 | impl Barrier { 10 | pub fn new(n: usize) -> Self { 11 | Self { 12 | inner: Arc::new(StdBarrier::new(n)), 13 | } 14 | } 15 | 16 | pub fn wait(&self) -> BarrierWaitResult { 17 | self.inner.wait() 18 | } 19 | } 20 | 21 | impl UnwindSafe for Barrier {} 22 | impl RefUnwindSafe for Barrier {} 23 | -------------------------------------------------------------------------------- /integration_tests/tests/test_guard.rs: -------------------------------------------------------------------------------- 1 | use antidote::{Mutex, MutexGuard}; 2 | use diesel::prelude::*; 3 | use std::ops::{Deref, DerefMut}; 4 | use std::time::Duration; 5 | use swirl::{Builder, Runner}; 6 | 7 | use crate::db::*; 8 | use crate::util::*; 9 | 10 | lazy_static::lazy_static! { 11 | // Since these tests deal with behavior concerning multiple connections 12 | // running concurrently, they have to run outside of a transaction. 13 | // Therefore we can't run more than one at a time. 14 | // 15 | // Rather than forcing the whole suite to be run with `--test-threads 1`, 16 | // we just lock these tests instead. 17 | static ref TEST_MUTEX: Mutex<()> = Mutex::new(()); 18 | } 19 | 20 | pub struct TestGuard<'a, Env: 'static> { 21 | runner: Runner<Env, DieselPool>, 22 | _lock: MutexGuard<'a, ()>, 23 | } 24 | 25 | impl<'a, Env> TestGuard<'a, Env> { 26 | pub fn builder(env: Env) -> GuardBuilder<Env> { 27 | let database_url = 28 | dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests"); 29 | let builder = Runner::builder(env).connection_pool_builder(database_url, pool_builder()); 30 | 31 | GuardBuilder { builder } 32 | } 33 | 34 | pub fn runner(env: Env) -> Self { 35 | Self::builder(env).build() 36 | } 37 | } 38 | 39 | impl<'a> TestGuard<'a, ()> { 40 | pub fn dummy_runner() -> Self { 41 | Self::builder(()).build() 42 | } 43 | } 44 | 45 | pub struct GuardBuilder<Env> { 46 | builder: Builder<Env, PoolBuilder>, 47 | } 48 | 49 | impl<Env: 'static> GuardBuilder<Env> { 50 | pub fn thread_count(mut self, count: usize) -> Self { 51 | self.builder = self.builder.thread_count(count); 52 | self 53 | } 54 | 55 | pub fn connection_count(mut self, count: u32) -> Self { 56 | self.builder = self.builder.connection_count(count); 57 | self 58 | } 59 | 60 | pub fn job_start_timeout(mut self, timeout: Duration) -> Self { 61 | self.builder = self.builder.job_start_timeout(timeout); 62 | self 63 | } 64 | 65 | pub fn build<'a>(self) -> TestGuard<'a, Env> { 66 | TestGuard { 67 | _lock: TEST_MUTEX.lock(), 68 | runner: self.builder.build(), 69 | } 70 | } 71 | } 72 | 73 | impl<'a, Env> Deref for TestGuard<'a, Env> { 74 | type Target = Runner<Env, DieselPool>; 75 | 76 | fn deref(&self) -> &Self::Target { 77 | &self.runner 78 | } 79 | } 80 | 81 | impl<'a, Env> DerefMut for TestGuard<'a, Env> { 82 | fn deref_mut(&mut self) -> &mut Self::Target { 83 | &mut self.runner 84 | } 85 | } 86 | 87 | impl<'a, Env> Drop for TestGuard<'a, Env> { 88 | fn drop(&mut self) { 89 | let conn = self.runner.connection_pool().get().unwrap(); 90 | ::diesel::sql_query("TRUNCATE TABLE background_jobs") 91 | .execute(&conn) 92 | .unwrap_from_drop(); 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /integration_tests/tests/util.rs: -------------------------------------------------------------------------------- 1 | pub trait UnwrapFromDrop<T> { 2 | fn unwrap_from_drop(self) -> T; 3 | } 4 | 5 | impl<T, E> UnwrapFromDrop<T> for Result<T, E> 6 | where 7 | T: Default, 8 | E: std::fmt::Debug, 9 | { 10 | fn unwrap_from_drop(self) -> T { 11 | use std::thread::panicking; 12 |
13 | match self { 14 | Ok(t) => t, 15 | Err(e) => { 16 | if panicking() { 17 | eprintln!("called `Result::unwrap()` on an `Err` value: {:?}", e); 18 | T::default() 19 | } else { 20 | panic!("called `Result::unwrap()` on an `Err` value: {:?}", e) 21 | } 22 | } 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /migrations/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sgrif/swirl/fe78e7011c72d7f0eeb54e0022ae36f0f1fb02ad/migrations/.gitkeep -------------------------------------------------------------------------------- /migrations/2018-05-03-150523_create_jobs/down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE background_jobs; 2 | -------------------------------------------------------------------------------- /migrations/2018-05-03-150523_create_jobs/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE background_jobs ( 2 | id BIGSERIAL PRIMARY KEY, 3 | job_type TEXT NOT NULL, 4 | data JSONB NOT NULL, 5 | retries INTEGER NOT NULL DEFAULT 0, 6 | last_retry TIMESTAMP NOT NULL DEFAULT '1970-01-01', 7 | created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP 8 | ); 9 | -------------------------------------------------------------------------------- /swirl/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "swirl" 3 | version = "0.1.0" 4 | authors = ["Sean Griffin "] 5 | edition = "2018" 6 | description = "A simple background processing framework for Diesel and PostgreSQL" 7 | license = "MIT OR Apache-2.0" 8 | 9 | [dependencies] 10 | swirl_proc_macro = { path = "../swirl_proc_macro" } 11 | diesel = { version = "1.0.0", features = ["postgres", "serde_json"] } 12 | threadpool = "1.7" 13 | serde_json = "1.0.0" 14 | serde = "1.0.0" 15 | serde_derive = "1.0.90" 16 | inventory = "0.1" 17 | 18 | [dev-dependencies] 19 | dotenv = "0.11" 20 | lazy_static = "1.0" 21 | num_cpus = "1.0" 22 | 23 | [features] 24 | default = ["r2d2"] 25 | r2d2 = ["diesel/r2d2"] 26 | nightly = ["swirl_proc_macro/nightly"] 27 | -------------------------------------------------------------------------------- /swirl/examples/run_100k_jobs.rs: -------------------------------------------------------------------------------- 1 | use diesel::prelude::*; 2 | use std::error::Error; 3 | use std::time::Instant; 4 | use swirl::*; 5 | 6 | #[swirl::background_job] 7 | fn dummy_job() -> Result<(), PerformError> { 8 | Ok(()) 9 | } 10 | 11 | fn main() -> Result<(), Box<dyn Error>> { 12 | let database_url = dotenv::var("DATABASE_URL")?; 13 | println!("Enqueuing 100k jobs"); 14 | let runner = Runner::builder(()).database_url(database_url).build(); 15 | enqueue_jobs(&*runner.connection_pool().get()?).unwrap(); 16 | println!("Running jobs"); 17 | let started = Instant::now(); 18 | 19 | runner.run_all_pending_jobs()?; 20 | runner.check_for_failed_jobs()?; 21 | 22 | let elapsed = started.elapsed(); 23 | println!("Ran 100k jobs in {} seconds", elapsed.as_secs()); 24 | 25 | Ok(()) 26 | } 27 | 28 | fn enqueue_jobs(conn: &PgConnection) -> Result<(), EnqueueError> { 29 | use diesel::sql_query; 30 | sql_query("TRUNCATE TABLE background_jobs;").execute(conn)?; 31 | for _ in 0..100_000 { 32 | dummy_job().enqueue(conn)?; 33 | } 34 | Ok(()) 35 | } 36 | -------------------------------------------------------------------------------- /swirl/src/db.rs:
-------------------------------------------------------------------------------- 1 | use diesel::PgConnection; 2 | use std::error::Error; 3 | use std::ops::Deref; 4 | 5 | pub type DieselPooledConn<'a, T> = <T as BorrowedConnection<'a>>::Connection; 6 | 7 | /// A trait to work around associated type constructors 8 | /// 9 | /// This will eventually change to `type Connection<'a>` on [`DieselPool`] 10 | pub trait BorrowedConnection<'a> { 11 | /// The smart pointer returned by this connection pool. 12 | type Connection: Deref<Target = PgConnection>; 13 | } 14 | 15 | /// A connection pool for Diesel database connections 16 | /// 17 | /// If you don't care about the details of connection pooling, or want to use 18 | /// the r2d2 crate, you can enable the r2d2 feature on this crate and never 19 | /// be concerned with this trait. If you want to use your own connection pool, 20 | /// you can implement this trait manually. 21 | pub trait DieselPool: Clone + Send + for<'a> BorrowedConnection<'a> { 22 | /// The error type returned when a connection could not be retrieved from 23 | /// the pool. 24 | type Error: Error + Send + Sync + 'static; 25 | 26 | /// Attempt to get a database connection from the pool. Errors if a 27 | /// connection could not be retrieved from the pool. 28 | /// 29 | /// The exact details of why an error would be returned will depend on 30 | /// the pool, but a reasonable implementation will return an error if: 31 | /// 32 | /// - A timeout was reached 33 | /// - An error occurred establishing a new connection 34 | fn get(&self) -> Result<DieselPooledConn<'_, Self>, Self::Error>; 35 | } 36 | 37 | /// Object safe version of [`DieselPool`] 38 | pub trait DieselPoolObj { 39 | /// Object safe version of [`DieselPool::get`] 40 | /// 41 | /// This function will heap allocate the connection. This allocation can 42 | /// be avoided by using [`Self::with_connection`] 43 | fn get(&self) -> Result<Box<dyn Deref<Target = PgConnection> + '_>, Box<dyn Error>>; 44 | 45 | fn with_connection( 46 | &self, 47 | f: &dyn Fn(&PgConnection) -> Result<(), Box<dyn Error>>, 48 | ) -> Result<(), Box<dyn Error>>; 49 | } 50 | 51 | impl<T: DieselPool> DieselPoolObj for T { 52 | fn get(&self) -> Result<Box<dyn Deref<Target = PgConnection> + '_>, Box<dyn Error>> { 53 | DieselPool::get(self) 54 | .map(|v| Box::new(v) as _) 55 | .map_err(|v| Box::new(v) as _) 56 | } 57 | 58 | fn with_connection( 59 | &self, 60 | f: &dyn Fn(&PgConnection) -> Result<(), Box<dyn Error>>, 61 | ) -> Result<(), Box<dyn Error>> { 62 | let conn = DieselPool::get(self)?; 63 | f(&conn) 64 | } 65 | } 66 | 67 | /// A builder for connection pools 68 | pub trait DieselPoolBuilder { 69 | /// The concrete connection pool built by this type 70 | type Pool: DieselPool; 71 | 72 | /// Sets the maximum size of the connection pool.
73 | fn max_size(self, max_size: u32) -> Self; 74 | 75 | /// Build the pool 76 | fn build(self, database_url: String) -> Self::Pool; 77 | } 78 | 79 | #[cfg(feature = "r2d2")] 80 | mod r2d2_impl { 81 | use super::*; 82 | use diesel::r2d2; 83 | 84 | type ConnectionManager = r2d2::ConnectionManager<PgConnection>; 85 | 86 | impl<'a> BorrowedConnection<'a> for r2d2::Pool<ConnectionManager> { 87 | type Connection = r2d2::PooledConnection<ConnectionManager>; 88 | } 89 | 90 | impl DieselPool for r2d2::Pool<ConnectionManager> { 91 | type Error = r2d2::PoolError; 92 | 93 | fn get<'a>(&'a self) -> Result<DieselPooledConn<'a, Self>, Self::Error> { 94 | self.get() 95 | } 96 | } 97 | 98 | pub struct R2d2Builder { 99 | url: String, 100 | builder: r2d2::Builder<ConnectionManager>, 101 | connection_count: Option<u32>, 102 | } 103 | 104 | impl R2d2Builder { 105 | pub(crate) fn new(url: String, builder: r2d2::Builder<ConnectionManager>) -> Self { 106 | Self { 107 | url, 108 | builder, 109 | connection_count: None, 110 | } 111 | } 112 | 113 | pub(crate) fn connection_count(&mut self, connection_count: u32) { 114 | self.connection_count = Some(connection_count); 115 | } 116 | 117 | pub(crate) fn build(self, default_connection_count: u32) -> r2d2::Pool<ConnectionManager> { 118 | let max_size = self.connection_count.unwrap_or(default_connection_count); 119 | self.builder 120 | .max_size(max_size) 121 | .build_unchecked(ConnectionManager::new(self.url)) 122 | } 123 | } 124 | } 125 | 126 | #[cfg(feature = "r2d2")] 127 | #[doc(hidden)] 128 | pub use self::r2d2_impl::R2d2Builder; 129 | -------------------------------------------------------------------------------- /swirl/src/errors.rs: -------------------------------------------------------------------------------- 1 | use diesel::result::Error as DieselError; 2 | use std::error::Error; 3 | use std::fmt; 4 | 5 | use crate::db::DieselPool; 6 | 7 | /// An error occurred queueing the job 8 | #[derive(Debug)] 9 | pub enum EnqueueError { 10 | /// An error occurred serializing the job 11 | SerializationError(serde_json::error::Error), 12 | 13 | /// An error occurred inserting the job into the database 14 | DatabaseError(DieselError), 15 | 16 | #[doc(hidden)] 17 | /// Match on `_` instead, more variants may be added in the future 18 | __NonExhaustive, 19 | } 20 | 21 | impl From<serde_json::error::Error> for EnqueueError { 22 | fn from(e: serde_json::error::Error) -> Self { 23 | EnqueueError::SerializationError(e) 24 | } 25 | } 26 | 27 | impl From<DieselError> for EnqueueError { 28 | fn from(e: DieselError) -> Self { 29 | EnqueueError::DatabaseError(e) 30 | } 31 | } 32 | 33 | impl fmt::Display for EnqueueError { 34 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 35 | match self { 36 | EnqueueError::SerializationError(e) => e.fmt(f), 37 | EnqueueError::DatabaseError(e) => e.fmt(f), 38 | EnqueueError::__NonExhaustive => unreachable!(), 39 | } 40 | } 41 | } 42 | 43 | impl Error for EnqueueError { 44 | fn source(&self) -> Option<&(dyn Error + 'static)> { 45 | match self { 46 | EnqueueError::SerializationError(e) => Some(e), 47 | EnqueueError::DatabaseError(e) => Some(e), 48 | EnqueueError::__NonExhaustive => unreachable!(), 49 | } 50 | } 51 | } 52 | 53 | /// An error occurred performing the job 54 | pub type PerformError = Box<dyn Error>; 55 | 56 | /// An error occurred while attempting to fetch jobs from the queue 57 | pub enum FetchError<Pool: DieselPool> { 58 | /// We could not acquire a database connection from the pool. 59 | /// 60 | /// Either the connection pool is too small, or new connections cannot be 61 | /// established. 62 | NoDatabaseConnection(Pool::Error), 63 | 64 | /// Could not execute the query to load a job from the database.
65 | FailedLoadingJob(DieselError), 66 | 67 | /// No message was received from the worker thread. 68 | /// 69 | /// Either the thread pool is too small, or jobs have hung indefinitely 70 | NoMessageReceived, 71 | } 72 | 73 | impl<Pool: DieselPool> fmt::Debug for FetchError<Pool> { 74 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 75 | match self { 76 | FetchError::NoDatabaseConnection(e) => { 77 | f.debug_tuple("NoDatabaseConnection").field(e).finish() 78 | } 79 | FetchError::FailedLoadingJob(e) => f.debug_tuple("FailedLoadingJob").field(e).finish(), 80 | FetchError::NoMessageReceived => f.debug_struct("NoMessageReceived").finish(), 81 | } 82 | } 83 | } 84 | 85 | impl<Pool: DieselPool> fmt::Display for FetchError<Pool> { 86 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 87 | match self { 88 | FetchError::NoDatabaseConnection(e) => { 89 | write!(f, "Timed out acquiring a database connection. ")?; 90 | write!(f, "Try increasing the connection pool size: ")?; 91 | write!(f, "{}", e)?; 92 | } 93 | FetchError::FailedLoadingJob(e) => { 94 | write!(f, "An error occurred loading a job from the database: ")?; 95 | write!(f, "{}", e)?; 96 | } 97 | FetchError::NoMessageReceived => { 98 | write!(f, "No message was received from the worker thread. ")?; 99 | write!(f, "Try increasing the thread pool size or timeout period.")?; 100 | } 101 | } 102 | Ok(()) 103 | } 104 | } 105 | 106 | impl<Pool: DieselPool> Error for FetchError<Pool> { 107 | fn source(&self) -> Option<&(dyn Error + 'static)> { 108 | match self { 109 | FetchError::NoDatabaseConnection(e) => Some(e), 110 | FetchError::FailedLoadingJob(e) => Some(e), 111 | FetchError::NoMessageReceived => None, 112 | } 113 | } 114 | } 115 | 116 | /// An error returned by `Runner::check_for_failed_jobs`. Only used in tests. 117 | #[derive(Debug)] 118 | pub enum FailedJobsError { 119 | /// Jobs failed to run 120 | JobsFailed( 121 | /// The number of failed jobs 122 | i64, 123 | ), 124 | 125 | #[doc(hidden)] 126 | /// Match on `_` instead, more variants may be added in the future. 127 | /// Some other error occurred: worker threads may have panicked, an error 128 | /// occurred counting failed jobs in the DB, or something else 129 | /// unexpectedly went wrong.
130 | __Unknown(Box<dyn Error>), 131 | } 132 | 133 | pub use FailedJobsError::JobsFailed; 134 | 135 | impl From<Box<dyn Error>> for FailedJobsError { 136 | fn from(e: Box<dyn Error>) -> Self { 137 | FailedJobsError::__Unknown(e) 138 | } 139 | } 140 | 141 | impl From<DieselError> for FailedJobsError { 142 | fn from(e: DieselError) -> Self { 143 | FailedJobsError::__Unknown(e.into()) 144 | } 145 | } 146 | 147 | impl PartialEq for FailedJobsError { 148 | fn eq(&self, other: &Self) -> bool { 149 | match (self, other) { 150 | (JobsFailed(x), JobsFailed(y)) => x == y, 151 | _ => false, 152 | } 153 | } 154 | } 155 | 156 | impl fmt::Display for FailedJobsError { 157 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 158 | use FailedJobsError::*; 159 | 160 | match self { 161 | JobsFailed(x) => write!(f, "{} jobs failed", x), 162 | FailedJobsError::__Unknown(e) => e.fmt(f), 163 | } 164 | } 165 | } 166 | 167 | impl Error for FailedJobsError { 168 | fn source(&self) -> Option<&(dyn Error + 'static)> { 169 | match self { 170 | JobsFailed(_) => None, 171 | FailedJobsError::__Unknown(e) => Some(&**e), 172 | } 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /swirl/src/job.rs: -------------------------------------------------------------------------------- 1 | use diesel::PgConnection; 2 | use serde::{de::DeserializeOwned, Serialize}; 3 | 4 | use crate::db::DieselPoolObj; 5 | use crate::errors::{EnqueueError, PerformError}; 6 | use crate::storage; 7 | 8 | /// A background job, meant to be run asynchronously. 9 | pub trait Job: Serialize + DeserializeOwned { 10 | /// The environment this job is run with. This is a struct you define, 11 | /// which should encapsulate things like database connection pools, any 12 | /// configuration, and any other static data or shared resources. 13 | type Environment: 'static; 14 | 15 | /// The key to use for storing this job, and looking it up later. 16 | /// 17 | /// Typically this is the name of your struct in `snake_case` 18 | const JOB_TYPE: &'static str; 19 | 20 | /// Enqueue this job to be run at some point in the future. 21 | fn enqueue(self, conn: &PgConnection) -> Result<(), EnqueueError> { 22 | storage::enqueue_job(conn, self) 23 | } 24 | 25 | /// The logic involved in actually performing this job.
26 | fn perform(self, env: &Self::Environment, pool: &dyn DieselPoolObj) 27 | -> Result<(), PerformError>; 28 | } 29 | -------------------------------------------------------------------------------- /swirl/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(warnings)] 2 | 3 | #[macro_use] 4 | extern crate diesel; 5 | 6 | #[doc(hidden)] 7 | pub extern crate inventory; 8 | #[doc(hidden)] 9 | pub extern crate serde; 10 | 11 | mod job; 12 | mod registry; 13 | mod runner; 14 | mod storage; 15 | 16 | pub mod db; 17 | pub mod errors; 18 | pub mod schema; 19 | 20 | pub use swirl_proc_macro::*; 21 | 22 | #[doc(hidden)] 23 | pub use serde_derive::{Deserialize, Serialize}; 24 | 25 | pub use errors::*; 26 | pub use job::*; 27 | pub use registry::Registry; 28 | pub use runner::*; 29 | 30 | #[doc(hidden)] 31 | pub use registry::JobVTable; 32 | -------------------------------------------------------------------------------- /swirl/src/registry.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::new_without_default)] // https://github.com/rust-lang/rust-clippy/issues/3632 2 | 3 | use std::any::{Any, TypeId}; 4 | use std::collections::HashMap; 5 | use std::marker::PhantomData; 6 | 7 | use crate::db::DieselPoolObj; 8 | use crate::errors::PerformError; 9 | use crate::Job; 10 | 11 | #[derive(Default)] 12 | #[allow(missing_debug_implementations)] // Can't derive debug 13 | /// A registry of background jobs, used to map job types to concrete perform 14 | /// functions at runtime. 15 | pub struct Registry<Env> { 16 | jobs: HashMap<&'static str, JobVTable>, 17 | _marker: PhantomData<Env>, 18 | } 19 | 20 | impl<Env: 'static> Registry<Env> { 21 | /// Loads the registry from all invocations of [`register_job!`] for this 22 | /// environment type 23 | pub fn load() -> Self { 24 | let jobs = inventory::iter::<JobVTable> 25 | .into_iter() 26 | .filter(|vtable| vtable.env_type == TypeId::of::<Env>()) 27 | .map(|&vtable| (vtable.job_type, vtable)) 28 | .collect(); 29 | 30 | Self { 31 | jobs, 32 | _marker: PhantomData, 33 | } 34 | } 35 | 36 | /// Get the perform function for a given job type 37 | pub fn get(&self, job_type: &str) -> Option<PerformJob<Env>> { 38 | self.jobs.get(job_type).map(|&vtable| PerformJob { 39 | vtable, 40 | _marker: PhantomData, 41 | }) 42 | } 43 | } 44 | 45 | /// Register a job to be run by swirl. This must be called for any 46 | /// implementors of [`swirl::Job`] 47 | #[macro_export] 48 | macro_rules! register_job { 49 | ($job_ty: ty) => { 50 | $crate::inventory::submit! { 51 | #![crate = swirl] 52 | swirl::JobVTable::from_job::<$job_ty>() 53 | } 54 | }; 55 | } 56 | 57 | #[doc(hidden)] 58 | #[derive(Clone, Copy)] 59 | pub struct JobVTable { 60 | env_type: TypeId, 61 | job_type: &'static str, 62 | perform: fn(serde_json::Value, &dyn Any, &dyn DieselPoolObj) -> Result<(), PerformError>, 63 | } 64 | 65 | inventory::collect!(JobVTable); 66 | 67 | impl JobVTable { 68 | pub fn from_job<T: Job>() -> Self { 69 | Self { 70 | env_type: TypeId::of::<T::Environment>(), 71 | job_type: T::JOB_TYPE, 72 | perform: perform_job::<T>, 73 | } 74 | } 75 | } 76 | 77 | fn perform_job<T: Job>( 78 | data: serde_json::Value, 79 | env: &dyn Any, 80 | pool: &dyn DieselPoolObj, 81 | ) -> Result<(), PerformError> { 82 | let environment = env.downcast_ref().ok_or_else::<PerformError, _>(|| { 83 | "Incorrect environment type. This should never happen.
#[doc(hidden)]
#[derive(Clone, Copy)]
pub struct JobVTable {
    env_type: TypeId,
    job_type: &'static str,
    perform: fn(serde_json::Value, &dyn Any, &dyn DieselPoolObj) -> Result<(), PerformError>,
}

inventory::collect!(JobVTable);

impl JobVTable {
    pub fn from_job<T: 'static + Job>() -> Self {
        Self {
            env_type: TypeId::of::<T::Environment>(),
            job_type: T::JOB_TYPE,
            perform: perform_job::<T>,
        }
    }
}

fn perform_job<T: Job>(
    data: serde_json::Value,
    env: &dyn Any,
    pool: &dyn DieselPoolObj,
) -> Result<(), PerformError> {
    let environment = env.downcast_ref().ok_or_else::<PerformError, _>(|| {
        "Incorrect environment type. This should never happen. \
         Please open an issue at https://github.com/sgrif/swirl/issues/new"
            .into()
    })?;
    let data = serde_json::from_value(data)?;
    T::perform(data, environment, pool)
}

pub struct PerformJob<Env> {
    vtable: JobVTable,
    _marker: PhantomData<Env>,
}

impl<Env: 'static> PerformJob<Env> {
    pub fn perform(
        &self,
        data: serde_json::Value,
        env: &Env,
        pool: &dyn DieselPoolObj,
    ) -> Result<(), PerformError> {
        let perform_fn = self.vtable.perform;
        perform_fn(data, env, pool)
    }
}
--------------------------------------------------------------------------------
/swirl/src/runner.rs:
--------------------------------------------------------------------------------
use diesel::prelude::*;
#[cfg(feature = "r2d2")]
use diesel::r2d2;
use std::any::Any;
use std::error::Error;
use std::panic::{catch_unwind, AssertUnwindSafe, PanicInfo, RefUnwindSafe, UnwindSafe};
use std::sync::Arc;
use std::time::Duration;
use threadpool::ThreadPool;

use crate::db::*;
use crate::errors::*;
use crate::{storage, Registry};
use event::*;

mod channel;
mod event;

pub struct NoConnectionPoolGiven;

#[allow(missing_debug_implementations)]
pub struct Builder<Env, ConnectionPoolBuilder> {
    connection_pool_or_builder: ConnectionPoolBuilder,
    environment: Env,
    thread_count: Option<usize>,
    job_start_timeout: Option<Duration>,
}

impl<Env, ConnectionPoolBuilder> Builder<Env, ConnectionPoolBuilder> {
    /// Set the number of threads to be used to run jobs concurrently.
    ///
    /// Defaults to 5
    pub fn thread_count(mut self, thread_count: usize) -> Self {
        self.thread_count = Some(thread_count);
        self
    }

    fn get_thread_count(&self) -> usize {
        self.thread_count.unwrap_or(5)
    }

    /// The amount of time to wait for a job to start before assuming an error
    /// has occurred.
    ///
    /// Defaults to 10 seconds.
    pub fn job_start_timeout(mut self, timeout: Duration) -> Self {
        self.job_start_timeout = Some(timeout);
        self
    }

    /// Provide a connection pool to be used by the runner
    pub fn connection_pool<NewPool>(self, pool: NewPool) -> Builder<Env, NewPool> {
        Builder {
            connection_pool_or_builder: pool,
            environment: self.environment,
            thread_count: self.thread_count,
            job_start_timeout: self.job_start_timeout,
        }
    }
}
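// Editor's sketch (assumption: `MyPool` is a hypothetical type implementing
// `swirl::db::DieselPool`): supplying your own pool through `connection_pool`
// instead of the r2d2 default built below.
//
// let runner = Runner::builder(environment)
//     .connection_pool(MyPool::new())
//     .thread_count(4)
//     .build();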
#[cfg(feature = "r2d2")]
impl<Env> Builder<Env, NoConnectionPoolGiven> {
    /// Build the runner with an r2d2 connection pool
    ///
    /// This will override any connection pool previously provided
    pub fn database_url<S: Into<String>>(self, database_url: S) -> Builder<Env, R2d2Builder> {
        self.connection_pool_builder(database_url, r2d2::Builder::new())
    }

    /// Provide a connection pool builder.
    ///
    /// This will override any connection pool previously provided.
    ///
    /// You should call this method if you want to provide additional
    /// configuration for the database connection pool. The builder will be
    /// configured to have its max size set to `2 * thread_count`.
    /// To override this behavior, call [`connection_count`](Self::connection_count)
    pub fn connection_pool_builder<S: Into<String>>(
        self,
        database_url: S,
        builder: r2d2::Builder<r2d2::ConnectionManager<PgConnection>>,
    ) -> Builder<Env, R2d2Builder> {
        self.connection_pool(R2d2Builder::new(database_url.into(), builder))
    }
}

#[cfg(feature = "r2d2")]
impl<Env: 'static> Builder<Env, R2d2Builder> {
    /// Set the max size of the database connection pool
    pub fn connection_count(mut self, connection_count: u32) -> Self {
        self.connection_pool_or_builder
            .connection_count(connection_count);
        self
    }

    /// Build the runner with an r2d2 connection pool.
    pub fn build(self) -> Runner<Env, r2d2::Pool<r2d2::ConnectionManager<PgConnection>>> {
        let thread_count = self.get_thread_count();
        let connection_pool_size = thread_count as u32 * 2;
        let connection_pool = self.connection_pool_or_builder.build(connection_pool_size);

        Runner {
            connection_pool,
            thread_pool: ThreadPool::new(thread_count),
            environment: Arc::new(self.environment),
            registry: Arc::new(Registry::load()),
            job_start_timeout: self.job_start_timeout.unwrap_or(Duration::from_secs(10)),
        }
    }
}

impl<Env: 'static, ConnectionPool> Builder<Env, ConnectionPool>
where
    ConnectionPool: DieselPool,
{
    /// Build the runner
    pub fn build(self) -> Runner<Env, ConnectionPool> {
        Runner {
            thread_pool: ThreadPool::new(self.get_thread_count()),
            connection_pool: self.connection_pool_or_builder,
            environment: Arc::new(self.environment),
            registry: Arc::new(Registry::load()),
            job_start_timeout: self.job_start_timeout.unwrap_or(Duration::from_secs(10)),
        }
    }
}

#[allow(missing_debug_implementations)]
/// The core runner responsible for locking and running jobs
pub struct Runner<Env, ConnectionPool> {
    connection_pool: ConnectionPool,
    thread_pool: ThreadPool,
    environment: Arc<Env>,
    registry: Arc<Registry<Env>>,
    job_start_timeout: Duration,
}

impl<Env> Runner<Env, NoConnectionPoolGiven> {
    /// Create a builder for a job runner
    ///
    /// This method takes the two required configurations: the database
    /// connection pool and the environment to pass to your jobs. If your
    /// environment contains a connection pool, it should be the same pool
    /// given here.
    pub fn builder(environment: Env) -> Builder<Env, NoConnectionPoolGiven> {
        Builder {
            connection_pool_or_builder: NoConnectionPoolGiven,
            environment,
            thread_count: None,
            job_start_timeout: None,
        }
    }
}

impl<Env, ConnectionPool> Runner<Env, ConnectionPool> {
    #[doc(hidden)]
    /// For use in integration tests
    pub fn connection_pool(&self) -> &ConnectionPool {
        &self.connection_pool
    }
}
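// Editor's sketch (requires the `r2d2` feature; the URL and counts are
// illustrative):
//
// let runner = Runner::builder(())
//     .database_url("postgres://localhost/my_app")
//     .thread_count(8)
//     .connection_count(20) // otherwise defaults to 2 * thread_count
//     .build();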
impl<Env, ConnectionPool> Runner<Env, ConnectionPool>
where
    Env: RefUnwindSafe + Send + Sync + 'static,
    ConnectionPool: DieselPool + 'static,
{
    /// Runs all pending jobs in the queue
    ///
    /// This function will return once all jobs in the queue have begun running,
    /// but does not wait for them to complete. When this function returns, at
    /// least one thread will have tried to acquire a new job, and found there
    /// were none in the queue.
    pub fn run_all_pending_jobs(&self) -> Result<(), FetchError<ConnectionPool>> {
        use std::cmp::max;

        let max_threads = self.thread_pool.max_count();
        let (sender, receiver) = channel::new(max_threads);
        let mut pending_messages = 0;
        loop {
            let available_threads = max_threads - self.thread_pool.active_count();

            let jobs_to_queue = if pending_messages == 0 {
                // If we have no queued jobs talking to us, and there are no
                // available threads, we still need to queue at least one job
                // or we'll never receive a message
                max(available_threads, 1)
            } else {
                available_threads
            };

            for _ in 0..jobs_to_queue {
                self.run_single_job(sender.clone());
            }

            pending_messages += jobs_to_queue;
            match receiver.recv_timeout(self.job_start_timeout) {
                Ok(Event::Working) => pending_messages -= 1,
                Ok(Event::NoJobAvailable) => return Ok(()),
                Ok(Event::ErrorLoadingJob(e)) => return Err(FetchError::FailedLoadingJob(e)),
                Ok(Event::FailedToAcquireConnection(e)) => {
                    return Err(FetchError::NoDatabaseConnection(e));
                }
                Err(_) => return Err(FetchError::NoMessageReceived),
            }
        }
    }
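    // Editor's sketch of the intended calling pattern: poll in a loop,
    // sleeping briefly once the queue has been drained.
    //
    // loop {
    //     runner.run_all_pending_jobs()?;
    //     std::thread::sleep(Duration::from_millis(500));
    // }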
    fn run_single_job(&self, sender: EventSender<ConnectionPool>) {
        let environment = Arc::clone(&self.environment);
        let registry = Arc::clone(&self.registry);
        // FIXME: https://github.com/sfackler/r2d2/pull/70
        let connection_pool = AssertUnwindSafe(self.connection_pool().clone());
        self.get_single_job(sender, move |job| {
            let perform_job = registry
                .get(&job.job_type)
                .ok_or_else(|| PerformError::from(format!("Unknown job type {}", job.job_type)))?;
            perform_job.perform(job.data, &environment, &connection_pool.0)
        })
    }

    fn get_single_job<F>(&self, sender: EventSender<ConnectionPool>, f: F)
    where
        F: FnOnce(storage::BackgroundJob) -> Result<(), PerformError> + Send + UnwindSafe + 'static,
    {
        use diesel::result::Error::RollbackTransaction;

        // The connection may not be `Send` so we need to clone the pool instead
        let pool = self.connection_pool.clone();
        self.thread_pool.execute(move || {
            let conn = match pool.get() {
                Ok(conn) => conn,
                Err(e) => {
                    sender.send(Event::FailedToAcquireConnection(e));
                    return;
                }
            };

            let job_run_result = conn.transaction::<_, diesel::result::Error, _>(|| {
                let job = match storage::find_next_unlocked_job(&conn).optional() {
                    Ok(Some(j)) => {
                        sender.send(Event::Working);
                        j
                    }
                    Ok(None) => {
                        sender.send(Event::NoJobAvailable);
                        return Ok(());
                    }
                    Err(e) => {
                        sender.send(Event::ErrorLoadingJob(e));
                        return Err(RollbackTransaction);
                    }
                };
                let job_id = job.id;

                let result = catch_unwind(|| f(job))
                    .map_err(|e| try_to_extract_panic_info(&e))
                    .and_then(|r| r);

                match result {
                    Ok(_) => storage::delete_successful_job(&conn, job_id)?,
                    Err(e) => {
                        eprintln!("Job {} failed to run: {}", job_id, e);
                        storage::update_failed_job(&conn, job_id);
                    }
                }
                Ok(())
            });

            match job_run_result {
                Ok(_) | Err(RollbackTransaction) => {}
                Err(e) => {
                    panic!("Failed to update job: {:?}", e);
                }
            }
        })
    }

    fn connection(&self) -> Result<DieselPooledConn<ConnectionPool>, Box<dyn Error + Send + Sync>> {
        self.connection_pool.get().map_err(Into::into)
    }

    /// Waits for all running jobs to complete, and returns an error if any
    /// failed
    ///
    /// This function is intended for use in tests. If any jobs have failed, it
    /// will return `swirl::JobsFailed` with the number of jobs that failed.
    ///
    /// If any other unexpected errors occurred, such as panicked worker threads
    /// or an error loading the job count from the database, an opaque error
    /// will be returned.
    pub fn check_for_failed_jobs(&self) -> Result<(), FailedJobsError> {
        self.wait_for_jobs()?;
        let failed_jobs = storage::failed_job_count(&*self.connection()?)?;
        if failed_jobs == 0 {
            Ok(())
        } else {
            Err(JobsFailed(failed_jobs))
        }
    }

    fn wait_for_jobs(&self) -> Result<(), Box<dyn Error + Send + Sync>> {
        self.thread_pool.join();
        let panic_count = self.thread_pool.panic_count();
        if panic_count == 0 {
            Ok(())
        } else {
            Err(format!("{} threads panicked", panic_count).into())
        }
    }
}

/// Try to figure out what's in the box, and print it if we can.
///
/// The actual error type we will get from `panic::catch_unwind` is really poorly documented.
/// However, the `panic::set_hook` functions deal with a `PanicInfo` type, and its payload is
/// documented as "commonly but not always `&'static str` or `String`". So we can try all of those,
/// and give up if we didn't get one of those three types.
fn try_to_extract_panic_info(info: &(dyn Any + Send + 'static)) -> PerformError {
    if let Some(x) = info.downcast_ref::<PanicInfo>() {
        format!("job panicked: {}", x).into()
    } else if let Some(x) = info.downcast_ref::<&'static str>() {
        format!("job panicked: {}", x).into()
    } else if let Some(x) = info.downcast_ref::<String>() {
        format!("job panicked: {}", x).into()
    } else {
        "job panicked".into()
    }
}
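// Editor's sketch: the test-suite pattern the two methods above are designed
// for (the `runner` variable is illustrative).
//
// runner.run_all_pending_jobs()?;
// runner.check_for_failed_jobs()?; // Err(JobsFailed(n)) if n jobs have failed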
#[cfg(test)]
mod tests {
    use diesel::prelude::*;
    use diesel::r2d2;

    use super::*;
    use crate::schema::background_jobs::dsl::*;
    use std::panic::AssertUnwindSafe;
    use std::sync::{Arc, Barrier, Mutex, MutexGuard};

    #[test]
    fn jobs_are_locked_when_fetched() {
        let _guard = TestGuard::lock();

        let runner = runner();
        let first_job_id = create_dummy_job(&runner).id;
        let second_job_id = create_dummy_job(&runner).id;
        let fetch_barrier = Arc::new(AssertUnwindSafe(Barrier::new(2)));
        let fetch_barrier2 = fetch_barrier.clone();
        let return_barrier = Arc::new(AssertUnwindSafe(Barrier::new(2)));
        let return_barrier2 = return_barrier.clone();

        runner.get_single_job(channel::dummy_sender(), move |job| {
            fetch_barrier.0.wait(); // Tell thread 2 it can lock its job
            assert_eq!(first_job_id, job.id);
            return_barrier.0.wait(); // Wait for thread 2 to lock its job
            Ok(())
        });

        fetch_barrier2.0.wait(); // Wait until thread 1 locks its job
        runner.get_single_job(channel::dummy_sender(), move |job| {
            assert_eq!(second_job_id, job.id);
            return_barrier2.0.wait(); // Tell thread 1 it can unlock its job
            Ok(())
        });

        runner.wait_for_jobs().unwrap();
    }

    #[test]
    fn jobs_are_deleted_when_successfully_run() {
        let _guard = TestGuard::lock();

        let runner = runner();
        create_dummy_job(&runner);

        runner.get_single_job(channel::dummy_sender(), |_| Ok(()));
        runner.wait_for_jobs().unwrap();

        let remaining_jobs = background_jobs
            .count()
            .get_result(&*runner.connection().unwrap());
        assert_eq!(Ok(0), remaining_jobs);
    }

    #[test]
    fn failed_jobs_do_not_release_lock_before_updating_retry_time() {
        let _guard = TestGuard::lock();

        let runner = runner();
        create_dummy_job(&runner);
        let barrier = Arc::new(AssertUnwindSafe(Barrier::new(2)));
        let barrier2 = barrier.clone();

        runner.get_single_job(channel::dummy_sender(), move |_| {
            barrier.0.wait();
            // error so the job goes back into the queue
            Err("nope".into())
        });

        let conn = runner.connection().unwrap();
        // Wait for the first thread to acquire the lock
        barrier2.0.wait();
        // We are intentionally not using `get_single_job` here.
        // `SKIP LOCKED` is intentionally omitted here, so we block until
        // the lock on the first job is released.
        // If there is any point where the row is unlocked, but the retry
        // count is not updated, we will get a row here.
        let available_jobs = background_jobs
            .select(id)
            .filter(retries.eq(0))
            .for_update()
            .load::<i64>(&*conn)
            .unwrap();
        assert_eq!(0, available_jobs.len());

        // Sanity check to make sure the job actually is there
        let total_jobs_including_failed = background_jobs
            .select(id)
            .for_update()
            .load::<i64>(&*conn)
            .unwrap();
        assert_eq!(1, total_jobs_including_failed.len());

        runner.wait_for_jobs().unwrap();
    }

    #[test]
    fn panicking_in_jobs_updates_retry_counter() {
        let _guard = TestGuard::lock();
        let runner = runner();
        let job_id = create_dummy_job(&runner).id;

        runner.get_single_job(channel::dummy_sender(), |_| panic!());
        runner.wait_for_jobs().unwrap();

        let tries = background_jobs
            .find(job_id)
            .select(retries)
            .for_update()
            .first::<i32>(&*runner.connection().unwrap())
            .unwrap();
        assert_eq!(1, tries);
    }
    lazy_static::lazy_static! {
        // Since these tests deal with behavior concerning multiple connections
        // running concurrently, they have to run outside of a transaction.
        // Therefore we can't run more than one at a time.
        //
        // Rather than forcing the whole suite to be run with `--test-threads 1`,
        // we just lock these tests instead.
        static ref TEST_MUTEX: Mutex<()> = Mutex::new(());
    }

    struct TestGuard<'a>(MutexGuard<'a, ()>);

    impl<'a> TestGuard<'a> {
        fn lock() -> Self {
            TestGuard(TEST_MUTEX.lock().unwrap())
        }
    }

    impl<'a> Drop for TestGuard<'a> {
        fn drop(&mut self) {
            ::diesel::sql_query("TRUNCATE TABLE background_jobs")
                .execute(&*runner().connection().unwrap())
                .unwrap();
        }
    }

    type Runner<Env> = crate::Runner<Env, r2d2::Pool<r2d2::ConnectionManager<PgConnection>>>;

    fn runner() -> Runner<()> {
        let database_url =
            dotenv::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL must be set to run tests");

        crate::Runner::builder(())
            .database_url(database_url)
            .thread_count(2)
            .build()
    }

    fn create_dummy_job(runner: &Runner<()>) -> storage::BackgroundJob {
        ::diesel::insert_into(background_jobs)
            .values((job_type.eq("Foo"), data.eq(serde_json::json!(null))))
            .returning((id, job_type, data))
            .get_result(&*runner.connection().unwrap())
            .unwrap()
    }
}
--------------------------------------------------------------------------------
/swirl/src/runner/channel.rs:
--------------------------------------------------------------------------------
//! A wrapper around a `std::sync::mpsc::sync_channel` that allows easy creation
//! of a dummy sender for tests, and doesn't error if the receiver has hung up

pub use std::sync::mpsc::Receiver;
use std::sync::mpsc::{sync_channel, SyncSender};

pub fn new<T>(size: usize) -> (Sender<T>, Receiver<T>) {
    let (std_sender, std_receiver) = sync_channel(size);
    (Sender(std_sender), std_receiver)
}

#[cfg(test)]
pub fn dummy_sender<T>() -> Sender<T> {
    new(1).0
}

pub struct Sender<T>(SyncSender<T>);

impl<T> Sender<T> {
    pub fn send(&self, t: T) {
        let _ = self.0.send(t);
    }
}

impl<T> Clone for Sender<T>
where
    SyncSender<T>: Clone,
{
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}
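// Editor's sketch: why `send` swallows errors. The runner may stop listening
// (e.g. after a recv timeout) while a worker thread still holds a sender, and
// sending must not panic in that case.
//
// let (sender, receiver) = new::<&str>(1);
// drop(receiver);
// sender.send("no one is listening"); // SendError is discarded, no panic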
--------------------------------------------------------------------------------
/swirl/src/runner/event.rs:
--------------------------------------------------------------------------------
use diesel::result::Error as DieselError;

use super::channel;
use crate::db::DieselPool;

pub type EventSender<Pool> = channel::Sender<Event<Pool>>;

pub enum Event<Pool: DieselPool> {
    Working,
    NoJobAvailable,
    ErrorLoadingJob(DieselError),
    FailedToAcquireConnection(Pool::Error),
}

use std::fmt;

impl<Pool: DieselPool> fmt::Debug for Event<Pool> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Event::Working => f.debug_struct("Working").finish(),
            Event::NoJobAvailable => f.debug_struct("NoJobAvailable").finish(),
            Event::ErrorLoadingJob(e) => f.debug_tuple("ErrorLoadingJob").field(e).finish(),
            Event::FailedToAcquireConnection(e) => {
                f.debug_tuple("FailedToAcquireConnection").field(e).finish()
            }
        }
    }
}
--------------------------------------------------------------------------------
/swirl/src/schema.rs:
--------------------------------------------------------------------------------
table! {
    background_jobs (id) {
        id -> Int8,
        job_type -> Text,
        data -> Jsonb,
        retries -> Int4,
        last_retry -> Timestamp,
        created_at -> Timestamp,
    }
}
--------------------------------------------------------------------------------
/swirl/src/storage.rs:
--------------------------------------------------------------------------------
use diesel::dsl::now;
use diesel::pg::Pg;
use diesel::prelude::*;
use diesel::sql_types::{Bool, Integer, Interval};
use diesel::{delete, insert_into, update};
use serde_json;

use crate::errors::EnqueueError;
use crate::schema::background_jobs;
use crate::Job;

#[derive(Queryable, Identifiable, Debug, Clone)]
pub struct BackgroundJob {
    pub id: i64,
    pub job_type: String,
    pub data: serde_json::Value,
}

/// Enqueues a job to be run as soon as possible.
pub fn enqueue_job<T: Job>(conn: &PgConnection, job: T) -> Result<(), EnqueueError> {
    use crate::schema::background_jobs::dsl::*;

    let job_data = serde_json::to_value(job)?;
    insert_into(background_jobs)
        .values((job_type.eq(T::JOB_TYPE), data.eq(job_data)))
        .execute(conn)?;
    Ok(())
}

fn retriable() -> Box<dyn BoxableExpression<background_jobs::table, Pg, SqlType = Bool>> {
    use crate::schema::background_jobs::dsl::*;
    use diesel::dsl::*;

    sql_function!(fn power(x: Integer, y: Integer) -> Integer);

    Box::new(last_retry.lt(now - 1.minute().into_sql::<Interval>() * power(2, retries)))
}

/// Finds the next job that is unlocked, and ready to be retried. If a row is
/// found, it will be locked.
pub fn find_next_unlocked_job(conn: &PgConnection) -> QueryResult<BackgroundJob> {
    use crate::schema::background_jobs::dsl::*;

    background_jobs
        .select((id, job_type, data))
        .filter(retriable())
        .order(id)
        .for_update()
        .skip_locked()
        .first::<BackgroundJob>(conn)
}
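// Editor's note: the query above compiles to SQL roughly like the following;
// the exponential backoff doubles the wait between retries.
//
// SELECT id, job_type, data FROM background_jobs
// WHERE last_retry < now() - interval '1 minute' * power(2, retries)
// ORDER BY id
// FOR UPDATE SKIP LOCKED
// LIMIT 1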
/// The number of jobs that have failed at least once
pub fn failed_job_count(conn: &PgConnection) -> QueryResult<i64> {
    use crate::schema::background_jobs::dsl::*;

    background_jobs
        .count()
        .filter(retries.gt(0))
        .get_result(conn)
}

/// Deletes a job that has successfully completed running
pub fn delete_successful_job(conn: &PgConnection, job_id: i64) -> QueryResult<()> {
    use crate::schema::background_jobs::dsl::*;

    delete(background_jobs.find(job_id)).execute(conn)?;
    Ok(())
}

/// Marks that we just tried and failed to run a job.
///
/// Ignores any database errors that may have occurred. If the DB has gone away,
/// we assume that just trying again with a new connection will succeed.
pub fn update_failed_job(conn: &PgConnection, job_id: i64) {
    use crate::schema::background_jobs::dsl::*;

    let _ = update(background_jobs.find(job_id))
        .set((retries.eq(retries + 1), last_retry.eq(now)))
        .execute(conn);
}
--------------------------------------------------------------------------------
/swirl_proc_macro/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "swirl_proc_macro"
version = "0.1.0"
authors = ["Sean Griffin <sean@seantheprogrammer.com>"]
description = "This library should not be used directly, it is re-exported through swirl"
license = "MIT OR Apache-2.0"
edition = "2018"

[dependencies]
syn = { version = "1.0", features = ["full", "extra-traits"] }
quote = "1.0"
proc-macro2 = "1.0"

[lib]
proc-macro = true

[features]
nightly = []
--------------------------------------------------------------------------------
/swirl_proc_macro/src/background_job.rs:
--------------------------------------------------------------------------------
use crate::diagnostic_shim::*;
use proc_macro2::TokenStream;
use quote::quote;
use std::borrow::Cow;
use syn::punctuated::Punctuated;
use syn::spanned::Spanned;
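// Editor's expansion sketch (names hypothetical; see the quote! block in
// `expand` below for the authoritative template). Given
//
//     #[swirl::background_job]
//     fn ping(msg: String) -> Result<(), swirl::PerformError> { ... }
//
// the macro emits: (1) a `fn ping(msg: String) -> ping::Job` constructor,
// (2) a serializable `ping::Job` struct holding the arguments, (3) an
// `impl swirl::Job for ping::Job` whose `perform` runs the original body,
// and (4) a `swirl::register_job!(Job)` invocation.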
pub fn expand(item: syn::ItemFn) -> Result<TokenStream, Diagnostic> {
    let job = BackgroundJob::try_from(item)?;

    let attrs = job.attrs;
    let vis = job.visibility;
    let fn_token = job.fn_token;
    let name = job.name;
    let env_pat = &job.args.env_arg.pat;
    let env_type = &job.args.env_arg.ty;
    let connection_arg = &job.args.connection_arg;
    let pool_pat = connection_arg.pool_pat();
    let pool_ty = connection_arg.pool_ty();
    let fn_args = job.args.iter();
    let struct_def = job.args.struct_def();
    let struct_assign = job.args.struct_assign();
    let arg_names = job.args.names();
    let return_type = job.return_type;
    let body = connection_arg.wrap(job.body);

    let res = quote! {
        #(#attrs)*
        #vis #fn_token #name (#(#fn_args),*) -> #name :: Job {
            #name :: Job {
                #(#struct_assign),*
            }
        }

        impl swirl::Job for #name :: Job {
            type Environment = #env_type;
            const JOB_TYPE: &'static str = stringify!(#name);

            #fn_token perform(self, #env_pat: &Self::Environment, #pool_pat: &#pool_ty) #return_type {
                let Self { #(#arg_names),* } = self;
                #body
            }
        }

        mod #name {
            use super::*;

            #[derive(swirl::Serialize, swirl::Deserialize)]
            #[serde(crate = "swirl::serde")]
            pub struct Job {
                #(#struct_def),*
            }

            swirl::register_job!(Job);
        }
    };
    Ok(res)
}

struct BackgroundJob {
    attrs: Vec<syn::Attribute>,
    visibility: syn::Visibility,
    fn_token: syn::Token![fn],
    name: syn::Ident,
    args: JobArgs,
    return_type: syn::ReturnType,
    body: Vec<syn::Stmt>,
}

impl BackgroundJob {
    fn try_from(item: syn::ItemFn) -> Result<Self, Diagnostic> {
        let syn::ItemFn {
            attrs,
            vis,
            sig,
            block,
        } = item;

        if let Some(constness) = sig.constness {
            return Err(constness
                .span
                .error("#[swirl::background_job] cannot be used on const functions"));
        }

        if let Some(unsafety) = sig.unsafety {
            return Err(unsafety
                .span
                .error("#[swirl::background_job] cannot be used on unsafe functions"));
        }

        if let Some(asyncness) = sig.asyncness {
            return Err(asyncness
                .span
                .error("#[swirl::background_job] cannot be used on async functions"));
        }

        if let Some(abi) = sig.abi {
            return Err(abi
                .span()
                .error("#[swirl::background_job] cannot be used on functions with an abi"));
        }

        if !sig.generics.params.is_empty() {
            return Err(sig
                .generics
                .span()
                .error("#[swirl::background_job] cannot be used on generic functions"));
        }

        if let Some(where_clause) = sig.generics.where_clause {
            return Err(where_clause.where_token.span.error(
                "#[swirl::background_job] cannot be used on functions with a where clause",
            ));
        }

        let fn_token = sig.fn_token;
        let return_type = sig.output.clone();
        let ident = sig.ident.clone();
        let job_args = JobArgs::try_from(sig)?;

        Ok(Self {
            attrs,
            visibility: vis,
            fn_token,
            name: ident,
            args: job_args,
            return_type,
            body: block.stmts,
        })
    }
}
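// Editor's note: signatures rejected by `try_from` above (sketch, not
// exhaustive):
//
// #[swirl::background_job] const fn a() {}  // const functions
// #[swirl::background_job] unsafe fn b() {} // unsafe functions
// #[swirl::background_job] async fn c() {}  // async functions
// #[swirl::background_job] fn d<T>(x: T) {} // generic functions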
struct JobArgs {
    env_arg: EnvArg,
    connection_arg: ConnectionArg,
    args: Punctuated<syn::PatType, syn::Token![,]>,
}

impl JobArgs {
    fn iter(&self) -> <&Self as IntoIterator>::IntoIter {
        self.into_iter()
    }

    fn try_from(decl: syn::Signature) -> Result<Self, Diagnostic> {
        let mut env_arg = None;
        let mut connection_arg = ConnectionArg::None;
        let mut args = Punctuated::new();

        for fn_arg in decl.inputs {
            let pat_type = match fn_arg {
                syn::FnArg::Receiver(..) => {
                    return Err(fn_arg.span().error("Background jobs cannot take self"));
                }
                syn::FnArg::Typed(pat_type) => pat_type,
            };

            if let syn::Pat::Ident(syn::PatIdent {
                by_ref: None,
                subpat: None,
                ..
            }) = *pat_type.pat
            {
                // ok
            } else {
                return Err(pat_type
                    .pat
                    .span()
                    .error("#[swirl::background_job] cannot yet handle patterns"));
            }

            let span = pat_type.span();
            match (&env_arg, &connection_arg, Arg::try_from(pat_type)?) {
                (None, _, Arg::Env(arg)) => env_arg = Some(arg),
                (Some(_), _, Arg::Env(_)) => {
                    return Err(
                        span.error("Background jobs cannot take references as arguments")
                            .help("If this argument is a database connection, the type must be `&PgConnection`")
                    );
                }
                (_, ConnectionArg::None, Arg::Connection(arg)) => connection_arg = arg,
                (_, _, Arg::Connection(_)) => {
                    return Err(
                        span.error("Multiple database connection arguments")
                            .help("To take a connection pool as an argument instead of a single connection, use the type `&dyn swirl::db::DieselPoolObj`")
                    );
                }
                (_, _, Arg::Normal(pat_type)) => args.push(pat_type),
            }
        }

        Ok(Self {
            env_arg: env_arg.unwrap_or_default(),
            connection_arg,
            args,
        })
    }

    fn struct_def(&self) -> impl Iterator<Item = TokenStream> + '_ {
        self.args.iter().map(|arg| quote::quote!(pub(super) #arg))
    }

    fn struct_assign(&self) -> impl Iterator<Item = syn::FieldValue> + '_ {
        self.names().map(|ident| syn::parse_quote!(#ident: #ident))
    }

    fn names(&self) -> impl Iterator<Item = syn::Ident> + '_ {
        self.args.iter().map(|arg| match &*arg.pat {
            syn::Pat::Ident(pat_ident) => pat_ident.ident.clone(),
            _ => unreachable!(),
        })
    }
}

impl<'a> IntoIterator for &'a JobArgs {
    type Item = <&'a Punctuated<syn::PatType, syn::Token![,]> as IntoIterator>::Item;
    type IntoIter = <&'a Punctuated<syn::PatType, syn::Token![,]> as IntoIterator>::IntoIter;

    fn into_iter(self) -> Self::IntoIter {
        (&self.args).into_iter()
    }
}

enum Arg {
    Env(EnvArg),
    Connection(ConnectionArg),
    Normal(syn::PatType),
}

impl Arg {
    fn try_from(pat_type: syn::PatType) -> Result<Self, Diagnostic> {
        if let syn::Type::Reference(type_ref) = *pat_type.ty {
            if let Some(mutable) = type_ref.mutability {
                return Err(mutable.span.error("Unexpected `mut`"));
            }
            let pat = pat_type.pat;
            let ty = type_ref.elem;
            if ConnectionArg::is_connection_arg(&ty) {
                Ok(Arg::Connection(ConnectionArg::from_arg(pat, ty)))
            } else {
                Ok(Arg::Env(EnvArg { pat, ty }))
            }
        } else {
            Ok(Arg::Normal(pat_type))
        }
    }
}

struct EnvArg {
    pat: Box<syn::Pat>,
    ty: Box<syn::Type>,
}

impl Default for EnvArg {
    fn default() -> Self {
        Self {
            pat: syn::parse_quote!(_),
            ty: syn::parse_quote!(()),
        }
    }
}

enum ConnectionArg {
    None,
    SingleConnection(Box<syn::Pat>),
    Pool(Box<syn::Pat>, Box<syn::Type>),
}
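// Editor's classification sketch: how reference arguments map to the variants
// above (`MyEnv` is hypothetical).
//
// fn job(env: &MyEnv)                                      // ConnectionArg::None
// fn job(env: &MyEnv, conn: &PgConnection)                 // SingleConnection
// fn job(env: &MyEnv, pool: &dyn swirl::db::DieselPoolObj) // Pool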
impl ConnectionArg {
    fn is_single_connection(ty: &syn::Type) -> bool {
        if let syn::Type::Path(syn::TypePath { path, .. }) = ty {
            path_ends_with(path, "PgConnection")
        } else {
            false
        }
    }

    fn is_pool(ty: &syn::Type) -> bool {
        if let syn::Type::TraitObject(type_trait_object) = ty {
            type_trait_object.bounds.iter().any(|bound| {
                if let syn::TypeParamBound::Trait(trait_bound) = bound {
                    path_ends_with(&trait_bound.path, "DieselPoolObj")
                } else {
                    false
                }
            })
        } else {
            false
        }
    }

    fn is_connection_arg(ty: &syn::Type) -> bool {
        Self::is_single_connection(ty) || Self::is_pool(ty)
    }

    fn from_arg(pat: Box<syn::Pat>, ty: Box<syn::Type>) -> Self {
        if Self::is_single_connection(&ty) {
            ConnectionArg::SingleConnection(pat)
        } else if Self::is_pool(&ty) {
            ConnectionArg::Pool(pat, ty)
        } else {
            ConnectionArg::None
        }
    }

    fn pool_pat(&self) -> Cow<'_, syn::Pat> {
        match self {
            ConnectionArg::None => Cow::Owned(syn::parse_quote!(_)),
            ConnectionArg::SingleConnection(_) => {
                Cow::Owned(syn::parse_quote!(__swirl_connection_pool))
            }
            ConnectionArg::Pool(pat, _) => Cow::Borrowed(pat),
        }
    }

    fn pool_ty(&self) -> Cow<'_, syn::Type> {
        if let ConnectionArg::Pool(_, ty) = self {
            Cow::Borrowed(ty)
        } else {
            Cow::Owned(syn::parse_quote!(swirl::db::DieselPoolObj))
        }
    }

    fn wrap(&self, body: Vec<syn::Stmt>) -> TokenStream {
        let mut body = quote!(#(#body)*);
        if let ConnectionArg::SingleConnection(pat) = self {
            let pool_pat = self.pool_pat();
            body = quote! {
                #pool_pat.with_connection(&|#pat| {
                    #body
                })
            }
        }
        body
    }
}

fn path_ends_with(path: &syn::Path, needle: &str) -> bool {
    path.segments
        .last()
        .map(|s| s.arguments.is_empty() && s.ident == needle)
        .unwrap_or(false)
}
--------------------------------------------------------------------------------
/swirl_proc_macro/src/diagnostic_shim.rs:
--------------------------------------------------------------------------------
use proc_macro2::{Span, TokenStream};

pub trait DiagnosticShim {
    fn error<T: Into<String>>(self, msg: T) -> Diagnostic;
}

#[cfg(feature = "nightly")]
impl DiagnosticShim for Span {
    fn error<T: Into<String>>(self, msg: T) -> Diagnostic {
        self.unstable().error(msg)
    }
}

#[cfg(not(feature = "nightly"))]
impl DiagnosticShim for Span {
    fn error<T: Into<String>>(self, msg: T) -> Diagnostic {
        Diagnostic::error(self, msg)
    }
}

#[cfg(feature = "nightly")]
pub use proc_macro::Diagnostic;

#[cfg(not(feature = "nightly"))]
pub struct Diagnostic {
    span: Span,
    message: String,
}

#[cfg(not(feature = "nightly"))]
impl Diagnostic {
    fn error<T: Into<String>>(span: Span, msg: T) -> Self {
        Diagnostic {
            span,
            message: msg.into(),
        }
    }

    pub(crate) fn help<T: Into<String>>(mut self, msg: T) -> Self {
        self.message += &format!("\nhelp: {}", msg.into());
        self
    }
}

pub trait DiagnosticExt {
    fn to_compile_error(self) -> TokenStream;
}

#[cfg(feature = "nightly")]
impl DiagnosticExt for Diagnostic {
    fn to_compile_error(self) -> TokenStream {
        self.emit();
        "".parse().unwrap()
    }
}

#[cfg(not(feature = "nightly"))]
impl DiagnosticExt for Diagnostic {
    fn to_compile_error(self) -> TokenStream {
        syn::Error::new(self.span, self.message).to_compile_error()
    }
}
--------------------------------------------------------------------------------
/swirl_proc_macro/src/lib.rs:
--------------------------------------------------------------------------------
#![deny(warnings)]
#![recursion_limit = "128"]
#![cfg_attr(feature = "nightly", feature(proc_macro_diagnostic))]

extern crate proc_macro;

mod background_job;
mod diagnostic_shim;

use proc_macro::TokenStream;
use proc_macro2::Span;
use syn::{parse_macro_input, ItemFn};

use diagnostic_shim::*;

#[proc_macro_attribute]
pub fn background_job(attr: TokenStream, item: TokenStream) -> TokenStream {
    if !attr.is_empty() {
        return syn::Error::new(
            Span::call_site(),
            "swirl::background_job does not take arguments",
        )
        .to_compile_error()
        .into();
    }

    let item = parse_macro_input!(item as ItemFn);
    emit_errors(background_job::expand(item))
}

fn emit_errors(result: Result<proc_macro2::TokenStream, Diagnostic>) -> TokenStream {
    result
        .map(Into::into)
        .unwrap_or_else(|e| e.to_compile_error().into())
}
--------------------------------------------------------------------------------