├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ └── ci.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── THIRDPARTY.toml ├── assets ├── logo.png └── logo.pxm ├── compose.yaml ├── makefile ├── raiden-derive ├── .gitignore ├── Cargo.toml └── src │ ├── attribute │ ├── mod.rs │ └── names.rs │ ├── condition │ ├── builder.rs │ └── mod.rs │ ├── filter_expression │ ├── builder.rs │ └── mod.rs │ ├── finder │ └── mod.rs │ ├── helpers.rs │ ├── key │ └── mod.rs │ ├── key_condition │ ├── builder.rs │ └── mod.rs │ ├── lib.rs │ ├── ops │ ├── batch_delete.rs │ ├── batch_get.rs │ ├── delete.rs │ ├── get.rs │ ├── mod.rs │ ├── put.rs │ ├── query.rs │ ├── scan.rs │ ├── shared.rs │ ├── transact_write.rs │ └── update.rs │ └── rename │ └── mod.rs ├── raiden ├── .gitignore ├── Cargo.toml ├── examples │ ├── delete.rs │ ├── get_with_reserved.rs │ ├── get_with_retries.rs │ ├── hello.rs │ ├── last_key.rs │ ├── put.rs │ ├── query.rs │ ├── query_rename.rs │ ├── scan.rs │ ├── scan_with_filter.rs │ ├── transact_write.rs │ ├── transact_write_with_http_client.rs │ ├── update.rs │ └── with_http_client.rs ├── src │ ├── condition │ │ └── mod.rs │ ├── errors │ │ ├── mod.rs │ │ └── transaction.rs │ ├── filter_expression │ │ └── mod.rs │ ├── id_generator │ │ └── mod.rs │ ├── key_condition │ │ └── mod.rs │ ├── lib.rs │ ├── next_token │ │ └── mod.rs │ ├── ops │ │ ├── batch_delete.rs │ │ ├── batch_get.rs │ │ ├── get.rs │ │ ├── mod.rs │ │ ├── put.rs │ │ ├── query.rs │ │ ├── scan.rs │ │ ├── transact_write.rs │ │ └── update.rs │ ├── retry │ │ └── mod.rs │ ├── types.rs │ ├── update_expression │ │ ├── add.rs │ │ ├── delete.rs │ │ ├── mod.rs │ │ └── set.rs │ └── value_id.rs └── tests │ ├── all │ ├── batch_delete.rs │ ├── batch_get.rs │ ├── condition.rs │ ├── delete.rs │ ├── filter_expression.rs │ ├── get.rs │ ├── key_condition.rs │ ├── mod.rs │ ├── put.rs │ ├── query.rs │ ├── rename.rs │ ├── rename_all.rs │ ├── scan.rs │ ├── transact_write.rs │ └── update.rs │ └── mod.rs ├── renovate.json ├── rust-toolchain.toml ├── setup ├── deps.ts ├── dynamo_util.ts ├── fixtures │ ├── batch_delete_test_0.ts │ ├── batch_delete_test_1.ts │ ├── batch_test_0.ts │ ├── batch_test_1.ts │ ├── batch_test_2.ts │ ├── delete_test_0.ts │ ├── delete_test_1.ts │ ├── empty_put_test_data_0.ts │ ├── empty_set_test_data_0.ts │ ├── empty_string_test_data_0.ts │ ├── float_test.ts │ ├── last_evaluate_key_data.ts │ ├── project.ts │ ├── put_item_condition_data_0.ts │ ├── query_large_data_test.ts │ ├── query_test_data_0.ts │ ├── query_test_data_1.ts │ ├── rename_all_camel_case_test_data_0.ts │ ├── rename_all_pascal_case_test_data_0.ts │ ├── rename_test_data_0.ts │ ├── reserved_test_data_0.ts │ ├── scan_large_data_test.ts │ ├── scan_test_data_0.ts │ ├── scan_with_filter_test_data_0.ts │ ├── test_user_staging.ts │ ├── tx_conditional_check_test_data_0.ts │ ├── tx_conditional_check_test_data_1.ts │ ├── tx_delete_test_data_0.ts │ ├── update_add_test_data_0.ts │ ├── update_delete_test_data_0.ts │ ├── update_remove_test_data_0.ts │ ├── update_test_data_0.ts │ ├── update_test_data_1.ts │ ├── update_with_contains_in_set_condition.ts │ ├── use_default_for_null_data.ts │ ├── use_default_test_data_0.ts │ └── user.ts └── setup.ts └── yarn.lock /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 
5 | labels: bug 6 | assignees: '' 7 | --- 8 | 9 | ## Describe the bug 10 | 11 | A clear and concise description of what the bug is. 12 | 13 | ## Steps to reproduce 14 | 15 | Steps to reproduce the behavior: 16 | 17 | 1. Go to '...' 18 | 2. Click on '....' 19 | 3. Scroll down to '....' 20 | 4. See error 21 | 22 | ## Expected behavior 23 | 24 | A clear and concise description of what you expected to happen. 25 | 26 | ## Actual behavior 27 | 28 | A clear and concise description of what actually happened. 29 | 30 | ## Screenshots 31 | 32 | If applicable, add screenshots to help explain your problem. 33 |
-------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | --- 8 | 9 | ## Is your feature request related to a problem? Please describe. 10 | 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | ## Describe the solution you'd like 14 | 15 | A clear and concise description of what you want to happen. 16 | 17 | ## Describe alternatives you've considered 18 | 19 | A clear and concise description of any alternative solutions or features you've considered. 20 | 21 | ## Additional context 22 | 23 | Add any other context or screenshots about the feature request here. 24 |
-------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## What does this change? 2 | 3 | A clear and concise description of what the change is. 4 | 5 | ## References 6 | 7 | - If you have links to other resources, please list them here. (e.g. issue URL, related pull request URL, documents) 8 | 9 | ## Screenshots 10 | 11 | If applicable, add screenshots to help explain your changes. 12 | 13 | ## What can I check for bug fixes? 14 | 15 | Please briefly describe how to confirm that the bug has been resolved.
16 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | on: [push, pull_request] 3 | 4 | jobs: 5 | test: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - uses: actions/checkout@master 9 | - uses: denoland/setup-deno@909cc5acb0fdd60627fb858598759246509fa755 # v2.0.2 10 | with: 11 | deno-version: v1.x 12 | - name: Run deno fmt and deno lint 13 | run: | 14 | deno fmt --check setup 15 | deno lint 16 | - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f # v1.0.6 17 | with: 18 | profile: minimal 19 | toolchain: 1.80.0 20 | override: true 21 | - name: Install deps 22 | run: sudo apt-get install libssl-dev 23 | - name: Cache cargo registry 24 | uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 25 | with: 26 | path: ~/.cargo/registry 27 | key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} 28 | - name: Cache cargo index 29 | uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 30 | with: 31 | path: ~/.cargo/git 32 | key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} 33 | - name: Install clippy and rustfmt 34 | run: rustup component add clippy rustfmt 35 | - name: Run rustfmt 36 | uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1 37 | with: 38 | command: fmt 39 | args: -- --check 40 | - name: Run clippy 41 | uses: actions-rs/cargo@ae10961054e4aa8b4aa7dffede299aaf087aa33b # v1.0.1 42 | with: 43 | command: clippy 44 | args: -- -D warnings 45 | - name: Cache cargo build 46 | uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 47 | with: 48 | path: target 49 | key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }} 50 | - name: test 51 | run: make test 52 |
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | node_modules -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## @0.0.63 (12. April, 2022) 9 | 10 | - Support `filter` expression for query and scan. You can pass a `filter_expression` as shown below.
12 | - Use tokio@1.17.0 13 | 14 | ``` rust 15 | let filter = Scan::filter_expression(Scan::num()).eq(1000); 16 | let res = client.scan().filter(filter).run().await.unwrap(); 17 | ``` 18 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["raiden", "raiden-derive"] 3 | 4 | [workspace.package] 5 | authors = ["bokuweb "] 6 | edition = "2021" 7 | license = "MIT OR Apache-2.0" 8 | rust-version = "1.80.0" 9 | version = "0.1.0" 10 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |


4 | DynamoDB library for Rust. 5 |

6 | 7 | --- 8 | 9 | ![Continuous Integration](https://github.com/bokuweb/raiden/workflows/Continuous%20Integration/badge.svg) 10 | 11 | ## Examples 12 | 13 | ### get_item example 14 | 15 | ``` rust 16 | #[derive(Raiden)] 17 | #[raiden(table_name = "user")] 18 | pub struct User { 19 | #[raiden(partition_key)] 20 | id: String, 21 | name: String, 22 | } 23 | 24 | #[tokio::main] 25 | async fn main() { 26 | let client = User::client(Region::UsEast1); 27 | let _res = client.get("user_primary_key").run().await; 28 | } 29 | ``` 30 | 31 | ### put_item example 32 | 33 | ``` rust 34 | #[derive(Raiden)] 35 | #[raiden(table_name = "user")] 36 | pub struct User { 37 | #[raiden(partition_key)] 38 | id: String, 39 | name: String, 40 | } 41 | 42 | #[tokio::main] 43 | async fn main() { 44 | let client = User::client(Region::UsEast1); 45 | let input = User::put_item_builder() 46 | .id("foo".to_owned()) 47 | .name("bokuweb".to_owned()) 48 | .build(); 49 | let res = client.put(&input).run().await; 50 | } 51 | ``` 52 | 53 | ### batch_get_item example 54 | 55 | ``` rust 56 | #[derive(Raiden, Debug, PartialEq)] 57 | pub struct User { 58 | #[raiden(partition_key)] 59 | id: String, 60 | #[raiden(sort_key)] 61 | year: usize, 62 | } 63 | 64 | #[tokio::main] 65 | async fn main() { 66 | let client = User::client(Region::UsEast1); 67 | let keys: Vec<(&str, usize)> = vec![("Alice", 1992), ("Bob", 1976), ("Charlie", 2002)]; 68 | let res = client.batch_get(keys).run().await; 69 | } 70 | ``` 71 | 72 | ## Support `tokio-rs/tracing` 73 | 74 | `raiden` can emit a span for each DynamoDB call via `tracing` (the span is named `dynamodb::action` and carries the table name and API name as fields). 75 | To activate this, enable the `tracing` feature in your `Cargo.toml`. Your crate also needs its own `tracing` dependency. 76 | 77 | ```toml 78 | # Example 79 | [dependencies] 80 | raiden = { git = "https://github.com/raiden-rs/raiden-dynamo.git", tag = "0.0.76", features = ["tracing"] } 81 | tracing = "0.1" 82 | ```
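To actually see these spans, your application must install a `tracing` subscriber. A minimal sketch, assuming `tracing_subscriber` (not a dependency of this repository) is used as the collector:

``` rust
use raiden::*;

#[derive(Raiden)]
#[raiden(table_name = "user")]
pub struct User {
    #[raiden(partition_key)]
    id: String,
    name: String,
}

#[tokio::main]
async fn main() {
    // Install a subscriber so the `dynamodb::action` spans emitted by raiden
    // (with the table name and API name as fields) become visible.
    // Any `tracing` subscriber works; adjust the level filter as you prefer.
    tracing_subscriber::fmt()
        .with_max_level(tracing::Level::DEBUG)
        .init();

    let client = User::client(Region::UsEast1);
    let _res = client.get("user_primary_key").run().await;
}
```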
87 | 88 | ## Development 89 | 90 | ### Requirements 91 | 92 | - Rust 93 | - Deno (1.13.2+) 94 | - GNU Make 95 | - Docker Engine 96 | 97 | ### Setup 98 | 99 | ``` 100 | AWS_ACCESS_KEY_ID=awsdummy AWS_SECRET_ACCESS_KEY=awsdummy make dynamo 101 | ``` 102 | 103 | This starts DynamoDB Local in a Docker container and then sets up the test fixtures. 104 | 105 | ### Test 106 | 107 | ``` 108 | AWS_ACCESS_KEY_ID=awsdummy AWS_SECRET_ACCESS_KEY=awsdummy make test 109 | ``` 110 | 111 | NOTE: Plain `cargo test` is not recommended because the test suite doesn't support running tests in parallel. Use `cargo test -- --test-threads=1` instead. 112 | 113 | ### Example 114 | 115 | ``` 116 | AWS_ACCESS_KEY_ID=awsdummy AWS_SECRET_ACCESS_KEY=awsdummy cargo run --example EXAMPLE_NAME 117 | ``` 118 | 119 | ### Utility 120 | 121 | [dynamodb-admin](https://github.com/aaronshaf/dynamodb-admin) is useful to check data in DynamoDB Local. 122 | 123 | ``` 124 | npx dynamodb-admin 125 | ``` 126 | 127 | Then open `http://localhost:8001` in a browser. 128 | 129 | ## Supported APIs 130 | 131 | ### Item 132 | 133 | - [x] BatchGetItem 134 | - [ ] BatchWriteItem 135 | - [x] DeleteItem 136 | - [x] GetItem 137 | - [x] PutItem 138 | - [x] Query 139 | - [x] Scan 140 | - [ ] TransactGetItems 141 | - [x] TransactWriteItems 142 | - [x] UpdateItem 143 | 144 | ## Known limitations 145 | 146 | Here is a list of unsupported features/behaviors in the current implementation. 147 | We plan to resolve these issues in a future release. 148 | 149 | - [x] Automatic retrying: https://github.com/raiden-rs/raiden/issues/44 150 | - [x] Strict type checking of keys: https://github.com/raiden-rs/raiden/issues/26 151 | - [x] Exponential backoff handling 152 | 153 | ## License 154 | 155 | This project is available under the terms of either the [Apache 2.0 license](./LICENSE-APACHE) or the [MIT license](./LICENSE-MIT). 156 |
-------------------------------------------------------------------------------- /assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/raiden-rs/raiden-dynamo/da412a8b4e04e273c0ad649ec5e98b7fc74c6e16/assets/logo.png -------------------------------------------------------------------------------- /assets/logo.pxm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/raiden-rs/raiden-dynamo/da412a8b4e04e273c0ad649ec5e98b7fc74c6e16/assets/logo.pxm
-------------------------------------------------------------------------------- /compose.yaml: -------------------------------------------------------------------------------- 1 | name: "raiden-dynamo" 2 | 3 | services: 4 | dynamodb: 5 | image: "amazon/dynamodb-local" 6 | ports: 7 | - "127.0.0.1:8000:8000" 8 | healthcheck: 9 | test: ["CMD-SHELL", "curl -s http://localhost:8000/shell || exit 1"] 10 | interval: "30s" 11 | start_period: "10s" 12 | timeout: "5s" 13 |
-------------------------------------------------------------------------------- /makefile: -------------------------------------------------------------------------------- 1 | export AWS_ACCESS_KEY_ID = awsdummy 2 | export AWS_SECRET_ACCESS_KEY = awsdummy 3 | 4 | .PHONY: dynamo 5 | dynamo: 6 | - docker compose down --volumes 7 | docker compose up -d --wait 8 | deno run --allow-net=localhost:8000 --allow-env --allow-read --allow-sys --no-check ./setup/setup.ts 9 | 10 | .PHONY: test 11 | test: 12 | make dynamo 13 | cargo test -- --test-threads=1 14 | 15 | .PHONY: lint 16 | lint: 17 | cargo clippy --all-targets -- -D warnings 18 | cargo clippy --all-targets --no-default-features --features rustls -- -D warnings 19 | cargo clippy --all-targets --features tracing -- -D warnings 20 | 21 | .PHONY: check-deps 22 | check-deps: 23 | cargo machete 24 | cargo +nightly udeps --all-targets 25 | cargo +nightly udeps --all-targets --no-default-features --features rustls 26 | cargo +nightly udeps --all-targets --features tracing 27 | 28 | .PHONY: licenses 29 | licenses: 30 | cargo bundle-licenses --format toml --output THIRDPARTY.toml 31 | 32 | .PHONY: check-licenses 33 | check-licenses: 34 | RUST_LOG=error cargo bundle-licenses --format toml --output __CHECK --previous THIRDPARTY.toml --check-previous 35 | rm __CHECK || true 36 |
-------------------------------------------------------------------------------- /raiden-derive/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /raiden-derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "raiden-derive" 3 | authors.workspace = true 4 | edition.workspace = true 5 | license.workspace = true 6 | rust-version.workspace = true 7 | version.workspace = true 8 | 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [lib] 12 | proc-macro = true 13 | 14 | [dependencies] 15
| convert_case = "^0.8.0" 16 | ident_case = "^1.0.1" 17 | proc-macro2 = "^1.0.95" 18 | quote = "^1.0.40" 19 | syn = "^2.0.101" 20 | 21 | [features] 22 | default = [] 23 | tracing = [] 24 | -------------------------------------------------------------------------------- /raiden-derive/src/attribute/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod names; 2 | 3 | pub(crate) use names::*; 4 | -------------------------------------------------------------------------------- /raiden-derive/src/attribute/names.rs: -------------------------------------------------------------------------------- 1 | use quote::*; 2 | 3 | use crate::rename::*; 4 | use convert_case::{Case, Casing}; 5 | 6 | // TODO: Add map and list accessor 7 | // e.g. MyMap.nestedField.deeplyNestedField 8 | // Should we annotate map or list accessor with following derive? 9 | // #[raiden(expression_name = "MyMap.nestedField.deeplyNestedField")] 10 | pub fn expand_attr_names( 11 | attr_enum_name: &proc_macro2::Ident, 12 | fields: &syn::FieldsNamed, 13 | rename_all_type: crate::rename::RenameAllType, 14 | struct_name: &proc_macro2::Ident, 15 | ) -> proc_macro2::TokenStream { 16 | let names = fields.named.iter().map(|f| { 17 | let ident = &f.ident.clone().unwrap(); 18 | let renamed = crate::finder::find_rename_value(&f.attrs); 19 | 20 | let name = if let Some(renamed) = renamed { 21 | renamed.to_case(Case::Pascal) 22 | } else { 23 | exclude_raw_ident(&ident.to_string()).to_case(Case::Pascal) 24 | }; 25 | let name = format_ident!("{}", name); 26 | quote! { 27 | #name 28 | } 29 | }); 30 | 31 | let arms = fields.named.iter().map(|f| { 32 | let ident = &f.ident.clone().unwrap(); 33 | let renamed = crate::finder::find_rename_value(&f.attrs); 34 | let basename = create_renamed(ident.to_string(), renamed, rename_all_type); 35 | let attr_name = basename.to_string(); 36 | let name = exclude_raw_ident(&basename).to_case(Case::Pascal); 37 | let name = format_ident!("{}", name); 38 | quote! { 39 | #attr_enum_name::#name => #attr_name.to_owned() 40 | } 41 | }); 42 | 43 | let getters = fields.named.iter().map(|f| { 44 | let ident = &f.ident.clone().unwrap(); 45 | let renamed = crate::finder::find_rename_value(&f.attrs); 46 | let basename = create_renamed(ident.to_string(), renamed, rename_all_type); 47 | let func_name = basename.to_case(Case::Snake); 48 | let func_name = if crate::helpers::is_reserved(&func_name) { 49 | format_ident!("r#{}", func_name) 50 | } else { 51 | format_ident!("{}", func_name) 52 | }; 53 | let name = exclude_raw_ident(&basename).to_case(Case::Pascal); 54 | let name = format_ident!("{}", name); 55 | quote! { 56 | pub fn #func_name() -> #attr_enum_name { 57 | #attr_enum_name::#name 58 | } 59 | } 60 | }); 61 | 62 | quote! 
{ 63 | #[derive(Debug, Clone, Copy, PartialEq)] 64 | pub enum #attr_enum_name { 65 | #( 66 | #names, 67 | )* 68 | } 69 | 70 | impl ::raiden::IntoAttrName for #attr_enum_name { 71 | fn into_attr_name(self) -> String { 72 | match self { 73 | #( 74 | #arms, 75 | )* 76 | } 77 | } 78 | } 79 | 80 | // attr name getter 81 | impl #struct_name { 82 | #( 83 | #getters 84 | )* 85 | } 86 | 87 | } 88 | } 89 | 90 | fn exclude_raw_ident(ident: &str) -> String { 91 | if &ident[0..2] == "r#" { 92 | ident[2..].to_owned() 93 | } else { 94 | ident.to_owned() 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /raiden-derive/src/condition/builder.rs: -------------------------------------------------------------------------------- 1 | use quote::*; 2 | 3 | pub fn expand_condition_builder( 4 | attr_enum_name: &proc_macro2::Ident, 5 | struct_name: &proc_macro2::Ident, 6 | _fields: &syn::FieldsNamed, 7 | ) -> proc_macro2::TokenStream { 8 | let condition_name = format_ident!("{}Condition", struct_name); 9 | let condition_token_name = format_ident!("{}ConditionToken", struct_name); 10 | let wait_attr_op_name = format_ident!("{}LeftAttrAndWaitOp", struct_name); 11 | 12 | quote! { 13 | 14 | #[derive(Debug, Clone)] 15 | pub struct #condition_token_name; 16 | 17 | 18 | #[derive(Debug, Clone)] 19 | pub struct #condition_name { 20 | not: bool, 21 | } 22 | 23 | impl #struct_name { 24 | pub fn condition() -> #condition_name { 25 | #condition_name { 26 | not: false, 27 | } 28 | } 29 | } 30 | 31 | impl #condition_name { 32 | pub fn not(mut self) -> Self { 33 | self.not = true; 34 | self 35 | } 36 | pub fn attr_exists(self, field: #attr_enum_name) -> ::raiden::ConditionFilledOrWaitOperator<#condition_token_name> { 37 | let cond = ::raiden::condition::Cond::Func(::raiden::condition::ConditionFunctionExpression::AttributeExists(field.into_attr_name())); 38 | ::raiden::ConditionFilledOrWaitOperator { 39 | not: self.not, 40 | cond, 41 | _token: std::marker::PhantomData, 42 | } 43 | } 44 | pub fn attr_not_exists(self, field: #attr_enum_name) -> ::raiden::ConditionFilledOrWaitOperator<#condition_token_name> { 45 | let cond = ::raiden::condition::Cond::Func(::raiden::condition::ConditionFunctionExpression::AttributeNotExists(field.into_attr_name())); 46 | ::raiden::ConditionFilledOrWaitOperator { 47 | not: self.not, 48 | cond, 49 | _token: std::marker::PhantomData, 50 | } 51 | } 52 | pub fn attr_type(self, field: #attr_enum_name, t: ::raiden::AttributeType) -> ::raiden::ConditionFilledOrWaitOperator<#condition_token_name> { 53 | let cond = ::raiden::condition::Cond::Func(::raiden::condition::ConditionFunctionExpression::AttributeType(field.into_attr_name(), t)); 54 | ::raiden::ConditionFilledOrWaitOperator { 55 | not: self.not, 56 | cond, 57 | _token: std::marker::PhantomData, 58 | } 59 | } 60 | pub fn begins_with(self, field: #attr_enum_name, s: impl Into) -> ::raiden::ConditionFilledOrWaitOperator<#condition_token_name> { 61 | let cond = ::raiden::condition::Cond::Func(::raiden::condition::ConditionFunctionExpression::BeginsWith(field.into_attr_name(), s.into())); 62 | ::raiden::ConditionFilledOrWaitOperator { 63 | not: self.not, 64 | cond, 65 | _token: std::marker::PhantomData, 66 | } 67 | } 68 | 69 | pub fn contains(self, field: #attr_enum_name, s: impl Into) -> ::raiden::ConditionFilledOrWaitOperator<#condition_token_name> { 70 | let cond = ::raiden::condition::Cond::Func(::raiden::condition::ConditionFunctionExpression::Contains(field.into_attr_name(), s.into())); 71 | 
::raiden::ConditionFilledOrWaitOperator { 72 | not: self.not, 73 | cond, 74 | _token: std::marker::PhantomData, 75 | } 76 | } 77 | 78 | pub fn attr(self, field: #attr_enum_name) -> #wait_attr_op_name { 79 | #wait_attr_op_name { 80 | not: self.not, 81 | attr_or_placeholder: ::raiden::AttrOrPlaceholder::Attr(field.into_attr_name()), 82 | attr_value: None, 83 | } 84 | } 85 | 86 | pub fn value(self, value: impl ::raiden::IntoAttribute) -> #wait_attr_op_name { 87 | let placeholder = format!("value{}", ::raiden::generate_value_id()); 88 | #wait_attr_op_name { 89 | not: self.not, 90 | attr_or_placeholder: ::raiden::AttrOrPlaceholder::Placeholder(placeholder), 91 | attr_value: Some(value.into_attr()), 92 | } 93 | } 94 | } 95 | 96 | pub struct #wait_attr_op_name { 97 | not: bool, 98 | attr_or_placeholder: ::raiden::AttrOrPlaceholder, 99 | attr_value: Option<::raiden::AttributeValue> 100 | } 101 | 102 | impl #wait_attr_op_name { 103 | pub fn eq_attr(self, attr: #attr_enum_name) -> ::raiden::ConditionFilledOrWaitOperator<#condition_token_name> { 104 | let attr = ::raiden::AttrOrPlaceholder::Attr(attr.into_attr_name()); 105 | let cond = ::raiden::condition::Cond::Cmp(::raiden::condition::ConditionComparisonExpression::Eq(self.attr_or_placeholder, self.attr_value, attr, None)); 106 | ::raiden::ConditionFilledOrWaitOperator { 107 | not: self.not, 108 | cond, 109 | _token: std::marker::PhantomData, 110 | } 111 | 112 | } 113 | 114 | pub fn eq_value(self, value: impl ::raiden::IntoAttribute) -> ::raiden::ConditionFilledOrWaitOperator<#condition_token_name> { 115 | let placeholder = ::raiden::AttrOrPlaceholder::Placeholder(format!("value{}", ::raiden::generate_value_id())); 116 | let cond = ::raiden::condition::Cond::Cmp(::raiden::condition::ConditionComparisonExpression::Eq(self.attr_or_placeholder, self.attr_value, placeholder, Some(value.into_attr()))); 117 | ::raiden::ConditionFilledOrWaitOperator { 118 | not: self.not, 119 | cond, 120 | _token: std::marker::PhantomData, 121 | } 122 | } 123 | } 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /raiden-derive/src/condition/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod builder; 2 | 3 | pub use builder::*; 4 | -------------------------------------------------------------------------------- /raiden-derive/src/filter_expression/builder.rs: -------------------------------------------------------------------------------- 1 | use quote::*; 2 | 3 | pub fn expand_filter_expression_builder( 4 | attr_enum_name: &proc_macro2::Ident, 5 | struct_name: &proc_macro2::Ident, 6 | ) -> proc_macro2::TokenStream { 7 | let filter_expression_token_name = format_ident!("{}FilterExpressionToken", struct_name); 8 | quote! 
{ 9 | 10 | pub struct #filter_expression_token_name; 11 | 12 | impl #struct_name { 13 | pub fn filter_expression(attr: #attr_enum_name) -> ::raiden::FilterExpression<#filter_expression_token_name> { 14 | let attr = attr.into_attr_name(); 15 | ::raiden::FilterExpression { 16 | attr, 17 | is_size: false, 18 | _token: std::marker::PhantomData, 19 | } 20 | } 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /raiden-derive/src/filter_expression/mod.rs: -------------------------------------------------------------------------------- 1 | // #partitionKeyName = :partitionkeyval 2 | // #partitionKeyName = :partitionkeyval AND #sortKeyName = :sortkeyval 3 | // #partitionKeyName = :partitionkeyval AND #sortKeyName <> :sortkeyval 4 | // #partitionKeyName = :partitionkeyval AND #sortKeyName < :sortkeyval 5 | // #partitionKeyName = :partitionkeyval AND #sortKeyName <= :sortkeyval 6 | // #partitionKeyName = :partitionkeyval AND #sortKeyName > :sortkeyval 7 | // #partitionKeyName = :partitionkeyval AND #sortKeyName >= :sortkeyval 8 | // #partitionKeyName = :partitionkeyval AND #sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 9 | // #partitionKeyName = :partitionkeyval AND begins_with ( #sortKeyName, :sortkeyval ) 10 | pub mod builder; 11 | 12 | pub use builder::*; 13 | -------------------------------------------------------------------------------- /raiden-derive/src/finder/mod.rs: -------------------------------------------------------------------------------- 1 | use syn::{punctuated::Punctuated, Expr, ExprLit, Lit, Meta, MetaNameValue, Token}; 2 | 3 | pub(crate) fn find_unary_attr(attr: &syn::Attribute, name: &str) -> Option { 4 | match attr.meta { 5 | Meta::List(ref list) => { 6 | match list.parse_args_with(Punctuated::::parse_terminated) { 7 | Ok(parsed) if parsed.is_empty() => None, 8 | Ok(parsed) if parsed.len() > 1 => panic!("TODO: should unary"), 9 | Ok(parsed) => { 10 | let meta = parsed.first().expect("should get meta"); 11 | 12 | if meta.path().segments[0].ident == name { 13 | Some(meta.path().segments[0].ident.clone()) 14 | } else { 15 | None 16 | } 17 | } 18 | _ => None, 19 | } 20 | } 21 | _ => None, 22 | } 23 | } 24 | 25 | pub(crate) fn find_eq_string_from(attr: &syn::Attribute, name: &str) -> Option { 26 | match attr.meta { 27 | Meta::List(ref list) => { 28 | match list.parse_args_with(Punctuated::::parse_terminated) { 29 | Ok(parsed) => { 30 | for meta in parsed.iter() { 31 | match meta { 32 | Meta::NameValue(MetaNameValue { 33 | value: 34 | Expr::Lit(ExprLit { 35 | lit: Lit::Str(lit), .. 36 | }), 37 | .. 
38 | }) if meta.path().segments[0].ident == name => { 39 | return Some(lit.value()); 40 | } 41 | _ => continue, 42 | } 43 | } 44 | 45 | None 46 | } 47 | _ => None, 48 | } 49 | } 50 | _ => None, 51 | } 52 | } 53 | 54 | pub(crate) fn find_table_name(attrs: &[syn::Attribute]) -> Option { 55 | for attr in attrs { 56 | if attr.path().segments[0].ident != "raiden" { 57 | continue; 58 | } 59 | 60 | if let Some(lit) = find_eq_string_from(attr, "table_name") { 61 | return Some(lit); 62 | } 63 | } 64 | 65 | None 66 | } 67 | 68 | pub(crate) fn find_rename_all(attrs: &[syn::Attribute]) -> Option { 69 | for attr in attrs { 70 | if attr.path().segments[0].ident != "raiden" { 71 | continue; 72 | } 73 | 74 | if let Some(lit) = find_eq_string_from(attr, "rename_all") { 75 | return Some(lit); 76 | } 77 | } 78 | 79 | None 80 | } 81 | 82 | pub(crate) fn find_rename_value(attrs: &[syn::Attribute]) -> Option { 83 | for attr in attrs { 84 | if attr.path().segments[0].ident != "raiden" { 85 | continue; 86 | } 87 | 88 | if let Some(lit) = find_eq_string_from(attr, "rename") { 89 | return Some(lit); 90 | } 91 | } 92 | 93 | None 94 | } 95 | 96 | pub(crate) fn include_unary_attr(attrs: &[syn::Attribute], name: &str) -> bool { 97 | !attrs.is_empty() 98 | && attrs.iter().any(|attr| { 99 | attr.path().segments[0].ident == "raiden" && find_unary_attr(attr, name).is_some() 100 | }) 101 | } 102 | 103 | // TODO: Add validation 104 | pub(crate) fn find_partition_key_field(fields: &syn::FieldsNamed) -> Option { 105 | let fields: Vec = fields 106 | .named 107 | .iter() 108 | .filter(|&f| include_unary_attr(&f.attrs, "partition_key")) 109 | .cloned() 110 | .collect(); 111 | 112 | if fields.len() > 1 { 113 | panic!("partition key should be only one.") 114 | } 115 | fields.first().cloned() 116 | } 117 | 118 | pub(crate) fn find_sort_key_field(fields: &syn::FieldsNamed) -> Option { 119 | let fields: Vec = fields 120 | .named 121 | .iter() 122 | .filter(|f| include_unary_attr(&f.attrs, "sort_key")) 123 | .cloned() 124 | .collect(); 125 | 126 | if fields.len() > 1 { 127 | panic!("sort key should be only one.") 128 | } 129 | 130 | fields.first().cloned() 131 | } 132 | 133 | pub(crate) fn is_option(ty: &syn::Type) -> bool { 134 | match ty { 135 | syn::Type::Path(syn::TypePath { 136 | path: syn::Path { segments, .. }, 137 | .. 
138 | }) => segments.iter().any(|s| s.ident == "Option"), 139 | _ => false, 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /raiden-derive/src/helpers.rs: -------------------------------------------------------------------------------- 1 | pub fn is_reserved(v: &str) -> bool { 2 | matches!( 3 | v, 4 | "as" | "break" 5 | | "const" 6 | | "continue" 7 | | "crate" 8 | | "else" 9 | | "enum" 10 | | "extern" 11 | | "false" 12 | | "fn" 13 | | "for" 14 | | "if" 15 | | "impl" 16 | | "in" 17 | | "let" 18 | | "loop" 19 | | "match" 20 | | "mod" 21 | | "move" 22 | | "mut" 23 | | "pub" 24 | | "ref" 25 | | "return" 26 | | "self" 27 | | "Self" 28 | | "static" 29 | | "struct" 30 | | "super" 31 | | "trait" 32 | | "true" 33 | | "type" 34 | | "unsafe" 35 | | "use" 36 | | "where" 37 | | "while" 38 | | "async" 39 | | "await" 40 | | "dyn" 41 | | "abstract" 42 | | "become" 43 | | "box" 44 | | "do" 45 | | "final" 46 | | "macro" 47 | | "override" 48 | | "priv" 49 | | "typeof" 50 | | "unsized" 51 | | "virtual" 52 | | "yield" 53 | | "try" 54 | ) 55 | } 56 | -------------------------------------------------------------------------------- /raiden-derive/src/key/mod.rs: -------------------------------------------------------------------------------- 1 | use proc_macro2::*; 2 | use quote::*; 3 | use syn::*; 4 | 5 | use crate::finder::*; 6 | use crate::rename::{rename, RenameAllType}; 7 | 8 | pub fn fetch_partition_key( 9 | fields: &syn::FieldsNamed, 10 | rename_all_type: crate::rename::RenameAllType, 11 | ) -> (Ident, Type) { 12 | match find_partition_key_field(fields) { 13 | Some(key) => { 14 | // Rename partition key if renamed. 15 | let renamed = find_rename_value(&key.attrs); 16 | if renamed.is_some() { 17 | (format_ident!("{}", renamed.unwrap()), key.ty) 18 | } else if rename_all_type != RenameAllType::None { 19 | let ident = format_ident!( 20 | "{}", 21 | rename(rename_all_type, key.ident.unwrap().to_string()) 22 | ); 23 | (ident, key.ty) 24 | } else { 25 | (key.ident.unwrap(), key.ty) 26 | } 27 | } 28 | None => panic!("Please specify partition key"), 29 | } 30 | } 31 | 32 | pub fn fetch_sort_key( 33 | fields: &syn::FieldsNamed, 34 | rename_all_type: crate::rename::RenameAllType, 35 | ) -> Option<(Ident, Type)> { 36 | match find_sort_key_field(fields) { 37 | Some(key) => { 38 | // Rename partition key if renamed. 39 | let renamed = find_rename_value(&key.attrs); 40 | if renamed.is_some() { 41 | Some((format_ident!("{}", renamed.unwrap()), key.ty)) 42 | } else if rename_all_type != RenameAllType::None { 43 | let ident = format_ident!( 44 | "{}", 45 | rename(rename_all_type, key.ident.unwrap().to_string()) 46 | ); 47 | Some((ident, key.ty)) 48 | } else { 49 | Some((key.ident.unwrap(), key.ty)) 50 | } 51 | } 52 | None => None, 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /raiden-derive/src/key_condition/builder.rs: -------------------------------------------------------------------------------- 1 | use quote::*; 2 | 3 | pub fn expand_key_condition_builder( 4 | attr_enum_name: &proc_macro2::Ident, 5 | struct_name: &proc_macro2::Ident, 6 | ) -> proc_macro2::TokenStream { 7 | let key_condition_token_name = format_ident!("{}KeyConditionToken", struct_name); 8 | quote! 
{ 9 | 10 | pub struct #key_condition_token_name; 11 | 12 | impl #struct_name { 13 | pub fn key_condition(attr: #attr_enum_name) -> ::raiden::KeyCondition<#key_condition_token_name> { 14 | let attr = attr.into_attr_name(); 15 | ::raiden::KeyCondition { 16 | attr, 17 | _token: std::marker::PhantomData, 18 | } 19 | } 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /raiden-derive/src/key_condition/mod.rs: -------------------------------------------------------------------------------- 1 | // #partitionKeyName = :partitionkeyval 2 | // #partitionKeyName = :partitionkeyval AND #sortKeyName = :sortkeyval 3 | // #partitionKeyName = :partitionkeyval AND #sortKeyName < :sortkeyval 4 | // #partitionKeyName = :partitionkeyval AND #sortKeyName <= :sortkeyval 5 | // #partitionKeyName = :partitionkeyval AND #sortKeyName > :sortkeyval 6 | // #partitionKeyName = :partitionkeyval AND #sortKeyName >= :sortkeyval 7 | // #partitionKeyName = :partitionkeyval AND #sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 8 | // #partitionKeyName = :partitionkeyval AND begins_with ( #sortKeyName, :sortkeyval ) 9 | pub mod builder; 10 | 11 | pub use builder::*; 12 | -------------------------------------------------------------------------------- /raiden-derive/src/lib.rs: -------------------------------------------------------------------------------- 1 | use proc_macro::TokenStream; 2 | use quote::*; 3 | 4 | use syn::*; 5 | 6 | mod attribute; 7 | mod condition; 8 | mod filter_expression; 9 | mod finder; 10 | mod helpers; 11 | mod key; 12 | mod key_condition; 13 | mod ops; 14 | mod rename; 15 | 16 | use crate::rename::*; 17 | use std::str::FromStr; 18 | 19 | #[proc_macro_derive(Raiden, attributes(raiden))] 20 | pub fn derive_raiden(input: TokenStream) -> TokenStream { 21 | let input = syn::parse_macro_input!(input as DeriveInput); 22 | 23 | let struct_name = input.ident; 24 | 25 | let client_name = format_ident!("{}Client", struct_name); 26 | 27 | let attr_enum_name = format_ident!("{}AttrNames", struct_name); 28 | 29 | let attrs = input.attrs; 30 | 31 | let table_name = if let Some(name) = finder::find_table_name(&attrs) { 32 | name 33 | } else { 34 | struct_name.to_string() 35 | }; 36 | 37 | let rename_all = finder::find_rename_all(&attrs); 38 | let rename_all_type = if let Some(rename_all) = rename_all { 39 | rename::RenameAllType::from_str(&rename_all).unwrap() 40 | } else { 41 | rename::RenameAllType::None 42 | }; 43 | 44 | let fields = match input.data { 45 | Data::Struct(DataStruct { 46 | fields: Fields::Named(n), 47 | .. 48 | }) => n, 49 | _ => unimplemented!(), 50 | }; 51 | 52 | let partition_key = key::fetch_partition_key(&fields, rename_all_type); 53 | let sort_key = key::fetch_sort_key(&fields, rename_all_type); 54 | 55 | let table_name_field = format_ident!("table_name"); 56 | let client_field = format_ident!("client"); 57 | let n = vec![ 58 | quote! { #table_name_field: &'static str }, 59 | quote! { #client_field: ::raiden::DynamoDbClient }, 60 | ]; 61 | 62 | // let struct_fields = fields.named.iter().map(|f| { 63 | // let ident = &f.ident.clone().unwrap(); 64 | // let name = ident_case::RenameRule::PascalCase.apply_to_field(ident.to_string()); 65 | // let name = format_ident!("{}", name); 66 | // quote! 
{ 67 | // #name 68 | // } 69 | // }); 70 | 71 | let get_item = ops::expand_get_item( 72 | &partition_key, 73 | &sort_key, 74 | &struct_name, 75 | &fields, 76 | rename_all_type, 77 | ); 78 | 79 | let query = ops::expand_query(&struct_name, &fields, rename_all_type); 80 | 81 | let scan = ops::expand_scan(&struct_name, &fields, rename_all_type); 82 | 83 | let batch_get = ops::expand_batch_get( 84 | &partition_key, 85 | &sort_key, 86 | &struct_name, 87 | &fields, 88 | rename_all_type, 89 | ); 90 | 91 | let put_item = ops::expand_put_item(&struct_name, &fields, rename_all_type); 92 | 93 | let update_item = ops::expand_update_item( 94 | &partition_key, 95 | &sort_key, 96 | &fields, 97 | &attr_enum_name, 98 | &struct_name, 99 | rename_all_type, 100 | ); 101 | 102 | let delete_item = ops::expand_delete_item(&partition_key, &sort_key, &struct_name); 103 | 104 | let batch_delete = ops::expand_batch_delete(&partition_key, &sort_key, &struct_name); 105 | 106 | let attr_names = 107 | attribute::expand_attr_names(&attr_enum_name, &fields, rename_all_type, &struct_name); 108 | 109 | let condition_builder = 110 | condition::expand_condition_builder(&attr_enum_name, &struct_name, &fields); 111 | 112 | let key_condition_builder = 113 | key_condition::expand_key_condition_builder(&attr_enum_name, &struct_name); 114 | 115 | let filter_expression_builder = 116 | filter_expression::expand_filter_expression_builder(&attr_enum_name, &struct_name); 117 | 118 | let transact_write = ops::expand_transact_write( 119 | &struct_name, 120 | &partition_key, 121 | &sort_key, 122 | &fields, 123 | &attr_enum_name, 124 | rename_all_type, 125 | &table_name, 126 | ); 127 | 128 | let insertion_attribute_name = fields.named.iter().map(|f| { 129 | let ident = &f.ident.clone().unwrap(); 130 | let renamed = crate::finder::find_rename_value(&f.attrs); 131 | let result = create_renamed(ident.to_string(), renamed, rename_all_type); 132 | quote! { 133 | names.insert( 134 | format!("#{}", #result.clone()), 135 | #result.to_string(), 136 | ); 137 | } 138 | }); 139 | 140 | let expanded = quote! 
{ 141 | use ::raiden::IntoAttribute as _; 142 | use ::raiden::IntoAttrName as _; 143 | use ::raiden::DynamoDb as _; 144 | 145 | pub struct #client_name { 146 | #( 147 | #n, 148 | )* 149 | table_prefix: String, 150 | table_suffix: String, 151 | retry_condition: ::raiden::RetryCondition, 152 | attribute_names: Option<::raiden::AttributeNames>, 153 | projection_expression: Option 154 | } 155 | 156 | #attr_names 157 | 158 | #condition_builder 159 | 160 | #key_condition_builder 161 | 162 | #filter_expression_builder 163 | 164 | #get_item 165 | 166 | #batch_get 167 | 168 | #query 169 | 170 | #scan 171 | 172 | #put_item 173 | 174 | #update_item 175 | 176 | #delete_item 177 | 178 | #batch_delete 179 | 180 | #transact_write 181 | 182 | impl #client_name { 183 | 184 | pub fn new(region: ::raiden::Region) -> Self { 185 | let client = ::raiden::DynamoDbClient::new(region); 186 | Self::new_with_dynamo_db_client(client) 187 | } 188 | 189 | pub fn new_with_client(client: ::raiden::Client, region: ::raiden::Region) -> Self { 190 | let client = ::raiden::DynamoDbClient::new_with_client(client, region); 191 | Self::new_with_dynamo_db_client(client) 192 | } 193 | 194 | fn new_with_dynamo_db_client(client: ::raiden::DynamoDbClient) -> Self { 195 | let names = { 196 | let mut names: ::raiden::AttributeNames = std::collections::HashMap::new(); 197 | #(#insertion_attribute_name)* 198 | names 199 | }; 200 | let projection_expression = Some(names.keys().map(|v| v.to_string()).collect::>().join(", ")); 201 | 202 | Self { 203 | table_name: #table_name, 204 | table_prefix: "".to_owned(), 205 | table_suffix: "".to_owned(), 206 | client, 207 | retry_condition: ::raiden::RetryCondition::new(), 208 | attribute_names: Some(names), 209 | projection_expression 210 | } 211 | } 212 | 213 | pub fn with_retries(mut self, s: Box) -> Self { 214 | self.retry_condition.strategy = s; 215 | self 216 | } 217 | 218 | pub fn table_prefix(mut self, prefix: impl Into) -> Self { 219 | self.table_prefix = prefix.into(); 220 | self 221 | } 222 | 223 | pub fn table_suffix(mut self, suffix: impl Into) -> Self { 224 | self.table_suffix = suffix.into(); 225 | self 226 | } 227 | 228 | pub fn table_name(&self) -> String { 229 | format!("{}{}{}", self.table_prefix, self.table_name.to_string(), self.table_suffix) 230 | } 231 | } 232 | 233 | impl #struct_name { 234 | pub fn client(region: ::raiden::Region) -> #client_name { 235 | #client_name::new(region) 236 | } 237 | pub fn client_with(client: ::raiden::Client, region: ::raiden::Region) -> #client_name { 238 | #client_name::new_with_client(client, region) 239 | } 240 | } 241 | 242 | impl ::raiden::IdGenerator for #struct_name {} 243 | }; 244 | // Hand the output tokens back to the compiler. 
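// (`expanded` bundles everything generated above: the client struct, the attribute-name enum,
// the condition/key-condition/filter-expression builders, and the per-operation impls produced
// by the `ops::expand_*` helpers.)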
245 | proc_macro::TokenStream::from(expanded) 246 | } 247 | 248 | // fn fetch_raiden_field(fields: &syn::FieldsNamed) -> Vec { 249 | // let fields: Vec = fields 250 | // .named 251 | // .iter() 252 | // .cloned() 253 | // .filter(|f| { 254 | // f.attrs.len() > 0 255 | // && f.attrs 256 | // .iter() 257 | // .any(|attr| attr.path.segments[0].ident == "raiden") 258 | // }) 259 | // .collect(); 260 | // dbg!(&fields.len()); 261 | // fields 262 | // } 263 | 264 | // fn check_attr_of( 265 | // name: &str, 266 | // tokens: &mut proc_macro2::token_stream::IntoIter, 267 | // ) -> Option { 268 | // dbg!(&name); 269 | // let mut tokens = match tokens.next() { 270 | // Some(proc_macro2::TokenTree::Group(g)) => g.stream().into_iter(), 271 | // _ => return None, 272 | // }; 273 | // dbg!(&name); 274 | // 275 | // match tokens.next() { 276 | // Some(proc_macro2::TokenTree::Ident(ref ident)) if *ident == name => { 277 | // return Some(tokens); 278 | // } 279 | // _ => return None, 280 | // }; 281 | // } 282 | -------------------------------------------------------------------------------- /raiden-derive/src/ops/batch_delete.rs: -------------------------------------------------------------------------------- 1 | use quote::*; 2 | use syn::*; 3 | 4 | pub(crate) fn expand_batch_delete( 5 | partition_key: &(Ident, Type), 6 | sort_key: &Option<(Ident, Type)>, 7 | struct_name: &Ident, 8 | ) -> proc_macro2::TokenStream { 9 | let trait_name = format_ident!("{}BatchDelete", struct_name); 10 | let client_name = format_ident!("{}Client", struct_name); 11 | let builder_name = format_ident!("{}BatchDeleteBuilder", struct_name); 12 | let (partition_key_ident, partition_key_type) = partition_key; 13 | 14 | let client_trait = if let Some(sort_key) = sort_key { 15 | let (sort_key_ident, sort_key_type) = sort_key; 16 | quote! { 17 | pub trait #trait_name { 18 | fn batch_delete(&self, keys: std::vec::Vec<(impl Into<#partition_key_type>, impl Into<#sort_key_type>)>) -> #builder_name; 19 | } 20 | 21 | impl #trait_name for #client_name { 22 | fn batch_delete(&self, keys: std::vec::Vec<(impl Into<#partition_key_type>, impl Into<#sort_key_type>)>) -> #builder_name { 23 | let write_requests = { 24 | let mut write_requests = vec![]; 25 | for (pk, sk) in keys.into_iter() { 26 | let pk_attr_value = pk.into().into_attr(); 27 | let sk_attr_value = sk.into().into_attr(); 28 | 29 | let write_request = { 30 | let mut write_request = ::raiden::WriteRequest::default(); 31 | let delete_request = ::raiden::DeleteRequest { 32 | key: vec![ 33 | (stringify!(#partition_key_ident).to_string(), pk_attr_value), 34 | (stringify!(#sort_key_ident).to_string(), sk_attr_value) 35 | ].into_iter().collect(), 36 | }; 37 | write_request.delete_request = Some(delete_request); 38 | write_request 39 | }; 40 | 41 | write_requests.push(write_request); 42 | } 43 | 44 | write_requests 45 | }; 46 | 47 | #builder_name { 48 | client: &self.client, 49 | write_requests, 50 | table_name: self.table_name(), 51 | } 52 | } 53 | } 54 | } 55 | } else { 56 | quote! 
{ 57 | pub trait #trait_name { 58 | fn batch_delete(&self, keys: std::vec::Vec>) -> #builder_name; 59 | } 60 | 61 | impl #trait_name for #client_name { 62 | fn batch_delete(&self, keys: std::vec::Vec>) -> #builder_name { 63 | let write_requests = { 64 | let mut write_requests = vec![]; 65 | for pk in keys.into_iter() { 66 | let pk_attr_value = pk.into().into_attr(); 67 | 68 | let write_request = { 69 | let mut write_request = ::raiden::WriteRequest::default(); 70 | let delete_request = ::raiden::DeleteRequest { 71 | key: vec![ 72 | (stringify!(#partition_key_ident).to_string(), pk_attr_value), 73 | ].into_iter().collect(), 74 | }; 75 | write_request.delete_request = Some(delete_request); 76 | write_request 77 | }; 78 | 79 | write_requests.push(write_request); 80 | } 81 | 82 | write_requests 83 | }; 84 | 85 | #builder_name { 86 | client: &self.client, 87 | write_requests, 88 | table_name: self.table_name(), 89 | } 90 | } 91 | } 92 | } 93 | }; 94 | 95 | let api_call_token = super::api_call_token!("batch_write_item"); 96 | let (call_inner_run, inner_run_args) = if cfg!(feature = "tracing") { 97 | ( 98 | quote! { #builder_name::inner_run(&self.table_name, &self.client, input).await? }, 99 | quote! { table_name: &str, }, 100 | ) 101 | } else { 102 | ( 103 | quote! { #builder_name::inner_run(&self.client, input).await? }, 104 | quote! {}, 105 | ) 106 | }; 107 | 108 | quote! { 109 | #client_trait 110 | 111 | pub struct #builder_name<'a> { 112 | pub client: &'a ::raiden::DynamoDbClient, 113 | pub write_requests: std::vec::Vec<::raiden::WriteRequest>, 114 | pub table_name: String, 115 | } 116 | 117 | impl<'a> #builder_name<'a> { 118 | pub async fn run(mut self) -> Result<::raiden::batch_delete::BatchDeleteOutput, ::raiden::RaidenError> { 119 | // TODO: set the number of retry to 5 for now, which should be made more flexible 120 | const RETRY: usize = 5; 121 | const MAX_ITEMS_PER_REQUEST: usize = 25; 122 | 123 | for _ in 0..RETRY { 124 | loop { 125 | let len = self.write_requests.len(); 126 | 127 | // len == 0 means there are no items to be processed anymore 128 | if len == 0 { 129 | break; 130 | } 131 | 132 | let start = len.saturating_sub(MAX_ITEMS_PER_REQUEST); 133 | let end = std::cmp::min(len, start + MAX_ITEMS_PER_REQUEST); 134 | // take requests up to 25 from the request buffer 135 | let req = self.write_requests.drain(start..end).collect::>(); 136 | let request_items = vec![(self.table_name.clone(), req)] 137 | .into_iter() 138 | .collect::>(); 139 | let input = ::raiden::BatchWriteItemInput { 140 | request_items, 141 | ..std::default::Default::default() 142 | }; 143 | 144 | let result = #call_inner_run; 145 | 146 | let mut unprocessed_items = match result.unprocessed_items { 147 | None => { 148 | // move on to the next iteration to check if there are unprocessed 149 | // requests 150 | continue; 151 | } 152 | Some(unprocessed_items) => { 153 | if unprocessed_items.is_empty() { 154 | // move on to the next iteration to check if there are unprocessed 155 | // requests 156 | continue; 157 | } 158 | 159 | unprocessed_items 160 | }, 161 | }; 162 | 163 | let unprocessed_requests = unprocessed_items 164 | .remove(&self.table_name) 165 | .expect("reqeust_items hashmap must have a value for the table name"); 166 | // push unprocessed requests back to the request buffer 167 | self.write_requests.extend(unprocessed_requests); 168 | } 169 | } 170 | 171 | // when retry is done the specified times, treat it as success even if there are 172 | // still unprocessed items 173 | let unprocessed_items = 
self.write_requests 174 | .into_iter() 175 | .filter_map(|write_request| write_request.delete_request) 176 | .collect::>(); 177 | Ok(::raiden::batch_delete::BatchDeleteOutput { 178 | consumed_capacity: None, 179 | unprocessed_items, 180 | }) 181 | } 182 | 183 | async fn inner_run( 184 | #inner_run_args 185 | client: &::raiden::DynamoDbClient, 186 | input: ::raiden::BatchWriteItemInput, 187 | ) -> Result<::raiden::BatchWriteItemOutput, ::raiden::RaidenError> { 188 | Ok(#api_call_token?) 189 | } 190 | } 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /raiden-derive/src/ops/get.rs: -------------------------------------------------------------------------------- 1 | use proc_macro2::*; 2 | use quote::*; 3 | use syn::*; 4 | 5 | pub(crate) fn expand_get_item( 6 | partition_key: &(Ident, Type), 7 | sort_key: &Option<(Ident, Type)>, 8 | struct_name: &Ident, 9 | fields: &syn::FieldsNamed, 10 | rename_all_type: crate::rename::RenameAllType, 11 | ) -> TokenStream { 12 | let trait_name = format_ident!("{}GetItem", struct_name); 13 | let client_name = format_ident!("{}Client", struct_name); 14 | let builder_name = format_ident!("{}GetItemBuilder", struct_name); 15 | let from_item = super::expand_attr_to_item(format_ident!("res_item"), fields, rename_all_type); 16 | let (partition_key_ident, partition_key_type) = partition_key; 17 | 18 | let client_trait = if let Some(sort_key) = sort_key { 19 | let (sort_key_ident, sort_key_type) = sort_key; 20 | quote! { 21 | pub trait #trait_name { 22 | fn get(&self, pk: impl Into<#partition_key_type>, sk: impl Into<#sort_key_type>) -> #builder_name; 23 | } 24 | 25 | impl #trait_name for #client_name { 26 | fn get(&self, pk: impl Into<#partition_key_type>, sk: impl Into<#sort_key_type>) -> #builder_name { 27 | let mut input = ::raiden::GetItemInput::default(); 28 | let pk_attr: ::raiden::AttributeValue = pk.into().into_attr(); 29 | let sk_attr: ::raiden::AttributeValue = sk.into().into_attr(); 30 | input.projection_expression = self.projection_expression.clone(); 31 | input.expression_attribute_names = self.attribute_names.clone(); 32 | let mut key_set: std::collections::HashMap = std::collections::HashMap::new(); 33 | key_set.insert(stringify!(#partition_key_ident).to_owned(), pk_attr); 34 | key_set.insert(stringify!(#sort_key_ident).to_owned(), sk_attr); 35 | input.key = key_set; 36 | input.table_name = self.table_name(); 37 | #builder_name { 38 | client: &self.client, 39 | input, 40 | policy: self.retry_condition.strategy.policy(), 41 | condition: &self.retry_condition, 42 | } 43 | } 44 | } 45 | } 46 | } else { 47 | quote! 
{ 48 | pub trait #trait_name { 49 | fn get(&self, key: impl Into<#partition_key_type>) -> #builder_name; 50 | } 51 | 52 | impl #trait_name for #client_name { 53 | fn get(&self, key: impl Into<#partition_key_type>) -> #builder_name { 54 | let key_attr: ::raiden::AttributeValue = key.into().into_attr(); 55 | let mut key_set: std::collections::HashMap = std::collections::HashMap::new(); 56 | key_set.insert(stringify!(#partition_key_ident).to_owned(), key_attr); 57 | let input = ::raiden::GetItemInput { 58 | key: key_set, 59 | table_name: self.table_name(), 60 | projection_expression: self.projection_expression.clone(), 61 | expression_attribute_names: self.attribute_names.clone(), 62 | ..::raiden::GetItemInput::default() 63 | }; 64 | 65 | #builder_name { 66 | client: &self.client, 67 | input, 68 | policy: self.retry_condition.strategy.policy(), 69 | condition: &self.retry_condition, 70 | } 71 | } 72 | } 73 | } 74 | }; 75 | 76 | let api_call_token = super::api_call_token!("get_item"); 77 | let (call_inner_run, inner_run_args) = if cfg!(feature = "tracing") { 78 | ( 79 | quote! { #builder_name::inner_run(input.table_name.clone(), client, input).await }, 80 | quote! { table_name: String, }, 81 | ) 82 | } else { 83 | ( 84 | quote! { #builder_name::inner_run(client, input).await }, 85 | quote! {}, 86 | ) 87 | }; 88 | 89 | quote! { 90 | #client_trait 91 | 92 | pub struct #builder_name<'a> { 93 | pub client: &'a ::raiden::DynamoDbClient, 94 | pub input: ::raiden::GetItemInput, 95 | pub policy: ::raiden::Policy, 96 | pub condition: &'a ::raiden::retry::RetryCondition, 97 | } 98 | 99 | impl<'a> #builder_name<'a> { 100 | pub fn consistent(mut self) -> Self { 101 | self.input.consistent_read = Some(true); 102 | self 103 | } 104 | 105 | pub async fn run(self) -> Result<::raiden::get::GetOutput<#struct_name>, ::raiden::RaidenError> { 106 | let policy: ::raiden::RetryPolicy = self.policy.into(); 107 | let client = self.client; 108 | let input = self.input; 109 | policy.retry_if(move || { 110 | let client = client.clone(); 111 | let input = input.clone(); 112 | async { #call_inner_run } 113 | }, self.condition).await 114 | } 115 | 116 | async fn inner_run( 117 | #inner_run_args 118 | client: ::raiden::DynamoDbClient, 119 | input: ::raiden::GetItemInput, 120 | ) -> Result<::raiden::get::GetOutput<#struct_name>, ::raiden::RaidenError> { 121 | let res = #api_call_token?; 122 | if res.item.is_none() { 123 | return Err(::raiden::RaidenError::ResourceNotFound("resource not found".to_owned())); 124 | }; 125 | let mut res_item = res.item.unwrap(); 126 | let item = #struct_name { 127 | #(#from_item)* 128 | }; 129 | Ok(::raiden::get::GetOutput { 130 | item, 131 | consumed_capacity: res.consumed_capacity, 132 | }) 133 | } 134 | } 135 | } 136 | } 137 | 138 | /* 139 | https://github.com/rusoto/rusoto/blob/master/rusoto/services/dynamodb/src/generated.rs#L1137 140 | #[cfg_attr(feature = "deserialize_structs", derive(Deserialize))] 141 | pub struct GetItemInput { 142 | pub attributes_to_get: Option>, 143 | pub consistent_read: Option, 144 | pub expression_attribute_names: Option<::std::collections::HashMap>, 145 | pub key: ::std::collections::HashMap, 146 | pub projection_expression: Option, 147 | pub return_consumed_capacity: Option, 148 | pub table_name: String, 149 | } 150 | */ 151 | -------------------------------------------------------------------------------- /raiden-derive/src/ops/mod.rs: -------------------------------------------------------------------------------- 1 | mod batch_delete; 2 | mod batch_get; 3 | 
mod delete; 4 | mod get; 5 | mod put; 6 | mod query; 7 | mod scan; 8 | mod shared; 9 | mod transact_write; 10 | mod update; 11 | 12 | pub(crate) use batch_delete::*; 13 | pub(crate) use batch_get::*; 14 | pub(crate) use delete::*; 15 | pub(crate) use get::*; 16 | pub(crate) use put::*; 17 | pub(crate) use query::*; 18 | pub(crate) use scan::*; 19 | pub(crate) use shared::*; 20 | pub(crate) use transact_write::*; 21 | pub(crate) use update::*; 22 | -------------------------------------------------------------------------------- /raiden-derive/src/ops/shared.rs: -------------------------------------------------------------------------------- 1 | use quote::*; 2 | 3 | pub(crate) fn expand_attr_to_item( 4 | item_ident: proc_macro2::Ident, 5 | fields: &syn::FieldsNamed, 6 | rename_all_type: crate::rename::RenameAllType, 7 | ) -> Vec { 8 | fields.named.iter().map(|f| { 9 | let ident = &f.ident.clone().unwrap(); 10 | let use_default = crate::finder::include_unary_attr(&f.attrs, "use_default"); 11 | let renamed = crate::finder::find_rename_value(&f.attrs); 12 | let attr_key = if let Some(renamed) = renamed { 13 | renamed 14 | } else if rename_all_type != crate::rename::RenameAllType::None { 15 | crate::rename::rename(rename_all_type, ident.to_string()) 16 | } else { 17 | ident.to_string() 18 | }; 19 | let ty = &f.ty; 20 | 21 | let item = quote! { 22 | let item = <#ty as ResolveAttribute>::resolve_attr(&#attr_key, &mut #item_ident); 23 | }; 24 | if crate::finder::is_option(ty) { 25 | quote! { 26 | #ident: { 27 | #item 28 | if item.is_none() { 29 | None 30 | } else { 31 | let converted = ::raiden::FromAttribute::from_attr(item); 32 | if converted.is_err() { 33 | return Err(::raiden::RaidenError::AttributeConvertError{ attr_name: #attr_key.to_string() }); 34 | } 35 | converted.unwrap() 36 | } 37 | }, 38 | } 39 | } else if use_default { 40 | quote! { 41 | #ident: { 42 | #item 43 | if item.is_none() { 44 | Default::default() 45 | } else { 46 | let item = item.unwrap(); 47 | // If null is true, use default value. 48 | if let Some(true) = item.null { 49 | Default::default() 50 | } else { 51 | let converted = ::raiden::FromAttribute::from_attr(Some(item)); 52 | if converted.is_err() { 53 | // TODO: improve error handling. 54 | return Err(::raiden::RaidenError::AttributeConvertError{ attr_name: #attr_key.to_string() }); 55 | } 56 | converted.unwrap() 57 | } 58 | } 59 | }, 60 | } 61 | } else { 62 | quote! { 63 | #ident: { 64 | #item 65 | let converted = ::raiden::FromAttribute::from_attr(item); 66 | if converted.is_err() { 67 | // TODO: improve error handling. 68 | return Err(::raiden::RaidenError::AttributeConvertError{ attr_name: #attr_key.to_string() }); 69 | } 70 | converted.unwrap() 71 | }, 72 | } 73 | } 74 | }).collect() 75 | } 76 | 77 | macro_rules! api_call_token { 78 | ($operation: literal) => { 79 | $crate::ops::api_call_token!("table_name", "client", $operation, "input") 80 | }; 81 | ($table_name: literal, $client: literal, $operation: literal, $input: literal) => {{ 82 | let table_name = ::quote::format_ident!($table_name); 83 | let client = ::quote::format_ident!($client); 84 | let operation = ::quote::format_ident!($operation); 85 | let input = ::quote::format_ident!($input); 86 | 87 | let span_token = if cfg!(feature = "tracing") { 88 | ::quote::quote! { 89 | use tracing::Instrument; 90 | let fut = fut.instrument(::tracing::debug_span!( 91 | "dynamodb::action", 92 | table = #table_name, 93 | api = std::stringify!(#operation), 94 | )); 95 | } 96 | } else { 97 | ::quote::quote! 
{} 98 | }; 99 | 100 | ::quote::quote! {{ 101 | let fut = #client.#operation(#input); 102 | 103 | #span_token 104 | 105 | fut.await 106 | }} 107 | }}; 108 | } 109 | 110 | pub(super) use api_call_token; 111 | -------------------------------------------------------------------------------- /raiden-derive/src/rename/mod.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | // "lowercase", "UPPERCASE", "PascalCase", "camelCase", "snake_case", "SCREAMING_SNAKE_CASE", "kebab-case", "SCREAMING-KEBAB-CASE". 4 | 5 | #[derive(Copy, Clone, Debug, PartialEq)] 6 | pub enum RenameAllType { 7 | LowerCase, 8 | CamelCase, 9 | PascalCase, 10 | SnakeCase, 11 | ScreamingSnakeCase, 12 | KebabCase, 13 | None, 14 | } 15 | 16 | impl FromStr for RenameAllType { 17 | type Err = (); 18 | fn from_str(s: &str) -> Result { 19 | match s { 20 | "lowercase" => Ok(RenameAllType::LowerCase), 21 | "camelCase" => Ok(RenameAllType::CamelCase), 22 | "PascalCase" => Ok(RenameAllType::PascalCase), 23 | "snake_case" => Ok(RenameAllType::SnakeCase), 24 | "SCREAMING_SNAKE_CASE" => Ok(RenameAllType::ScreamingSnakeCase), 25 | "kebab-case" => Ok(RenameAllType::KebabCase), 26 | _ => panic!("{} is not support type.", s), 27 | } 28 | } 29 | } 30 | 31 | pub fn rename(t: RenameAllType, base: String) -> String { 32 | match t { 33 | crate::rename::RenameAllType::LowerCase => { 34 | ident_case::RenameRule::LowerCase.apply_to_field(base) 35 | } 36 | crate::rename::RenameAllType::CamelCase => { 37 | ident_case::RenameRule::CamelCase.apply_to_field(base) 38 | } 39 | crate::rename::RenameAllType::PascalCase => { 40 | ident_case::RenameRule::PascalCase.apply_to_field(base) 41 | } 42 | crate::rename::RenameAllType::SnakeCase => { 43 | ident_case::RenameRule::SnakeCase.apply_to_field(base) 44 | } 45 | crate::rename::RenameAllType::ScreamingSnakeCase => { 46 | ident_case::RenameRule::ScreamingSnakeCase.apply_to_field(base) 47 | } 48 | crate::rename::RenameAllType::KebabCase => { 49 | ident_case::RenameRule::KebabCase.apply_to_field(base) 50 | } 51 | _ => panic!("{} is not supported rename type", base), 52 | } 53 | } 54 | 55 | pub fn create_renamed( 56 | basename: String, 57 | renamed: Option, 58 | rename_all_type: RenameAllType, 59 | ) -> String { 60 | if let Some(renamed) = renamed { 61 | renamed 62 | } else if rename_all_type != RenameAllType::None { 63 | rename(rename_all_type, basename) 64 | } else { 65 | basename 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /raiden/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /raiden/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "raiden" 3 | authors.workspace = true 4 | edition.workspace = true 5 | license.workspace = true 6 | rust-version.workspace = true 7 | version.workspace = true 8 | 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | again = "^0.1.2" 13 | base64 = "^0.22.1" 14 | md-5 = "^0.10.6" 15 | raiden-derive = { version = "*", path = "../raiden-derive" } 16 | rusoto_core_default = { package = "rusoto_core", version = "0.48.0", optional = true } 17 | rusoto_core_rustls = { package = "rusoto_core", version = "0.48.0", default-features = false, features = [ 18 | "rustls", 19 | ], optional = true 
} 20 | rusoto_credential = "0.48.0" 21 | rusoto_dynamodb_default = { package = "rusoto_dynamodb", version = "0.48.0", features = [ 22 | "serialize_structs", 23 | ], optional = true } 24 | rusoto_dynamodb_rustls = { package = "rusoto_dynamodb", version = "0.48.0", default-features = false, features = [ 25 | "rustls", 26 | "serialize_structs", 27 | ], optional = true } 28 | safe-builder = { tag = "0.0.6", git = "https://github.com/raiden-rs/safe-builder.git" } 29 | serde = { version = "^1.0.219", features = ["derive"] } 30 | serde_derive = "^1.0.219" 31 | serde_json = "^1.0.140" 32 | thiserror = "^2.0.12" 33 | tracing = { version = "^0.1.41", optional = true } 34 | uuid = { version = "^1.16.0", features = ["v4"] } 35 | 36 | [dev-dependencies] 37 | pretty_assertions = "^1.4.1" 38 | raiden = { path = "./", features = ["tracing"], default-features = false } 39 | time = "^0.3.41" 40 | tokio = "^1.45.0" 41 | tracing-subscriber = { version = "^0.3.19", features = ["env-filter", "time"] } 42 | 43 | [features] 44 | default = ["rusoto_core_default", "rusoto_dynamodb_default"] 45 | rustls = ["rusoto_core_rustls", "rusoto_dynamodb_rustls"] 46 | tracing = ["dep:tracing", "raiden-derive/tracing"] 47 | 48 | [package.metadata.cargo-machete] 49 | ignored = ["md-5"] 50 | -------------------------------------------------------------------------------- /raiden/examples/delete.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden)] 8 | #[raiden(table_name = "QueryTestData0")] 9 | #[derive(Debug, Clone)] 10 | #[allow(dead_code)] 11 | pub struct Test { 12 | #[raiden(partition_key)] 13 | id: String, 14 | name: String, 15 | #[raiden(sort_key)] 16 | year: usize, 17 | } 18 | 19 | fn main() { 20 | tracing_subscriber::fmt() 21 | .with_env_filter(EnvFilter::new("delete=debug,info")) 22 | .with_file(true) 23 | .with_line_number(true) 24 | .with_span_events(FmtSpan::CLOSE) 25 | .with_target(true) 26 | .with_timer(UtcTime::rfc_3339()) 27 | .init(); 28 | 29 | let rt = tokio::runtime::Runtime::new().unwrap(); 30 | async fn example() { 31 | let client = Test::client(Region::Custom { 32 | endpoint: "http://localhost:8000".into(), 33 | name: "ap-northeast-1".into(), 34 | }); 35 | 36 | let res = client.delete("id1", 2003_usize).run().await; 37 | dbg!(&res); 38 | } 39 | rt.block_on(example()); 40 | } 41 | -------------------------------------------------------------------------------- /raiden/examples/get_with_reserved.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden)] 8 | #[raiden(table_name = "ReservedTestData0")] 9 | pub struct Reserved { 10 | #[raiden(partition_key)] 11 | pub id: String, 12 | pub r#type: String, 13 | } 14 | 15 | fn main() { 16 | tracing_subscriber::fmt() 17 | .with_env_filter(EnvFilter::new("get_with_reserved=debug,info")) 18 | .with_file(true) 19 | .with_line_number(true) 20 | .with_span_events(FmtSpan::CLOSE) 21 | .with_target(true) 22 | .with_timer(UtcTime::rfc_3339()) 23 | .init(); 24 | 25 | let rt = tokio::runtime::Runtime::new().unwrap(); 26 | async fn example() { 27 | let client = Reserved::client(Region::Custom { 28 | endpoint: "http://localhost:8000".into(), 29 | name: "ap-northeast-1".into(), 30 | }); 31 | let _ = client.get("id0").run().await; 32 | } 33 | 
rt.block_on(example()); 34 | } 35 | -------------------------------------------------------------------------------- /raiden/examples/get_with_retries.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden)] 8 | #[raiden(table_name = "user")] 9 | pub struct User { 10 | #[raiden(partition_key)] 11 | pub id: String, 12 | } 13 | 14 | struct MyRetryStrategy; 15 | 16 | impl RetryStrategy for MyRetryStrategy { 17 | fn should_retry(&self, _error: &RaidenError) -> bool { 18 | true 19 | } 20 | 21 | fn policy(&self) -> Policy { 22 | Policy::Limit(3) 23 | } 24 | } 25 | 26 | fn main() { 27 | tracing_subscriber::fmt() 28 | .with_env_filter(EnvFilter::new("get_with_retries=debug,info")) 29 | .with_file(true) 30 | .with_line_number(true) 31 | .with_span_events(FmtSpan::CLOSE) 32 | .with_target(true) 33 | .with_timer(UtcTime::rfc_3339()) 34 | .init(); 35 | 36 | let rt = tokio::runtime::Runtime::new().unwrap(); 37 | async fn example() { 38 | let client = User::client(Region::Custom { 39 | endpoint: "http://localhost:8000".into(), 40 | name: "ap-northeast-1".into(), 41 | }); 42 | let _ = client 43 | .with_retries(Box::new(MyRetryStrategy)) 44 | .get("anonymous") 45 | .run() 46 | .await; 47 | } 48 | rt.block_on(example()); 49 | } 50 | -------------------------------------------------------------------------------- /raiden/examples/hello.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden)] 8 | #[raiden(table_name = "user")] 9 | pub struct User { 10 | #[raiden(partition_key)] 11 | pub id: String, 12 | #[raiden(sort_key)] 13 | pub year: usize, 14 | #[raiden(uuid)] 15 | pub uuid: String, 16 | pub name: String, 17 | } 18 | 19 | fn main() { 20 | tracing_subscriber::fmt() 21 | .with_env_filter(EnvFilter::new("hello=debug,info")) 22 | .with_file(true) 23 | .with_line_number(true) 24 | .with_span_events(FmtSpan::CLOSE) 25 | .with_target(true) 26 | .with_timer(UtcTime::rfc_3339()) 27 | .init(); 28 | 29 | let rt = tokio::runtime::Runtime::new().unwrap(); 30 | async fn example() { 31 | let client = User::client(Region::Custom { 32 | endpoint: "http://localhost:8000".into(), 33 | name: "ap-northeast-1".into(), 34 | }); 35 | //let user = UserPutItemInput { 36 | // id: "a".to_owned(), 37 | // name: "bokuweb".to_owned(), 38 | // // uuid: "aa".to_owned(), 39 | //}; 40 | //let cond = User::condition() 41 | // .attr(User::name()) 42 | // .eq_attr(User::name()); 43 | // 44 | //// let cond = User::condition().not().attr_type(User::name(), AttributeType::N); 45 | //// .and(User::condition().not().attribute_exists(User::id())); 46 | let keys: Vec<(&str, usize)> = vec![("bokuweb", 2019), ("raiden", 2020)]; 47 | let _ = client.batch_get(keys).run().await; 48 | } 49 | rt.block_on(example()); 50 | } 51 | -------------------------------------------------------------------------------- /raiden/examples/last_key.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden)] 8 | #[raiden(table_name = "LastEvaluateKeyData")] 9 | pub struct Test { 10 | #[raiden(partition_key)] 11 | pub id: String, 12 | pub ref_id: String, 13 | pub long_text: 
String, 14 | } 15 | 16 | fn main() { 17 | tracing_subscriber::fmt() 18 | .with_env_filter(EnvFilter::new("last_key=debug,info")) 19 | .with_file(true) 20 | .with_line_number(true) 21 | .with_span_events(FmtSpan::CLOSE) 22 | .with_target(true) 23 | .with_timer(UtcTime::rfc_3339()) 24 | .init(); 25 | 26 | let rt = tokio::runtime::Runtime::new().unwrap(); 27 | async fn example() { 28 | let client = Test::client(Region::Custom { 29 | endpoint: "http://localhost:8000".into(), 30 | name: "ap-northeast-1".into(), 31 | }); 32 | let cond = Test::key_condition(Test::ref_id()).eq("id0"); 33 | let res = client 34 | .query() 35 | .index("testGSI") 36 | .limit(5) 37 | .key_condition(cond) 38 | .run() 39 | .await; 40 | dbg!(&res.unwrap().items.len()); 41 | } 42 | rt.block_on(example()); 43 | } 44 | -------------------------------------------------------------------------------- /raiden/examples/put.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Debug, Clone, PartialEq)] 8 | pub struct CustomId(String); 9 | 10 | impl From for CustomId { 11 | fn from(v: String) -> CustomId { 12 | CustomId(v) 13 | } 14 | } 15 | 16 | impl raiden::IntoAttribute for CustomId { 17 | fn into_attr(self) -> raiden::AttributeValue { 18 | raiden::AttributeValue { 19 | s: Some(self.0), 20 | ..::raiden::AttributeValue::default() 21 | } 22 | } 23 | } 24 | 25 | impl raiden::FromAttribute for CustomId { 26 | fn from_attr(value: Option) -> Result { 27 | Ok(CustomId(value.unwrap().s.unwrap())) 28 | } 29 | } 30 | 31 | #[derive(Raiden)] 32 | #[raiden(table_name = "user")] 33 | pub struct User { 34 | #[raiden(partition_key)] 35 | pub id: String, 36 | #[raiden(uuid)] 37 | pub uuid: CustomId, 38 | pub name: String, 39 | } 40 | 41 | fn main() { 42 | tracing_subscriber::fmt() 43 | .with_env_filter(EnvFilter::new("put=debug,info")) 44 | .with_file(true) 45 | .with_line_number(true) 46 | .with_span_events(FmtSpan::CLOSE) 47 | .with_target(true) 48 | .with_timer(UtcTime::rfc_3339()) 49 | .init(); 50 | 51 | let rt = tokio::runtime::Runtime::new().unwrap(); 52 | async fn example() { 53 | let client = User::client(Region::Custom { 54 | endpoint: "http://localhost:8000".into(), 55 | name: "ap-northeast-1".into(), 56 | }); 57 | let input = User::put_item_builder() 58 | .id("testId".to_owned()) 59 | .name("bokuweb".to_owned()) 60 | .build(); 61 | let _ = client.put(input).run().await; 62 | } 63 | rt.block_on(example()); 64 | } 65 | -------------------------------------------------------------------------------- /raiden/examples/query.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden, Debug)] 8 | #[allow(dead_code)] 9 | pub struct QueryTestData0 { 10 | #[raiden(partition_key)] 11 | #[allow(dead_code)] 12 | id: String, 13 | name: String, 14 | year: usize, 15 | num: usize, 16 | } 17 | 18 | #[tokio::main] 19 | async fn main() { 20 | tracing_subscriber::fmt() 21 | .with_env_filter(EnvFilter::new("query=debug,info")) 22 | .with_file(true) 23 | .with_line_number(true) 24 | .with_span_events(FmtSpan::CLOSE) 25 | .with_target(true) 26 | .with_timer(UtcTime::rfc_3339()) 27 | .init(); 28 | 29 | let client = QueryTestData0::client(Region::Custom { 30 | endpoint: "http://localhost:8000".into(), 31 | name: "ap-northeast-1".into(), 32 
| }); 33 | let cond = QueryTestData0::key_condition(QueryTestData0::id()) 34 | .eq("id0") 35 | .and(QueryTestData0::key_condition(QueryTestData0::year()).eq(1999)); 36 | let res = client.query().key_condition(cond).run().await; 37 | dbg!(&res); 38 | 39 | let cond = QueryTestData0::key_condition(QueryTestData0::id()) 40 | .eq("id0") 41 | .and(QueryTestData0::key_condition(QueryTestData0::year()).eq(1999)); 42 | let res = client.query().key_condition(cond).run().await; 43 | dbg!(&res); 44 | 45 | let cond = QueryTestData0::key_condition(QueryTestData0::id()).eq("id0"); 46 | let filter = QueryTestData0::filter_expression(QueryTestData0::num()).eq(1000); 47 | let res = client 48 | .query() 49 | .key_condition(cond) 50 | .filter(filter) 51 | .run() 52 | .await; 53 | dbg!(&res); 54 | } 55 | -------------------------------------------------------------------------------- /raiden/examples/query_rename.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden)] 8 | #[raiden(table_name = "Project")] 9 | #[raiden(rename_all = "camelCase")] 10 | pub struct Project { 11 | #[raiden(partition_key)] 12 | pub id: String, 13 | pub org_id: String, 14 | pub updated_at: String, 15 | } 16 | 17 | fn main() { 18 | tracing_subscriber::fmt() 19 | .with_env_filter(EnvFilter::new("query_rename=debug,info")) 20 | .with_file(true) 21 | .with_line_number(true) 22 | .with_span_events(FmtSpan::CLOSE) 23 | .with_target(true) 24 | .with_timer(UtcTime::rfc_3339()) 25 | .init(); 26 | 27 | let rt = tokio::runtime::Runtime::new().unwrap(); 28 | async fn example() { 29 | let client = Project::client(Region::Custom { 30 | endpoint: "http://localhost:8000".into(), 31 | name: "ap-northeast-1".into(), 32 | }); 33 | let cond = Project::key_condition(Project::org_id()).eq("myOrg"); 34 | let _res = client 35 | .query() 36 | .index("orgIndex") 37 | .limit(11) 38 | .key_condition(cond) 39 | .run() 40 | .await; 41 | } 42 | rt.block_on(example()); 43 | } 44 | -------------------------------------------------------------------------------- /raiden/examples/scan.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden, Debug)] 8 | #[raiden(table_name = "ScanTestData0")] 9 | #[allow(dead_code)] 10 | pub struct ScanTestData0 { 11 | #[raiden(partition_key)] 12 | id: String, 13 | name: String, 14 | year: usize, 15 | num: usize, 16 | } 17 | 18 | #[tokio::main] 19 | async fn main() { 20 | tracing_subscriber::fmt() 21 | .with_env_filter(EnvFilter::new("scan=debug,info")) 22 | .with_file(true) 23 | .with_line_number(true) 24 | .with_span_events(FmtSpan::CLOSE) 25 | .with_target(true) 26 | .with_timer(UtcTime::rfc_3339()) 27 | .init(); 28 | 29 | let client = ScanTestData0::client(Region::Custom { 30 | endpoint: "http://localhost:8000".into(), 31 | name: "ap-northeast-1".into(), 32 | }); 33 | let res = client.scan().run().await; 34 | dbg!(&res); 35 | } 36 | -------------------------------------------------------------------------------- /raiden/examples/scan_with_filter.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden, Debug)] 8 | #[raiden(table_name = 
"ScanWithFilterTestData0")] 9 | #[allow(dead_code)] 10 | pub struct Scan { 11 | #[raiden(partition_key)] 12 | id: String, 13 | name: String, 14 | year: usize, 15 | num: usize, 16 | } 17 | 18 | #[tokio::main] 19 | async fn main() { 20 | tracing_subscriber::fmt() 21 | .with_env_filter(EnvFilter::new("scan_with_filter=debug,info")) 22 | .with_file(true) 23 | .with_line_number(true) 24 | .with_span_events(FmtSpan::CLOSE) 25 | .with_target(true) 26 | .with_timer(UtcTime::rfc_3339()) 27 | .init(); 28 | 29 | let client = Scan::client(Region::Custom { 30 | endpoint: "http://localhost:8000".into(), 31 | name: "ap-northeast-1".into(), 32 | }); 33 | let filter = Scan::filter_expression(Scan::num()).eq(1000); 34 | let res = client.scan().filter(filter).run().await.unwrap(); 35 | assert_eq!(res.items.len(), 50); 36 | } 37 | -------------------------------------------------------------------------------- /raiden/examples/transact_write.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden)] 8 | #[raiden(table_name = "user")] 9 | pub struct User { 10 | #[raiden(partition_key)] 11 | pub id: String, 12 | pub name: String, 13 | } 14 | 15 | fn main() { 16 | tracing_subscriber::fmt() 17 | .with_env_filter(EnvFilter::new("raiden=debug,info")) 18 | .with_file(true) 19 | .with_line_number(true) 20 | .with_span_events(FmtSpan::CLOSE) 21 | .with_target(true) 22 | .with_timer(UtcTime::rfc_3339()) 23 | .init(); 24 | 25 | let rt = tokio::runtime::Runtime::new().unwrap(); 26 | async fn example() { 27 | let tx = ::raiden::WriteTx::new(Region::Custom { 28 | endpoint: "http://localhost:8000".into(), 29 | name: "ap-northeast-1".into(), 30 | }); 31 | let cond = User::condition().attr_not_exists(User::id()); 32 | let input = User::put_item_builder() 33 | .id("testId".to_owned()) 34 | .name("bokuweb".to_owned()) 35 | .build(); 36 | let input2 = User::put_item_builder() 37 | .id("testId2".to_owned()) 38 | .name("bokuweb".to_owned()) 39 | .build(); 40 | tx.put(User::put(input).condition(cond)) 41 | .put(User::put(input2)) 42 | .run() 43 | .await 44 | .unwrap(); 45 | } 46 | rt.block_on(example()); 47 | } 48 | -------------------------------------------------------------------------------- /raiden/examples/transact_write_with_http_client.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden)] 8 | #[raiden(table_name = "user")] 9 | pub struct User { 10 | #[raiden(partition_key)] 11 | pub id: String, 12 | pub name: String, 13 | } 14 | 15 | fn main() { 16 | tracing_subscriber::fmt() 17 | .with_env_filter(EnvFilter::new("raiden=debug,info")) 18 | .with_file(true) 19 | .with_line_number(true) 20 | .with_span_events(FmtSpan::CLOSE) 21 | .with_target(true) 22 | .with_timer(UtcTime::rfc_3339()) 23 | .init(); 24 | 25 | let rt = tokio::runtime::Runtime::new().unwrap(); 26 | async fn example() { 27 | let dispatcher = 28 | raiden::request::HttpClient::new().expect("failed to create request dispatcher"); 29 | let credentials_provider = raiden::credential::DefaultCredentialsProvider::new() 30 | .expect("failed to create credentials provider"); 31 | let core_client = raiden::Client::new_with(credentials_provider, dispatcher); 32 | 33 | let tx = ::raiden::WriteTx::new_with_client( 34 | core_client, 35 | 
Region::Custom { 36 | endpoint: "http://localhost:8000".into(), 37 | name: "ap-northeast-1".into(), 38 | }, 39 | ); 40 | let cond = User::condition().attr_not_exists(User::id()); 41 | let input = User::put_item_builder() 42 | .id("testId".to_owned()) 43 | .name("bokuweb".to_owned()) 44 | .build(); 45 | let input2 = User::put_item_builder() 46 | .id("testId2".to_owned()) 47 | .name("bokuweb".to_owned()) 48 | .build(); 49 | tx.put(User::put(input).condition(cond)) 50 | .put(User::put(input2)) 51 | .run() 52 | .await 53 | .unwrap(); 54 | } 55 | rt.block_on(example()); 56 | } 57 | -------------------------------------------------------------------------------- /raiden/examples/update.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden, Debug)] 8 | #[raiden(table_name = "UpdateTestData0")] 9 | #[allow(dead_code)] 10 | pub struct Example { 11 | #[raiden(partition_key)] 12 | id: String, 13 | name: String, 14 | age: u8, 15 | } 16 | 17 | fn main() { 18 | tracing_subscriber::fmt() 19 | .with_env_filter(EnvFilter::new("update=debug,info")) 20 | .with_file(true) 21 | .with_line_number(true) 22 | .with_span_events(FmtSpan::CLOSE) 23 | .with_target(true) 24 | .with_timer(UtcTime::rfc_3339()) 25 | .init(); 26 | 27 | let rt = tokio::runtime::Runtime::new().unwrap(); 28 | async fn example() { 29 | let client = Example::client(Region::Custom { 30 | endpoint: "http://localhost:8000".into(), 31 | name: "ap-northeast-1".into(), 32 | }); 33 | let set_expression = Example::update_expression() 34 | .set(Example::name()) 35 | .value("updated!!"); 36 | let res = client 37 | .update("id0") 38 | .set(set_expression) 39 | .run() 40 | .await 41 | .unwrap(); 42 | dbg!(res.item); 43 | } 44 | rt.block_on(example()); 45 | } 46 | -------------------------------------------------------------------------------- /raiden/examples/with_http_client.rs: -------------------------------------------------------------------------------- 1 | use raiden::*; 2 | use tracing_subscriber::{ 3 | fmt::{format::FmtSpan, time::UtcTime}, 4 | EnvFilter, 5 | }; 6 | 7 | #[derive(Raiden)] 8 | #[raiden(table_name = "user")] 9 | pub struct User { 10 | #[raiden(partition_key)] 11 | pub id: String, 12 | #[raiden(sort_key)] 13 | pub year: usize, 14 | #[raiden(uuid)] 15 | pub uuid: String, 16 | pub name: String, 17 | } 18 | 19 | fn main() { 20 | tracing_subscriber::fmt() 21 | .with_env_filter(EnvFilter::new("with_http_client=debug,info")) 22 | .with_file(true) 23 | .with_line_number(true) 24 | .with_span_events(FmtSpan::CLOSE) 25 | .with_target(true) 26 | .with_timer(UtcTime::rfc_3339()) 27 | .init(); 28 | 29 | let rt = tokio::runtime::Runtime::new().unwrap(); 30 | async fn example() { 31 | let dispatcher = 32 | raiden::request::HttpClient::new().expect("failed to create request dispatcher"); 33 | let credentials_provider = raiden::credential::DefaultCredentialsProvider::new() 34 | .expect("failed to create credentials provider"); 35 | let core_client = raiden::Client::new_with(credentials_provider, dispatcher); 36 | 37 | let client = User::client_with( 38 | core_client, 39 | Region::Custom { 40 | endpoint: "http://localhost:8000".into(), 41 | name: "ap-northeast-1".into(), 42 | }, 43 | ); 44 | 45 | let keys: Vec<(&str, usize)> = vec![("bokuweb", 2019), ("raiden", 2020)]; 46 | let _ = client.batch_get(keys).run().await; 47 | } 48 | rt.block_on(example()); 49 | } 50 | 
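
The examples above each exercise a single operation in isolation. As a composite illustration, the following is a minimal put-then-get sketch against the same local endpoint; it is a sketch only, assuming the partition-key-only `user` table layout used in examples/transact_write.rs and examples/get_with_retries.rs (the id value "example-id" is arbitrary), and is not an example shipped in raiden/examples.

use raiden::*;

#[derive(Raiden, Debug)]
#[raiden(table_name = "user")]
pub struct User {
    #[raiden(partition_key)]
    pub id: String,
    pub name: String,
}

#[tokio::main]
async fn main() {
    // Point the generated client at DynamoDB Local, as the other examples do.
    let client = User::client(Region::Custom {
        endpoint: "http://localhost:8000".into(),
        name: "ap-northeast-1".into(),
    });

    // Write an item with the derived builder, then read it back by partition key.
    let input = User::put_item_builder()
        .id("example-id".to_owned())
        .name("example".to_owned())
        .build();
    let _ = client.put(input).run().await;

    let res = client.get("example-id").run().await;
    dbg!(res.map(|out| out.item.name));
}

As in examples/query.rs and examples/scan.rs, #[tokio::main] stands in for the explicit tokio runtime that the other examples construct by hand.
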
-------------------------------------------------------------------------------- /raiden/src/errors/transaction.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use thiserror::Error; 4 | 5 | const TRANSACTION_CANCELLED_MESSAGE_PREFIX: &str = 6 | "Transaction cancelled, please refer cancellation reasons for specific reasons"; 7 | 8 | #[derive(Clone, Debug, PartialEq)] 9 | pub struct RaidenTransactionCancellationReasons( 10 | pub Vec>, 11 | ); 12 | 13 | impl RaidenTransactionCancellationReasons { 14 | // If `message` is unexcepted format, [RaidenTransactionCancellationReason::Unknown] is returned instead of Err(_) 15 | // TODO: Fix it later. 16 | #[allow(clippy::should_implement_trait)] 17 | pub fn from_str(message: &str) -> Self { 18 | if !message.starts_with(TRANSACTION_CANCELLED_MESSAGE_PREFIX) { 19 | return RaidenTransactionCancellationReasons(vec![Some( 20 | RaidenTransactionCancellationReason::Unknown, 21 | )]); 22 | } 23 | 24 | RaidenTransactionCancellationReasons( 25 | message[TRANSACTION_CANCELLED_MESSAGE_PREFIX.len()..] 26 | .trim_matches(|c| char::is_whitespace(c) || c == '[' || c == ']') 27 | .split(',') 28 | .map(str::trim) 29 | .map(|reason| match reason { 30 | "None" => None, 31 | reason => Some(RaidenTransactionCancellationReason::from_str(reason)), 32 | }) 33 | .collect(), 34 | ) 35 | } 36 | 37 | fn has_error(&self, r: RaidenTransactionCancellationReason) -> bool { 38 | self.0 39 | .iter() 40 | .any(|reason| matches!(reason, Some(error) if *error == r)) 41 | } 42 | 43 | pub fn has_conditional_check_failed(&self) -> bool { 44 | self.has_error(RaidenTransactionCancellationReason::ConditionalCheckFailed) 45 | } 46 | 47 | pub fn has_item_collection_size_limit_exceeded(&self) -> bool { 48 | self.has_error(RaidenTransactionCancellationReason::ItemCollectionSizeLimitExceeded) 49 | } 50 | 51 | pub fn has_transaction_conflict(&self) -> bool { 52 | self.has_error(RaidenTransactionCancellationReason::TransactionConflict) 53 | } 54 | 55 | pub fn has_provisioned_throughput_exceeded(&self) -> bool { 56 | self.has_error(RaidenTransactionCancellationReason::ProvisionedThroughputExceeded) 57 | } 58 | 59 | pub fn has_throttling_error(&self) -> bool { 60 | self.has_error(RaidenTransactionCancellationReason::ThrottlingError) 61 | } 62 | 63 | pub fn has_validation_error(&self) -> bool { 64 | self.has_error(RaidenTransactionCancellationReason::ValidationError) 65 | } 66 | } 67 | 68 | impl fmt::Display for RaidenTransactionCancellationReasons { 69 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 70 | let reasons = self 71 | .0 72 | .iter() 73 | .map(|reason| match reason { 74 | Some(reason) => reason.to_string(), 75 | None => String::from("None"), 76 | }) 77 | .collect::>() 78 | .join(", "); 79 | f.write_fmt(format_args!("[{reasons}]")) 80 | } 81 | } 82 | 83 | #[derive(Error, Clone, Debug, PartialEq)] 84 | pub enum RaidenTransactionCancellationReason { 85 | #[error("Unknown")] 86 | Unknown, 87 | #[error("ConditionalCheckFailed")] 88 | ConditionalCheckFailed, 89 | #[error("ItemCollectionSizeLimitExceeded")] 90 | ItemCollectionSizeLimitExceeded, 91 | #[error("TransactionConflict")] 92 | TransactionConflict, 93 | #[error("ProvisionedThroughputExceeded")] 94 | ProvisionedThroughputExceeded, 95 | #[error("ThrottlingError")] 96 | ThrottlingError, 97 | #[error("ValidationError")] 98 | ValidationError, 99 | } 100 | 101 | impl RaidenTransactionCancellationReason { 102 | // TODO: Fix it later. 
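    // Like the reasons-list parser above, this is an inherent `from_str` rather than an
    // implementation of `std::str::FromStr`: the parse never fails, because unrecognized
    // reasons collapse into `Self::Unknown` instead of returning an `Err(_)`, which is
    // also why the clippy lint below is allowed.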
103 | #[allow(clippy::should_implement_trait)] 104 | pub fn from_str(reason: &str) -> Self { 105 | match reason { 106 | "ConditionalCheckFailed" => Self::ConditionalCheckFailed, 107 | "ItemCollectionSizeLimitExceeded" => Self::ItemCollectionSizeLimitExceeded, 108 | "TransactionConflict" => Self::TransactionConflict, 109 | "ProvisionedThroughputExceeded" => Self::ProvisionedThroughputExceeded, 110 | "ThrottlingError" => Self::ThrottlingError, 111 | "ValidationError" => Self::ValidationError, 112 | // If `reason` is unexcepted, Self::Unknown is returned instead of Err(_) 113 | _ => Self::Unknown, 114 | } 115 | } 116 | } 117 | 118 | #[cfg(test)] 119 | mod tests { 120 | use crate::{RaidenTransactionCancellationReason, RaidenTransactionCancellationReasons}; 121 | 122 | #[test] 123 | fn parse_message_single() { 124 | let message = "Transaction cancelled, please refer cancellation reasons for specific reasons [ConditionalCheckFailed]"; 125 | let reasons = RaidenTransactionCancellationReasons::from_str(message); 126 | 127 | assert_eq!( 128 | reasons, 129 | RaidenTransactionCancellationReasons(vec![Some( 130 | RaidenTransactionCancellationReason::ConditionalCheckFailed 131 | ),]) 132 | ); 133 | } 134 | 135 | #[test] 136 | fn parse_message_multi() { 137 | let message = "Transaction cancelled, please refer cancellation reasons for specific reasons [None, ConditionalCheckFailed]"; 138 | let reasons = RaidenTransactionCancellationReasons::from_str(message); 139 | 140 | assert_eq!( 141 | reasons, 142 | RaidenTransactionCancellationReasons(vec![ 143 | None, 144 | Some(RaidenTransactionCancellationReason::ConditionalCheckFailed), 145 | ]) 146 | ); 147 | } 148 | 149 | #[test] 150 | fn parse_message_unknown() { 151 | let message = "Transaction cancelled, please refer cancellation reasons for specific reasons [UnknownSuperError]"; 152 | let reasons = RaidenTransactionCancellationReasons::from_str(message); 153 | 154 | assert_eq!( 155 | reasons, 156 | RaidenTransactionCancellationReasons(vec![Some( 157 | RaidenTransactionCancellationReason::Unknown 158 | ),]) 159 | ); 160 | } 161 | 162 | #[test] 163 | fn parse_message_unexpected_format() { 164 | let message = "A language empowering everyone to build reliable and efficient software"; 165 | let reasons = RaidenTransactionCancellationReasons::from_str(message); 166 | 167 | assert_eq!( 168 | reasons, 169 | RaidenTransactionCancellationReasons(vec![Some( 170 | RaidenTransactionCancellationReason::Unknown 171 | ),]) 172 | ); 173 | } 174 | 175 | #[test] 176 | fn has_error() { 177 | let results = RaidenTransactionCancellationReasons(vec![ 178 | None, 179 | Some(RaidenTransactionCancellationReason::TransactionConflict), 180 | Some(RaidenTransactionCancellationReason::ConditionalCheckFailed), 181 | ]); 182 | 183 | assert!(results.has_error(RaidenTransactionCancellationReason::ConditionalCheckFailed)); 184 | assert!(results.has_error(RaidenTransactionCancellationReason::TransactionConflict)); 185 | 186 | assert!(!results 187 | .has_error(RaidenTransactionCancellationReason::ItemCollectionSizeLimitExceeded)); 188 | assert!( 189 | !results.has_error(RaidenTransactionCancellationReason::ProvisionedThroughputExceeded) 190 | ); 191 | assert!(!results.has_error(RaidenTransactionCancellationReason::ThrottlingError)); 192 | assert!(!results.has_error(RaidenTransactionCancellationReason::ValidationError)); 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /raiden/src/id_generator/mod.rs: 
-------------------------------------------------------------------------------- 1 | pub trait IdGenerator { 2 | #[cfg(not(test))] 3 | fn gen() -> String { 4 | match std::env::var("RAIDEN_UUID_FIXED_BY") { 5 | Ok(v) => v, 6 | Err(_) => uuid::Uuid::new_v4().to_string(), 7 | } 8 | } 9 | 10 | #[cfg(test)] 11 | fn gen() -> String { 12 | "01234567-89ab-cdef-0123-456789abcdef".to_owned() 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /raiden/src/next_token/mod.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use base64::{engine::general_purpose::STANDARD, Engine}; 4 | 5 | #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 6 | pub struct NextToken(String); 7 | 8 | impl NextToken { 9 | pub fn new(token: impl Into) -> Self { 10 | Self(token.into()) 11 | } 12 | pub fn into_attr_values(self) -> Result { 13 | let decoded = match STANDARD.decode(self.0) { 14 | Ok(decoded) => decoded, 15 | Err(_) => return Err(super::RaidenError::NextTokenDecodeError), 16 | }; 17 | let s = match std::str::from_utf8(&decoded[..]) { 18 | Ok(s) => s, 19 | Err(_) => return Err(super::RaidenError::NextTokenDecodeError), 20 | }; 21 | 22 | let deserialized: std::collections::HashMap = 23 | match serde_json::from_str(s) { 24 | Ok(deserialized) => deserialized, 25 | Err(_) => return Err(super::RaidenError::NextTokenDecodeError), 26 | }; 27 | Ok(deserialized) 28 | } 29 | 30 | pub fn from_attr(key: &super::AttributeValues) -> Self { 31 | let serialized = serde_json::to_string(key).expect("should serialize"); 32 | Self(STANDARD.encode(serialized)) 33 | } 34 | } 35 | 36 | impl fmt::Display for NextToken { 37 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 38 | write!(f, "{}", self.0) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /raiden/src/ops/batch_delete.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | // See. https://github.com/rusoto/rusoto/blob/69e7c9150d98916ef8fc814f5cd17eb0e4dee3d3/rusoto/services/dynamodb/src/generated.rs#L395 4 | #[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)] 5 | pub struct BatchDeleteOutput { 6 | pub consumed_capacity: Option>, 7 | pub unprocessed_items: Vec, 8 | } 9 | -------------------------------------------------------------------------------- /raiden/src/ops/batch_get.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | // See. https://github.com/rusoto/rusoto/blob/69e7c9150d98916ef8fc814f5cd17eb0e4dee3d3/rusoto/services/dynamodb/src/generated.rs#L356 4 | #[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)] 5 | pub struct BatchGetOutput { 6 | pub consumed_capacity: Option>, 7 | pub items: Vec, 8 | pub unprocessed_keys: Option, 9 | } 10 | -------------------------------------------------------------------------------- /raiden/src/ops/get.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | // See. 
https://github.com/rusoto/rusoto/blob/cf22a4348ae717a20760bb9934cfd118ddb4437e/rusoto/services/dynamodb/src/generated.rs#L1168 4 | #[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)] 5 | pub struct GetOutput { 6 | pub consumed_capacity: Option, 7 | pub item: T, 8 | } 9 | -------------------------------------------------------------------------------- /raiden/src/ops/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod batch_delete; 2 | pub mod batch_get; 3 | pub mod get; 4 | pub mod put; 5 | pub mod query; 6 | pub mod scan; 7 | pub mod update; 8 | 9 | pub mod transact_write; 10 | 11 | pub use transact_write::*; 12 | -------------------------------------------------------------------------------- /raiden/src/ops/put.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | // See. https://github.com/rusoto/rusoto/blob/cf22a4348ae717a20760bb9934cfd118ddb4437e/rusoto/services/dynamodb/src/generated.rs#L1168 4 | #[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)] 5 | pub struct PutOutput { 6 | pub consumed_capacity: Option, 7 | pub item: T, 8 | } 9 | -------------------------------------------------------------------------------- /raiden/src/ops/query.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | // See. https://github.com/rusoto/rusoto/blob/cf22a4348ae717a20760bb9934cfd118ddb4437e/rusoto/services/dynamodb/src/generated.rs#L1168 4 | #[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)] 5 | pub struct QueryOutput { 6 | pub consumed_capacity: Option, 7 | pub items: Vec, 8 | pub count: Option, 9 | pub next_token: Option, 10 | pub scanned_count: Option, 11 | } 12 | -------------------------------------------------------------------------------- /raiden/src/ops/scan.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | // See. 
https://github.com/rusoto/rusoto/blob/cf22a4348ae717a20760bb9934cfd118ddb4437e/rusoto/services/dynamodb/src/generated.rs#L2406 4 | #[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)] 5 | pub struct ScanOutput { 6 | pub consumed_capacity: Option, 7 | pub items: Vec, 8 | pub count: Option, 9 | pub last_evaluated_key: Option<::std::collections::HashMap>, 10 | pub scanned_count: Option, 11 | } 12 | -------------------------------------------------------------------------------- /raiden/src/ops/transact_write.rs: -------------------------------------------------------------------------------- 1 | use crate::{DynamoDb, TransactWriteItem}; 2 | 3 | pub struct WriteTx { 4 | items: Vec, 5 | client: crate::DynamoDbClient, 6 | retry_condition: crate::RetryCondition, 7 | } 8 | impl WriteTx { 9 | pub fn new(region: crate::Region) -> Self { 10 | let client = crate::DynamoDbClient::new(region); 11 | Self { 12 | items: vec![], 13 | client, 14 | retry_condition: crate::RetryCondition::new(), 15 | } 16 | } 17 | pub fn new_with_client(client: crate::Client, region: crate::Region) -> Self { 18 | let client = crate::DynamoDbClient::new_with_client(client, region); 19 | Self { 20 | items: vec![], 21 | client, 22 | retry_condition: crate::RetryCondition::new(), 23 | } 24 | } 25 | 26 | pub fn with_retries(mut self, s: Box) -> Self { 27 | self.retry_condition.strategy = s; 28 | self 29 | } 30 | 31 | pub fn put(mut self, builder: impl TransactWritePutBuilder) -> Self { 32 | self.items.push(TransactWriteItem { 33 | condition_check: None, 34 | delete: None, 35 | update: None, 36 | put: Some(builder.build()), 37 | }); 38 | self 39 | } 40 | 41 | pub fn update(mut self, builder: impl TransactWriteUpdateBuilder) -> Self { 42 | self.items.push(TransactWriteItem { 43 | condition_check: None, 44 | delete: None, 45 | update: Some(builder.build()), 46 | put: None, 47 | }); 48 | self 49 | } 50 | 51 | pub fn delete(mut self, builder: impl TransactWriteDeleteBuilder) -> Self { 52 | self.items.push(TransactWriteItem { 53 | condition_check: None, 54 | delete: Some(builder.build()), 55 | update: None, 56 | put: None, 57 | }); 58 | self 59 | } 60 | 61 | pub fn condition_check(mut self, builder: impl TransactWriteConditionCheckBuilder) -> Self { 62 | self.items.push(TransactWriteItem { 63 | condition_check: Some(builder.build()), 64 | delete: None, 65 | update: None, 66 | put: None, 67 | }); 68 | self 69 | } 70 | 71 | pub async fn run(self) -> Result<(), crate::RaidenError> { 72 | let policy: crate::RetryPolicy = self.retry_condition.strategy.policy().into(); 73 | let client = self.client; 74 | let input = crate::TransactWriteItemsInput { 75 | client_request_token: None, 76 | return_consumed_capacity: None, 77 | return_item_collection_metrics: None, 78 | transact_items: self.items, 79 | }; 80 | policy 81 | .retry_if( 82 | move || { 83 | let client = client.clone(); 84 | let input = input.clone(); 85 | async { WriteTx::inner_run(client, input).await } 86 | }, 87 | &self.retry_condition, 88 | ) 89 | .await 90 | } 91 | 92 | #[cfg_attr(feature = "tracing", tracing::instrument( 93 | level = tracing::Level::DEBUG, 94 | name = "dynamodb::action", 95 | skip_all, 96 | fields(api = "transact_write_items") 97 | ))] 98 | async fn inner_run( 99 | client: crate::DynamoDbClient, 100 | input: crate::TransactWriteItemsInput, 101 | ) -> Result<(), crate::RaidenError> { 102 | let _res = client.transact_write_items(input).await?; 103 | // TODO: ADD Resp 104 | Ok(()) 105 | } 106 | } 107 | 108 | pub trait TransactWritePutBuilder { 109 | fn 
build(self) -> crate::Put; 110 | } 111 | 112 | pub trait TransactWriteUpdateBuilder { 113 | fn build(self) -> crate::Update; 114 | } 115 | 116 | pub trait TransactWriteDeleteBuilder { 117 | fn build(self) -> crate::Delete; 118 | } 119 | 120 | pub trait TransactWriteConditionCheckBuilder { 121 | fn build(self) -> crate::ConditionCheck; 122 | } 123 | -------------------------------------------------------------------------------- /raiden/src/ops/update.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | // See. https://github.com/rusoto/rusoto/blob/cf22a4348ae717a20760bb9934cfd118ddb4437e/rusoto/services/dynamodb/src/generated.rs#L2971 4 | #[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)] 5 | pub struct UpdateOutput { 6 | pub consumed_capacity: Option, 7 | pub item: Option, 8 | pub item_collection_metrics: Option, 9 | } 10 | -------------------------------------------------------------------------------- /raiden/src/retry/mod.rs: -------------------------------------------------------------------------------- 1 | use again::Condition; 2 | pub use again::RetryPolicy; 3 | use std::time::Duration; 4 | 5 | use super::RaidenError; 6 | 7 | #[derive(Clone, Copy, PartialEq, Debug)] 8 | pub enum Policy { 9 | None, 10 | Limit(usize), 11 | Pause(usize, Duration), 12 | Exponential(usize, Duration), 13 | } 14 | 15 | impl Default for Policy { 16 | fn default() -> Self { 17 | Policy::Exponential(5, Duration::from_millis(50)) 18 | } 19 | } 20 | 21 | #[allow(clippy::from_over_into)] 22 | impl Into for Policy { 23 | fn into(self) -> RetryPolicy { 24 | match self { 25 | Policy::None => RetryPolicy::default().with_max_retries(0), 26 | Policy::Limit(times) => RetryPolicy::default() 27 | .with_max_retries(times) 28 | .with_jitter(true), 29 | Policy::Pause(times, duration) => RetryPolicy::fixed(duration) 30 | .with_max_retries(times) 31 | .with_jitter(true), 32 | Policy::Exponential(times, duration) => RetryPolicy::exponential(duration) 33 | .with_max_retries(times) 34 | .with_jitter(true), 35 | } 36 | } 37 | } 38 | 39 | pub struct RetryCondition { 40 | pub strategy: Box, 41 | } 42 | 43 | impl RetryCondition { 44 | pub fn new() -> Self { 45 | Default::default() 46 | } 47 | } 48 | 49 | impl Default for RetryCondition { 50 | fn default() -> Self { 51 | Self { 52 | strategy: Box::new(DefaultRetryStrategy), 53 | } 54 | } 55 | } 56 | 57 | impl Condition for &RetryCondition { 58 | fn is_retryable(&mut self, error: &RaidenError) -> bool { 59 | self.strategy.should_retry(error) 60 | } 61 | } 62 | 63 | pub trait RetryStrategy { 64 | fn should_retry(&self, error: &RaidenError) -> bool; 65 | fn policy(&self) -> Policy; 66 | } 67 | 68 | #[derive(Clone, Copy, PartialEq, Debug)] 69 | pub struct DefaultRetryStrategy; 70 | 71 | impl RetryStrategy for DefaultRetryStrategy { 72 | fn should_retry(&self, error: &RaidenError) -> bool { 73 | matches!( 74 | error, 75 | RaidenError::InternalServerError(_) 76 | | RaidenError::ProvisionedThroughputExceeded(_) 77 | | RaidenError::RequestLimitExceeded(_) 78 | // Sometimes I ran into `HttpDispatchError { message: "Error during dispatch: connection closed before message completed" }` and 79 | // CredentialsError { message: "Request ID: Some(\"xxx\") Body: \n \n Sender\n Throttling\n Rate exceeded\n \n xxx\n\n" } 80 | | RaidenError::HttpDispatch(_) 81 | | RaidenError::Credentials(_) 82 | // INFO: For now, return true, when unknown error detected. 
83 | // This is because, sometimes throttlingException is included in unknown error. 84 | // please make more rigorous classification of errors. 85 | | RaidenError::Unknown(_) 86 | ) 87 | } 88 | 89 | fn policy(&self) -> Policy { 90 | Policy::default() 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /raiden/src/types.rs: -------------------------------------------------------------------------------- 1 | pub type Placeholder = String; 2 | -------------------------------------------------------------------------------- /raiden/src/update_expression/add.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | pub struct Add { 4 | target: T, 5 | } 6 | 7 | pub struct AddExpressionFilled { 8 | target: T, 9 | value: (super::Placeholder, super::AttributeValue), 10 | } 11 | 12 | impl Add { 13 | pub fn new(target: T) -> Self { 14 | Self { target } 15 | } 16 | 17 | pub fn value(self, value: impl super::IntoAttribute) -> AddExpressionFilled { 18 | let placeholder = format!(":value{}", super::generate_value_id()); 19 | let value = (placeholder, value.into_attr()); 20 | let Add { target } = self; 21 | AddExpressionFilled:: { target, value } 22 | } 23 | } 24 | 25 | impl UpdateAddExpressionBuilder for AddExpressionFilled { 26 | fn build(self) -> (String, super::AttributeNames, super::AttributeValues) { 27 | let attr = self.target.into_attr_name(); 28 | let attr_name = format!("#{attr}"); 29 | 30 | let mut names: super::AttributeNames = std::collections::HashMap::new(); 31 | let mut values: super::AttributeValues = std::collections::HashMap::new(); 32 | let (placeholder, value) = self.value; 33 | 34 | // See. https://github.com/raiden-rs/raiden/issues/57 35 | // https://github.com/raiden-rs/raiden/issues/58 36 | if value.null.is_some() || value == AttributeValue::default() { 37 | return ("".to_owned(), names, values); 38 | } 39 | 40 | names.insert(attr_name.clone(), attr); 41 | let expression = format!("{attr_name} {placeholder}"); 42 | values.insert(placeholder, value); 43 | (expression, names, values) 44 | } 45 | } 46 | 47 | #[cfg(test)] 48 | mod tests { 49 | 50 | use super::*; 51 | use crate::*; 52 | 53 | #[cfg(test)] 54 | use pretty_assertions::assert_eq; 55 | 56 | #[derive(Debug, Clone, Copy, PartialEq)] 57 | enum UserAttrNames { 58 | Age, 59 | } 60 | 61 | impl super::super::IntoAttrName for UserAttrNames { 62 | fn into_attr_name(self) -> String { 63 | match self { 64 | UserAttrNames::Age => "age".to_owned(), 65 | } 66 | } 67 | } 68 | 69 | #[test] 70 | fn test_add_value_expression() { 71 | crate::value_id::reset_value_id(); 72 | let (expression, names, values) = Add::new(UserAttrNames::Age).value(42).build(); 73 | let mut expected_names = std::collections::HashMap::new(); 74 | let mut expected_values = std::collections::HashMap::new(); 75 | expected_names.insert("#age".to_owned(), "age".to_owned()); 76 | expected_values.insert(":value0".to_owned(), 42.into_attr()); 77 | assert_eq!(expression, "#age :value0".to_owned(),); 78 | assert_eq!(names, expected_names); 79 | assert_eq!(values, expected_values); 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /raiden/src/update_expression/delete.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | pub struct Delete { 4 | target: T, 5 | } 6 | 7 | pub struct DeleteExpressionFilled { 8 | target: T, 9 | value: (super::Placeholder, super::AttributeValue), 
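    // The (placeholder, AttributeValue) pair filled in by `value()` below; the placeholder
    // is rendered as ":valueN" in the final DELETE update expression, mirroring `Add`.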
10 | } 11 | 12 | impl<T: IntoAttrName> Delete<T> { 13 | pub fn new(target: T) -> Self { 14 | Self { target } 15 | } 16 | 17 | pub fn value(self, value: impl super::IntoAttribute) -> DeleteExpressionFilled<T> { 18 | let placeholder = format!(":value{}", super::generate_value_id()); 19 | let value = (placeholder, value.into_attr()); 20 | let Delete { target } = self; 21 | DeleteExpressionFilled::<T> { target, value } 22 | } 23 | } 24 | 25 | impl<T: IntoAttrName> UpdateDeleteExpressionBuilder for DeleteExpressionFilled<T> { 26 | fn build(self) -> (String, super::AttributeNames, super::AttributeValues) { 27 | let attr = self.target.into_attr_name(); 28 | let attr_name = format!("#{attr}"); 29 | 30 | let mut names: super::AttributeNames = std::collections::HashMap::new(); 31 | let mut values: super::AttributeValues = std::collections::HashMap::new(); 32 | let (placeholder, value) = self.value; 33 | 34 | // See https://github.com/raiden-rs/raiden/issues/57 35 | // https://github.com/raiden-rs/raiden/issues/58 36 | if value.null.is_some() || value == AttributeValue::default() { 37 | return ("".to_owned(), names, values); 38 | } 39 | 40 | names.insert(attr_name.clone(), attr); 41 | let expression = format!("{attr_name} {placeholder}"); 42 | values.insert(placeholder, value); 43 | (expression, names, values) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /raiden/src/update_expression/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod add; 2 | pub mod delete; 3 | pub mod set; 4 | 5 | pub use add::*; 6 | pub use delete::*; 7 | pub use set::*; 8 | 9 | use super::{ 10 | generate_value_id, AttributeNames, AttributeValue, AttributeValues, IntoAttrName, 11 | IntoAttribute, Placeholder, 12 | }; 13 | 14 | pub enum SetOrRemove { 15 | Set(String, AttributeNames, AttributeValues), 16 | Remove(String, AttributeNames), 17 | } 18 | 19 | pub trait UpdateSetExpressionBuilder { 20 | fn build(self) -> SetOrRemove; 21 | } 22 | 23 | pub trait UpdateAddExpressionBuilder { 24 | fn build(self) -> (String, AttributeNames, AttributeValues); 25 | } 26 | 27 | pub trait UpdateDeleteExpressionBuilder { 28 | fn build(self) -> (String, AttributeNames, AttributeValues); 29 | } 30 | -------------------------------------------------------------------------------- /raiden/src/value_id.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicUsize; 2 | static VALUE_ID: AtomicUsize = AtomicUsize::new(0); 3 | 4 | pub fn generate_value_id() -> usize { 5 | use std::sync::atomic::Ordering; 6 | 7 | let id = VALUE_ID.load(Ordering::Relaxed); 8 | VALUE_ID.store(id.wrapping_add(1), Ordering::Relaxed); 9 | id 10 | } 11 | 12 | pub fn reset_value_id() { 13 | use std::sync::atomic::Ordering; 14 | 15 | VALUE_ID.store(0, Ordering::Relaxed); 16 | } 17 | -------------------------------------------------------------------------------- /raiden/tests/all/batch_delete.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod partition_key_tests { 3 | 4 | #[cfg(test)] 5 | use pretty_assertions::assert_eq; 6 | use raiden::*; 7 | 8 | #[derive(Raiden, Debug, Clone, PartialEq)] 9 | pub struct BatchDeleteTest0 { 10 | #[raiden(partition_key)] 11 | id: String, 12 | name: String, 13 | } 14 | 15 | #[test] 16 | fn test_batch_delete_item() { 17 | let rt = tokio::runtime::Runtime::new().unwrap(); 18 | async fn example() { 19 | let client = BatchDeleteTest0::client(Region::Custom { 20 | endpoint:
"http://localhost:8000".into(), 21 | name: "ap-northeast-1".into(), 22 | }); 23 | 24 | let res: batch_delete::BatchDeleteOutput = client 25 | .batch_delete(vec!["id0", "id1", "id2"]) 26 | .run() 27 | .await 28 | .unwrap(); 29 | assert_eq!( 30 | res, 31 | batch_delete::BatchDeleteOutput { 32 | consumed_capacity: None, 33 | unprocessed_items: vec![], 34 | } 35 | ); 36 | } 37 | rt.block_on(example()); 38 | } 39 | 40 | #[test] 41 | fn test_batch_delete_item_for_stored_and_unstored_keys() { 42 | let rt = tokio::runtime::Runtime::new().unwrap(); 43 | async fn example() { 44 | let client = BatchDeleteTest0::client(Region::Custom { 45 | endpoint: "http://localhost:8000".into(), 46 | name: "ap-northeast-1".into(), 47 | }); 48 | 49 | let res = client.batch_delete(vec!["id3", "unstored"]).run().await; 50 | assert!(res.is_ok()); 51 | } 52 | rt.block_on(example()); 53 | } 54 | 55 | #[test] 56 | fn test_batch_delete_item_for_unstored_keys() { 57 | let rt = tokio::runtime::Runtime::new().unwrap(); 58 | async fn example() { 59 | let client = BatchDeleteTest0::client(Region::Custom { 60 | endpoint: "http://localhost:8000".into(), 61 | name: "ap-northeast-1".into(), 62 | }); 63 | 64 | let res = client 65 | .batch_delete(vec!["unstored0", "unstored1", "unstored2"]) 66 | .run() 67 | .await; 68 | assert!(res.is_ok()); 69 | } 70 | rt.block_on(example()); 71 | } 72 | 73 | #[test] 74 | fn test_batch_delete_over_25_items() { 75 | let rt = tokio::runtime::Runtime::new().unwrap(); 76 | async fn example() { 77 | let client = BatchDeleteTest0::client(Region::Custom { 78 | endpoint: "http://localhost:8000".into(), 79 | name: "ap-northeast-1".into(), 80 | }); 81 | 82 | let res = client 83 | .batch_delete((4..=100).map(|i| format!("id{i}")).collect()) 84 | .run() 85 | .await; 86 | assert!(res.is_ok()); 87 | } 88 | rt.block_on(example()); 89 | } 90 | } 91 | 92 | #[cfg(test)] 93 | mod partition_key_and_sort_key_tests { 94 | #[cfg(test)] 95 | use raiden::*; 96 | 97 | #[derive(Raiden, Debug, Clone, PartialEq)] 98 | pub struct BatchDeleteTest1 { 99 | #[raiden(partition_key)] 100 | id: String, 101 | name: String, 102 | #[raiden(sort_key)] 103 | year: usize, 104 | } 105 | 106 | #[test] 107 | fn test_batch_delete_item_with_sort_key() { 108 | let rt = tokio::runtime::Runtime::new().unwrap(); 109 | async fn example() { 110 | let client = BatchDeleteTest1::client(Region::Custom { 111 | endpoint: "http://localhost:8000".into(), 112 | name: "ap-northeast-1".into(), 113 | }); 114 | 115 | let res = client 116 | .batch_delete(vec![ 117 | ("id0", 1999_usize), 118 | ("id1", 2000_usize), 119 | ("id2", 2001_usize), 120 | ]) 121 | .run() 122 | .await; 123 | assert!(res.is_ok()); 124 | } 125 | rt.block_on(example()); 126 | } 127 | 128 | #[test] 129 | fn test_batch_delete_item_with_sort_key_for_stored_and_unstored_keys() { 130 | let rt = tokio::runtime::Runtime::new().unwrap(); 131 | async fn example() { 132 | let client = BatchDeleteTest1::client(Region::Custom { 133 | endpoint: "http://localhost:8000".into(), 134 | name: "ap-northeast-1".into(), 135 | }); 136 | 137 | let res = client 138 | .batch_delete(vec![("id3", 2002_usize), ("unstored", 2000_usize)]) 139 | .run() 140 | .await; 141 | assert!(res.is_ok()); 142 | } 143 | rt.block_on(example()); 144 | } 145 | 146 | #[test] 147 | fn test_batch_delete_item_with_sort_key_for_unstored_keys() { 148 | let rt = tokio::runtime::Runtime::new().unwrap(); 149 | async fn example() { 150 | let client = BatchDeleteTest1::client(Region::Custom { 151 | endpoint: "http://localhost:8000".into(), 152 | name: 
"ap-northeast-1".into(), 153 | }); 154 | 155 | let res = client 156 | .batch_delete(vec![ 157 | ("unstored0", 1999_usize), 158 | ("unstore1", 2000_usize), 159 | ("unstored2", 2001_usize), 160 | ]) 161 | .run() 162 | .await; 163 | assert!(res.is_ok()); 164 | } 165 | rt.block_on(example()); 166 | } 167 | 168 | #[test] 169 | fn test_batch_delete_with_sort_key_over_25_items() { 170 | let rt = tokio::runtime::Runtime::new().unwrap(); 171 | async fn example() { 172 | let client = BatchDeleteTest1::client(Region::Custom { 173 | endpoint: "http://localhost:8000".into(), 174 | name: "ap-northeast-1".into(), 175 | }); 176 | 177 | let res = client 178 | .batch_delete( 179 | (4..=100) 180 | .map(|i| (format!("id{i}"), 1999_usize + i)) 181 | .collect(), 182 | ) 183 | .run() 184 | .await; 185 | assert!(res.is_ok()); 186 | } 187 | rt.block_on(example()); 188 | } 189 | } 190 | -------------------------------------------------------------------------------- /raiden/tests/all/condition.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | 4 | #[cfg(test)] 5 | use pretty_assertions::assert_eq; 6 | use raiden::condition::*; 7 | use raiden::*; 8 | 9 | #[allow(dead_code)] 10 | #[derive(Raiden)] 11 | #[raiden(table_name = "user")] 12 | #[derive(Debug, Clone)] 13 | pub struct User { 14 | #[raiden(partition_key)] 15 | id: String, 16 | name: String, 17 | } 18 | 19 | #[test] 20 | fn test_attribute_exists_condition() { 21 | let cond = User::condition().attr_exists(User::name()); 22 | let (condition_expression, attribute_names, _attribute_values) = cond.build(); 23 | let mut expected_names: std::collections::HashMap = 24 | std::collections::HashMap::new(); 25 | expected_names.insert("#name".to_owned(), "name".to_owned()); 26 | assert_eq!(condition_expression, "attribute_exists(#name)".to_owned(),); 27 | assert_eq!(attribute_names, expected_names); 28 | } 29 | 30 | #[test] 31 | fn test_not_attribute_exists_condition() { 32 | let cond = User::condition().not().attr_exists(User::name()); 33 | let (condition_expression, attribute_names, _attribute_values) = cond.build(); 34 | let mut expected_names: std::collections::HashMap = 35 | std::collections::HashMap::new(); 36 | expected_names.insert("#name".to_owned(), "name".to_owned()); 37 | assert_eq!( 38 | condition_expression, 39 | "NOT (attribute_exists(#name))".to_owned(), 40 | ); 41 | assert_eq!(attribute_names, expected_names); 42 | } 43 | 44 | #[test] 45 | fn test_attribute_not_exists_condition() { 46 | let cond = User::condition().attr_not_exists(User::name()); 47 | let (condition_expression, attribute_names, _attribute_values) = cond.build(); 48 | let mut expected_names: std::collections::HashMap = 49 | std::collections::HashMap::new(); 50 | expected_names.insert("#name".to_owned(), "name".to_owned()); 51 | assert_eq!( 52 | condition_expression, 53 | "attribute_not_exists(#name)".to_owned(), 54 | ); 55 | assert_eq!(attribute_names, expected_names); 56 | } 57 | 58 | #[test] 59 | fn test_attribute_not_exists_condition_and_operator() { 60 | let cond = User::condition() 61 | .attr_not_exists(User::name()) 62 | .and(User::condition().attr_not_exists(User::id())); 63 | let (condition_expression, attribute_names, _attribute_values) = cond.build(); 64 | let mut expected_names: std::collections::HashMap = 65 | std::collections::HashMap::new(); 66 | expected_names.insert("#name".to_owned(), "name".to_owned()); 67 | expected_names.insert("#id".to_owned(), "id".to_owned()); 68 | assert_eq!( 69 | 
condition_expression, 70 | "attribute_not_exists(#name) AND (attribute_not_exists(#id))".to_owned() 71 | ); 72 | assert_eq!(attribute_names, expected_names); 73 | } 74 | 75 | #[test] 76 | fn test_attribute_type_condition() { 77 | let cond = User::condition().attr_type(User::id(), raiden::AttributeType::S); 78 | let (condition_expression, attribute_names, attribute_values) = cond.build(); 79 | let mut expected_names: std::collections::HashMap = 80 | std::collections::HashMap::new(); 81 | expected_names.insert("#id".to_owned(), "id".to_owned()); 82 | let mut expected_values: raiden::AttributeValues = std::collections::HashMap::new(); 83 | expected_values.insert( 84 | ":typeS".to_owned(), 85 | raiden::AttributeValue { 86 | s: Some("S".to_string()), 87 | ..raiden::AttributeValue::default() 88 | }, 89 | ); 90 | 91 | assert_eq!( 92 | condition_expression, 93 | "attribute_type(#id, :typeS)".to_owned() 94 | ); 95 | assert_eq!(attribute_names, expected_names); 96 | assert_eq!(attribute_values, expected_values); 97 | } 98 | 99 | #[test] 100 | fn test_begins_with_condition() { 101 | let cond = User::condition().begins_with(User::name(), "boku"); 102 | let (condition_expression, attribute_names, attribute_values) = cond.build(); 103 | let mut expected_names: raiden::AttributeNames = std::collections::HashMap::new(); 104 | expected_names.insert("#name".to_owned(), "name".to_owned()); 105 | let mut expected_values: raiden::AttributeValues = std::collections::HashMap::new(); 106 | expected_values.insert( 107 | ":begins_with_17d8e2e8233d9a6ae428061cb2cdf226".to_owned(), 108 | raiden::AttributeValue { 109 | s: Some("boku".to_string()), 110 | ..raiden::AttributeValue::default() 111 | }, 112 | ); 113 | 114 | assert_eq!( 115 | condition_expression, 116 | "begins_with(#name, :begins_with_17d8e2e8233d9a6ae428061cb2cdf226)".to_owned() 117 | ); 118 | assert_eq!(attribute_names, expected_names); 119 | assert_eq!(attribute_values, expected_values); 120 | } 121 | 122 | #[test] 123 | fn test_and_condition() { 124 | let cond = User::condition() 125 | .attr_exists(User::name()) 126 | .and(User::condition().attr_exists(User::id())); 127 | let (condition_expression, attribute_names, _attribute_values) = cond.build(); 128 | let mut expected_names: std::collections::HashMap = 129 | std::collections::HashMap::new(); 130 | expected_names.insert("#id".to_owned(), "id".to_owned()); 131 | expected_names.insert("#name".to_owned(), "name".to_owned()); 132 | assert_eq!( 133 | condition_expression, 134 | "attribute_exists(#name) AND (attribute_exists(#id))".to_owned(), 135 | ); 136 | assert_eq!(attribute_names, expected_names); 137 | } 138 | 139 | #[test] 140 | fn test_three_and_condition() { 141 | let cond = User::condition().attr_exists(User::name()).and( 142 | User::condition().attr_exists(User::id()).and( 143 | User::condition() 144 | .attr_exists(User::id()) 145 | .and(User::condition().attr_exists(User::id())), 146 | ), 147 | ); 148 | let (condition_expression, attribute_names, _attribute_values) = cond.build(); 149 | let mut expected_names: std::collections::HashMap = 150 | std::collections::HashMap::new(); 151 | expected_names.insert("#id".to_owned(), "id".to_owned()); 152 | expected_names.insert("#name".to_owned(), "name".to_owned()); 153 | assert_eq!( 154 | condition_expression, 155 | "attribute_exists(#name) AND (attribute_exists(#id) AND (attribute_exists(#id) AND (attribute_exists(#id))))".to_owned(), 156 | ); 157 | assert_eq!(attribute_names, expected_names); 158 | } 159 | 160 | #[test] 161 | fn test_or_condition() { 
162 | let cond = User::condition() 163 | .attr_exists(User::name()) 164 | .or(User::condition().attr_exists(User::id())); 165 | let (condition_expression, attribute_names, _attribute_values) = cond.build(); 166 | let mut expected_names: std::collections::HashMap = 167 | std::collections::HashMap::new(); 168 | expected_names.insert("#id".to_owned(), "id".to_owned()); 169 | expected_names.insert("#name".to_owned(), "name".to_owned()); 170 | assert_eq!( 171 | condition_expression, 172 | "attribute_exists(#name) OR (attribute_exists(#id))".to_owned(), 173 | ); 174 | assert_eq!(attribute_names, expected_names); 175 | } 176 | 177 | #[test] 178 | fn test_three_or_condition() { 179 | let cond = User::condition() 180 | .attr_exists(User::name()) 181 | .or(User::condition() 182 | .attr_exists(User::id()) 183 | .or(User::condition() 184 | .attr_exists(User::id()) 185 | .or(User::condition().attr_exists(User::id())))); 186 | let (condition_expression, attribute_names, _attribute_values) = cond.build(); 187 | let mut expected_names: std::collections::HashMap = 188 | std::collections::HashMap::new(); 189 | expected_names.insert("#id".to_owned(), "id".to_owned()); 190 | expected_names.insert("#name".to_owned(), "name".to_owned()); 191 | assert_eq!( 192 | condition_expression, 193 | "attribute_exists(#name) OR (attribute_exists(#id) OR (attribute_exists(#id) OR (attribute_exists(#id))))".to_owned(), 194 | ); 195 | assert_eq!(attribute_names, expected_names); 196 | } 197 | 198 | #[test] 199 | fn test_cmp_eq_attr_attr_condition() { 200 | let cond = User::condition().attr(User::name()).eq_attr(User::name()); 201 | let (condition_expression, attribute_names, _attribute_values) = cond.build(); 202 | let mut expected_names: std::collections::HashMap = 203 | std::collections::HashMap::new(); 204 | expected_names.insert("#name".to_owned(), "name".to_owned()); 205 | assert_eq!(condition_expression, "#name = #name".to_owned(),); 206 | assert_eq!(attribute_names, expected_names); 207 | } 208 | 209 | #[test] 210 | fn test_cmp_eq_value_attr_condition() { 211 | reset_value_id(); 212 | let cond = User::condition().value("bokuweb").eq_attr(User::name()); 213 | let (condition_expression, attribute_names, attribute_values) = cond.build(); 214 | let mut expected_names: std::collections::HashMap = 215 | std::collections::HashMap::new(); 216 | expected_names.insert("#name".to_owned(), "name".to_owned()); 217 | let mut expected_values: raiden::AttributeValues = std::collections::HashMap::new(); 218 | expected_values.insert( 219 | ":value0".to_owned(), 220 | raiden::AttributeValue { 221 | s: Some("bokuweb".to_string()), 222 | ..raiden::AttributeValue::default() 223 | }, 224 | ); 225 | assert_eq!(condition_expression, ":value0 = #name".to_owned(),); 226 | assert_eq!(attribute_names, expected_names); 227 | assert_eq!(attribute_values, expected_values); 228 | } 229 | } 230 | -------------------------------------------------------------------------------- /raiden/tests/all/delete.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | 4 | #[cfg(test)] 5 | use pretty_assertions::assert_eq; 6 | use raiden::*; 7 | 8 | #[allow(dead_code)] 9 | #[derive(Raiden, Debug, Clone)] 10 | pub struct DeleteTest0 { 11 | #[raiden(partition_key)] 12 | id: String, 13 | name: String, 14 | removable: bool, 15 | } 16 | 17 | #[test] 18 | fn test_delete_item() { 19 | let rt = tokio::runtime::Runtime::new().unwrap(); 20 | async fn example() { 21 | let client = 
DeleteTest0::client(Region::Custom { 22 | endpoint: "http://localhost:8000".into(), 23 | name: "ap-northeast-1".into(), 24 | }); 25 | 26 | let res = client.delete("id0").run().await; 27 | assert_eq!(res.is_ok(), true); 28 | } 29 | rt.block_on(example()); 30 | } 31 | 32 | #[test] 33 | fn test_delete_item_with_unstored_key() { 34 | let rt = tokio::runtime::Runtime::new().unwrap(); 35 | async fn example() { 36 | let client = DeleteTest0::client(Region::Custom { 37 | endpoint: "http://localhost:8000".into(), 38 | name: "ap-northeast-1".into(), 39 | }); 40 | 41 | let res = client.delete("unstored").run().await; 42 | assert_eq!(res.is_ok(), true); 43 | } 44 | rt.block_on(example()); 45 | } 46 | 47 | #[test] 48 | fn test_delete_item_with_condition() { 49 | let rt = tokio::runtime::Runtime::new().unwrap(); 50 | async fn example() { 51 | let client = DeleteTest0::client(Region::Custom { 52 | endpoint: "http://localhost:8000".into(), 53 | name: "ap-northeast-1".into(), 54 | }); 55 | let cond = DeleteTest0::condition() 56 | .attr(DeleteTest0::removable()) 57 | .eq_value(true); 58 | let res = client.delete("id0").condition(cond.clone()).run().await; 59 | assert_eq!(res.is_ok(), false); 60 | let res = client.delete("id1").condition(cond).run().await; 61 | assert_eq!(res.is_ok(), true); 62 | } 63 | rt.block_on(example()); 64 | } 65 | 66 | #[allow(dead_code)] 67 | #[derive(Raiden, Debug, Clone)] 68 | pub struct DeleteTest1 { 69 | #[raiden(partition_key)] 70 | id: String, 71 | name: String, 72 | #[raiden(sort_key)] 73 | year: usize, 74 | } 75 | 76 | #[test] 77 | fn test_delete_item_with_sort_key() { 78 | let rt = tokio::runtime::Runtime::new().unwrap(); 79 | async fn example() { 80 | let client = DeleteTest1::client(Region::Custom { 81 | endpoint: "http://localhost:8000".into(), 82 | name: "ap-northeast-1".into(), 83 | }); 84 | 85 | let res = client.delete("id0", 1999_usize).run().await; 86 | assert_eq!(res.is_ok(), true); 87 | } 88 | rt.block_on(example()); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /raiden/tests/all/key_condition.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | 4 | #[cfg(test)] 5 | use pretty_assertions::assert_eq; 6 | use raiden::*; 7 | 8 | #[allow(dead_code)] 9 | #[derive(Raiden)] 10 | #[raiden(table_name = "user")] 11 | #[derive(Debug, Clone)] 12 | pub struct User { 13 | #[raiden(partition_key)] 14 | id: String, 15 | name: String, 16 | year: usize, 17 | num: usize, 18 | #[raiden(rename = "Renamed")] 19 | rename: usize, 20 | } 21 | 22 | #[test] 23 | fn test_eq_key_condition() { 24 | reset_value_id(); 25 | let cond = User::key_condition(User::name()).eq("bokuweb"); 26 | let (key_condition, attribute_names, attribute_values) = cond.build(); 27 | let mut expected_names: std::collections::HashMap = 28 | std::collections::HashMap::new(); 29 | expected_names.insert("#name".to_owned(), "name".to_owned()); 30 | let mut expected_values: std::collections::HashMap = 31 | std::collections::HashMap::new(); 32 | expected_values.insert(":value0".to_owned(), "bokuweb".into_attr()); 33 | assert_eq!(key_condition, "#name = :value0".to_owned(),); 34 | assert_eq!(attribute_names, expected_names); 35 | assert_eq!(attribute_values, expected_values); 36 | } 37 | 38 | #[test] 39 | fn test_two_and_key_condition() { 40 | reset_value_id(); 41 | 42 | let cond = User::key_condition(User::name()).eq("bokuweb").and( 43 | User::key_condition(User::year()) 44 | .eq(1999) 45 | 
.and(User::key_condition(User::num()).eq(100)), 46 | ); 47 | 48 | let (key_condition, attribute_names, attribute_values) = cond.build(); 49 | let mut expected_names: std::collections::HashMap = 50 | std::collections::HashMap::new(); 51 | expected_names.insert("#name".to_owned(), "name".to_owned()); 52 | expected_names.insert("#year".to_owned(), "year".to_owned()); 53 | expected_names.insert("#num".to_owned(), "num".to_owned()); 54 | let mut expected_values: std::collections::HashMap = 55 | std::collections::HashMap::new(); 56 | expected_values.insert(":value0".to_owned(), "bokuweb".into_attr()); 57 | expected_values.insert(":value1".to_owned(), 1999.into_attr()); 58 | expected_values.insert(":value2".to_owned(), 100.into_attr()); 59 | 60 | assert_eq!( 61 | key_condition, 62 | "#name = :value0 AND (#year = :value1 AND (#num = :value2))".to_owned(), 63 | ); 64 | assert_eq!(attribute_names, expected_names); 65 | assert_eq!(attribute_values, expected_values); 66 | } 67 | 68 | #[test] 69 | fn test_begins_with_key_condition() { 70 | reset_value_id(); 71 | 72 | let cond = User::key_condition(User::name()).begins_with("bokuweb"); 73 | let (key_condition, attribute_names, attribute_values) = cond.build(); 74 | let mut expected_names: std::collections::HashMap = 75 | std::collections::HashMap::new(); 76 | expected_names.insert("#name".to_owned(), "name".to_owned()); 77 | let mut expected_values: std::collections::HashMap = 78 | std::collections::HashMap::new(); 79 | expected_values.insert(":value0".to_owned(), "bokuweb".into_attr()); 80 | assert_eq!(key_condition, "begins_with(#name, :value0)".to_owned(),); 81 | assert_eq!(attribute_names, expected_names); 82 | assert_eq!(attribute_values, expected_values); 83 | } 84 | 85 | #[test] 86 | fn test_begins_with_id_and_key_condition() { 87 | reset_value_id(); 88 | 89 | let cond = User::key_condition(User::id()) 90 | .eq("id3") 91 | .and(User::key_condition(User::year()).begins_with("20")); 92 | let (key_condition, _attribute_names, _attribute_values) = cond.build(); 93 | assert_eq!( 94 | key_condition, 95 | "#id = :value0 AND (begins_with(#year, :value1))".to_owned(), 96 | ); 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /raiden/tests/all/mod.rs: -------------------------------------------------------------------------------- 1 | mod batch_delete; 2 | mod batch_get; 3 | mod condition; 4 | mod delete; 5 | mod filter_expression; 6 | mod get; 7 | mod key_condition; 8 | mod put; 9 | mod query; 10 | mod rename; 11 | mod rename_all; 12 | mod scan; 13 | mod transact_write; 14 | mod update; 15 | -------------------------------------------------------------------------------- /raiden/tests/all/rename.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | 4 | #[cfg(test)] 5 | use pretty_assertions::assert_eq; 6 | use raiden::*; 7 | 8 | #[derive(Raiden)] 9 | #[raiden(table_name = "RenameTestData0")] 10 | #[derive(Debug, Clone, PartialEq)] 11 | pub struct RenameTest { 12 | #[raiden(partition_key)] 13 | id: String, 14 | name: String, 15 | #[raiden(rename = "renamed")] 16 | before_rename: usize, 17 | } 18 | 19 | #[derive(Raiden)] 20 | #[raiden(table_name = "RenameTestData0")] 21 | #[derive(Debug, Clone, PartialEq)] 22 | pub struct RenameKeyTest { 23 | #[raiden(partition_key)] 24 | #[raiden(rename = "id")] 25 | before_renamed_id: String, 26 | name: String, 27 | #[raiden(rename = "renamed")] 28 | before_rename: usize, 29 | } 30 | 31 | #[test] 32 | fn 
test_rename_get_item() { 33 | let rt = tokio::runtime::Runtime::new().unwrap(); 34 | async fn example() { 35 | let client = RenameTest::client(Region::Custom { 36 | endpoint: "http://localhost:8000".into(), 37 | name: "ap-northeast-1".into(), 38 | }); 39 | 40 | let res = client.get("id0").run().await; 41 | assert_eq!( 42 | res.unwrap(), 43 | get::GetOutput { 44 | item: RenameTest { 45 | id: "id0".to_owned(), 46 | name: "john".to_owned(), 47 | before_rename: 1999, 48 | }, 49 | consumed_capacity: None, 50 | } 51 | ); 52 | assert_eq!( 53 | RenameTestAttrNames::Renamed.into_attr_name(), 54 | "renamed".to_owned() 55 | ); 56 | } 57 | rt.block_on(example()); 58 | } 59 | 60 | #[test] 61 | fn test_rename_key_get_item() { 62 | let rt = tokio::runtime::Runtime::new().unwrap(); 63 | async fn example() { 64 | let client = RenameKeyTest::client(Region::Custom { 65 | endpoint: "http://localhost:8000".into(), 66 | name: "ap-northeast-1".into(), 67 | }); 68 | 69 | let res = client.get("id0").run().await; 70 | assert_eq!( 71 | res.unwrap(), 72 | get::GetOutput { 73 | item: RenameKeyTest { 74 | before_renamed_id: "id0".to_owned(), 75 | name: "john".to_owned(), 76 | before_rename: 1999, 77 | }, 78 | consumed_capacity: None, 79 | } 80 | ); 81 | } 82 | rt.block_on(example()); 83 | } 84 | 85 | #[test] 86 | fn test_rename_query() { 87 | let rt = tokio::runtime::Runtime::new().unwrap(); 88 | async fn example() { 89 | let client = RenameTest::client(Region::Custom { 90 | endpoint: "http://localhost:8000".into(), 91 | name: "ap-northeast-1".into(), 92 | }); 93 | 94 | let cond = RenameTest::key_condition(RenameTest::id()).eq("id0"); 95 | let res = client.query().key_condition(cond).run().await; 96 | 97 | assert_eq!( 98 | res.unwrap(), 99 | query::QueryOutput { 100 | consumed_capacity: None, 101 | count: Some(1), 102 | items: vec![RenameTest { 103 | id: "id0".to_owned(), 104 | name: "john".to_owned(), 105 | before_rename: 1999, 106 | },], 107 | next_token: None, 108 | scanned_count: Some(1), 109 | } 110 | ) 111 | } 112 | rt.block_on(example()); 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /raiden/tests/all/rename_all.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | 4 | #[cfg(test)] 5 | use pretty_assertions::assert_eq; 6 | use raiden::*; 7 | 8 | #[derive(Raiden)] 9 | #[raiden(table_name = "RenameAllCamelCaseTestData0")] 10 | #[raiden(rename_all = "camelCase")] 11 | #[derive(Debug, Clone, PartialEq)] 12 | pub struct RenameAllCamelCaseTest { 13 | #[raiden(partition_key)] 14 | partition_key: String, 15 | foo_bar: String, 16 | project_id: usize, 17 | } 18 | 19 | #[test] 20 | fn test_rename_all_camelcase_get() { 21 | let rt = tokio::runtime::Runtime::new().unwrap(); 22 | async fn example() { 23 | let client = RenameAllCamelCaseTest::client(Region::Custom { 24 | endpoint: "http://localhost:8000".into(), 25 | name: "ap-northeast-1".into(), 26 | }); 27 | 28 | let res = client.get("id0").run().await; 29 | assert_eq!( 30 | res.unwrap(), 31 | get::GetOutput { 32 | item: RenameAllCamelCaseTest { 33 | partition_key: "id0".to_owned(), 34 | foo_bar: "john".to_owned(), 35 | project_id: 1, 36 | }, 37 | consumed_capacity: None, 38 | } 39 | ); 40 | } 41 | rt.block_on(example()); 42 | } 43 | 44 | #[derive(Raiden)] 45 | #[raiden(table_name = "RenameAllPascalCaseTestData0")] 46 | #[raiden(rename_all = "PascalCase")] 47 | #[derive(Debug, Clone, PartialEq)] 48 | pub struct RenameAllPascalCaseTest { 49 | 
#[raiden(partition_key)] 50 | partition_key: String, 51 | foo_bar: String, 52 | project_id: usize, 53 | } 54 | 55 | #[test] 56 | fn test_rename_all_pascalcase_get() { 57 | let rt = tokio::runtime::Runtime::new().unwrap(); 58 | async fn example() { 59 | let client = RenameAllPascalCaseTest::client(Region::Custom { 60 | endpoint: "http://localhost:8000".into(), 61 | name: "ap-northeast-1".into(), 62 | }); 63 | 64 | let res = client.get("id0").run().await; 65 | assert_eq!( 66 | res.unwrap(), 67 | get::GetOutput { 68 | item: RenameAllPascalCaseTest { 69 | partition_key: "id0".to_owned(), 70 | foo_bar: "john".to_owned(), 71 | project_id: 1, 72 | }, 73 | consumed_capacity: None, 74 | } 75 | ); 76 | } 77 | rt.block_on(example()); 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /raiden/tests/all/scan.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | 4 | #[cfg(test)] 5 | use pretty_assertions::assert_eq; 6 | use raiden::*; 7 | 8 | #[derive(Raiden, Debug, PartialEq)] 9 | pub struct ScanTestData0 { 10 | #[raiden(partition_key)] 11 | id: String, 12 | name: String, 13 | year: usize, 14 | num: usize, 15 | } 16 | 17 | #[test] 18 | fn test_scan() { 19 | let rt = tokio::runtime::Runtime::new().unwrap(); 20 | async fn example() { 21 | let client = ScanTestData0::client(Region::Custom { 22 | endpoint: "http://localhost:8000".into(), 23 | name: "ap-northeast-1".into(), 24 | }); 25 | let res = client.scan().run().await; 26 | 27 | assert_eq!( 28 | res.unwrap(), 29 | scan::ScanOutput { 30 | consumed_capacity: None, 31 | count: Some(1), 32 | items: vec![ScanTestData0 { 33 | id: "scanId0".to_owned(), 34 | name: "scanAlice".to_owned(), 35 | year: 2001, 36 | num: 2000 37 | }], 38 | last_evaluated_key: None, 39 | scanned_count: Some(1), 40 | } 41 | ) 42 | } 43 | rt.block_on(example()); 44 | } 45 | 46 | #[derive(Raiden)] 47 | #[raiden(table_name = "LastEvaluateKeyData")] 48 | #[allow(dead_code)] 49 | pub struct Test { 50 | #[raiden(partition_key)] 51 | id: String, 52 | ref_id: String, 53 | long_text: String, 54 | } 55 | 56 | #[test] 57 | fn test_scan_limit_1() { 58 | let rt = tokio::runtime::Runtime::new().unwrap(); 59 | async fn example() { 60 | let client = Test::client(Region::Custom { 61 | endpoint: "http://localhost:8000".into(), 62 | name: "ap-northeast-1".into(), 63 | }); 64 | let res = client.scan().limit(1).run().await; 65 | assert_eq!(res.unwrap().items.len(), 1); 66 | } 67 | rt.block_on(example()); 68 | } 69 | 70 | #[test] 71 | fn test_scan_limit_5() { 72 | let rt = tokio::runtime::Runtime::new().unwrap(); 73 | async fn example() { 74 | let client = Test::client(Region::Custom { 75 | endpoint: "http://localhost:8000".into(), 76 | name: "ap-northeast-1".into(), 77 | }); 78 | let res = client.scan().limit(5).run().await; 79 | assert_eq!(res.unwrap().items.len(), 5); 80 | } 81 | rt.block_on(example()); 82 | } 83 | 84 | #[test] 85 | fn test_scan_no_limit() { 86 | let rt = tokio::runtime::Runtime::new().unwrap(); 87 | async fn example() { 88 | let client = Test::client(Region::Custom { 89 | endpoint: "http://localhost:8000".into(), 90 | name: "ap-northeast-1".into(), 91 | }); 92 | let res = client.scan().run().await; 93 | assert_eq!(res.unwrap().items.len(), 10); 94 | } 95 | rt.block_on(example()); 96 | } 97 | 98 | #[test] 99 | fn test_scan_over_limit() { 100 | let rt = tokio::runtime::Runtime::new().unwrap(); 101 | async fn example() { 102 | let client = Test::client(Region::Custom { 103 | 
endpoint: "http://localhost:8000".into(), 104 | name: "ap-northeast-1".into(), 105 | }); 106 | let res = client.scan().limit(11).run().await; 107 | assert_eq!(res.unwrap().items.len(), 10); 108 | } 109 | rt.block_on(example()); 110 | } 111 | 112 | #[derive(Raiden)] 113 | #[raiden(table_name = "Project")] 114 | #[raiden(rename_all = "camelCase")] 115 | #[allow(dead_code)] 116 | pub struct Project { 117 | #[raiden(partition_key)] 118 | id: String, 119 | org_id: String, 120 | updated_at: String, 121 | } 122 | 123 | #[test] 124 | fn test_scan_with_renamed() { 125 | let rt = tokio::runtime::Runtime::new().unwrap(); 126 | async fn example() { 127 | let client = Project::client(Region::Custom { 128 | endpoint: "http://localhost:8000".into(), 129 | name: "ap-northeast-1".into(), 130 | }); 131 | let res = client.scan().limit(11).run().await; 132 | assert_eq!(res.unwrap().items.len(), 10); 133 | } 134 | rt.block_on(example()); 135 | } 136 | 137 | #[derive(Raiden, Debug, PartialEq)] 138 | #[raiden(table_name = "ScanTestData0")] 139 | pub struct ScanTestData0a { 140 | #[raiden(partition_key)] 141 | id: String, 142 | name: String, 143 | } 144 | 145 | #[test] 146 | fn test_scan_for_projection_expression() { 147 | let rt = tokio::runtime::Runtime::new().unwrap(); 148 | async fn example() { 149 | let client = ScanTestData0a::client(Region::Custom { 150 | endpoint: "http://localhost:8000".into(), 151 | name: "ap-northeast-1".into(), 152 | }); 153 | let res = client.scan().run().await; 154 | 155 | assert_eq!( 156 | res.unwrap(), 157 | scan::ScanOutput { 158 | consumed_capacity: None, 159 | count: Some(1), 160 | items: vec![ScanTestData0a { 161 | id: "scanId0".to_owned(), 162 | name: "scanAlice".to_owned(), 163 | }], 164 | last_evaluated_key: None, 165 | scanned_count: Some(1), 166 | } 167 | ) 168 | } 169 | rt.block_on(example()); 170 | } 171 | 172 | #[derive(Raiden, Debug, PartialEq)] 173 | #[raiden(table_name = "ScanLargeDataTest")] 174 | pub struct ScanLargeDataTest { 175 | #[raiden(partition_key)] 176 | id: String, 177 | ref_id: String, 178 | name: String, 179 | } 180 | 181 | #[test] 182 | fn should_be_scan_when_the_size_is_1mb_or_larger() { 183 | let rt = tokio::runtime::Runtime::new().unwrap(); 184 | async fn example() { 185 | let client = ScanLargeDataTest::client(Region::Custom { 186 | endpoint: "http://localhost:8000".into(), 187 | name: "ap-northeast-1".into(), 188 | }); 189 | let res = client.scan().run().await; 190 | assert_eq!(res.unwrap().items.len(), 100) 191 | } 192 | rt.block_on(example()); 193 | } 194 | 195 | #[derive(Raiden, Debug)] 196 | #[raiden(table_name = "ScanWithFilterTestData0")] 197 | #[allow(dead_code)] 198 | pub struct Scan { 199 | #[raiden(partition_key)] 200 | id: String, 201 | name: String, 202 | year: usize, 203 | num: usize, 204 | option: Option, 205 | } 206 | 207 | #[tokio::test] 208 | async fn test_simple_filter() { 209 | let client = Scan::client(Region::Custom { 210 | endpoint: "http://localhost:8000".into(), 211 | name: "ap-northeast-1".into(), 212 | }); 213 | let filter = Scan::filter_expression(Scan::num()).eq(1000); 214 | let res = client.scan().filter(filter).run().await.unwrap(); 215 | assert_eq!(res.items.len(), 50); 216 | } 217 | 218 | #[tokio::test] 219 | async fn test_size_filter() { 220 | let client = Scan::client(Region::Custom { 221 | endpoint: "http://localhost:8000".into(), 222 | name: "ap-northeast-1".into(), 223 | }); 224 | let filter = Scan::filter_expression(Scan::name()).size().eq(10); 225 | let res = 
client.scan().filter(filter).run().await.unwrap(); 226 | assert_eq!(res.items.len(), 10); 227 | } 228 | 229 | #[tokio::test] 230 | async fn test_or_with_contain_filter() { 231 | let client = Scan::client(Region::Custom { 232 | endpoint: "http://localhost:8000".into(), 233 | name: "ap-northeast-1".into(), 234 | }); 235 | let filter = Scan::filter_expression(Scan::num()) 236 | .eq(1000) 237 | .or(Scan::filter_expression(Scan::id()).contains("scanId50")); 238 | let res = client.scan().filter(filter).run().await.unwrap(); 239 | assert_eq!(res.items.len(), 51); 240 | } 241 | 242 | #[tokio::test] 243 | async fn test_attribute_exists_filter() { 244 | let client = Scan::client(Region::Custom { 245 | endpoint: "http://localhost:8000".into(), 246 | name: "ap-northeast-1".into(), 247 | }); 248 | let filter = Scan::filter_expression(Scan::option()).attribute_exists(); 249 | let res = client.scan().filter(filter).run().await.unwrap(); 250 | assert_eq!(res.items.len(), 50); 251 | } 252 | 253 | #[tokio::test] 254 | async fn test_attribute_not_exists_filter() { 255 | let client = Scan::client(Region::Custom { 256 | endpoint: "http://localhost:8000".into(), 257 | name: "ap-northeast-1".into(), 258 | }); 259 | let filter = Scan::filter_expression(Scan::option()).attribute_not_exists(); 260 | let res = client.scan().filter(filter).run().await.unwrap(); 261 | assert_eq!(res.items.len(), 50); 262 | } 263 | 264 | #[tokio::test] 265 | async fn test_attribute_type_filter() { 266 | let client = Scan::client(Region::Custom { 267 | endpoint: "http://localhost:8000".into(), 268 | name: "ap-northeast-1".into(), 269 | }); 270 | let filter = 271 | Scan::filter_expression(Scan::option()).attribute_type(raiden::AttributeType::S); 272 | let res = client.scan().filter(filter).run().await.unwrap(); 273 | assert_eq!(res.items.len(), 50); 274 | } 275 | } 276 | -------------------------------------------------------------------------------- /raiden/tests/all/transact_write.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | 4 | #[cfg(test)] 5 | use pretty_assertions::assert_eq; 6 | use raiden::*; 7 | 8 | #[allow(dead_code)] 9 | #[derive(Raiden)] 10 | #[raiden(table_name = "user")] 11 | #[derive(Debug, Clone)] 12 | pub struct User { 13 | #[raiden(partition_key)] 14 | id: String, 15 | name: String, 16 | } 17 | 18 | #[test] 19 | fn test_minimum_transact_write() { 20 | let rt = tokio::runtime::Runtime::new().unwrap(); 21 | async fn example() { 22 | let tx = ::raiden::WriteTx::new(Region::Custom { 23 | endpoint: "http://localhost:8000".into(), 24 | name: "ap-northeast-1".into(), 25 | }); 26 | let cond = User::condition().attr_not_exists(User::id()); 27 | let input = User::put_item_builder() 28 | .id("testId".to_owned()) 29 | .name("bokuweb".to_owned()) 30 | .build(); 31 | let input2 = User::put_item_builder() 32 | .id("testId2".to_owned()) 33 | .name("bokuweb".to_owned()) 34 | .build(); 35 | assert_eq!( 36 | tx.put(User::put(input).condition(cond)) 37 | .put(User::put(input2)) 38 | .run() 39 | .await 40 | .is_ok(), 41 | true, 42 | ) 43 | } 44 | rt.block_on(example()); 45 | } 46 | 47 | #[test] 48 | fn test_transact_write_put_and_update() { 49 | let rt = tokio::runtime::Runtime::new().unwrap(); 50 | async fn example() { 51 | let tx = ::raiden::WriteTx::new(Region::Custom { 52 | endpoint: "http://localhost:8000".into(), 53 | name: "ap-northeast-1".into(), 54 | }); 55 | let input = User::put_item_builder() 56 | .id("testId".to_owned()) 57 | .name("bokuweb".to_owned()) 
58 | .build(); 59 | let set_expression = User::update_expression() 60 | .set(User::name()) 61 | .value("updated!!"); 62 | 63 | let res = tx 64 | .put(User::put(input)) 65 | .update(User::update("testId2").set(set_expression)) 66 | .run() 67 | .await; 68 | assert_eq!(res.is_ok(), true); 69 | } 70 | rt.block_on(example()); 71 | } 72 | 73 | #[test] 74 | fn test_transact_write_with_prefix_suffix() { 75 | let rt = tokio::runtime::Runtime::new().unwrap(); 76 | async fn example() { 77 | let tx = ::raiden::WriteTx::new(Region::Custom { 78 | endpoint: "http://localhost:8000".into(), 79 | name: "ap-northeast-1".into(), 80 | }); 81 | let input = User::put_item_builder() 82 | .id("testId".to_owned()) 83 | .name("bokuweb".to_owned()) 84 | .build(); 85 | assert_eq!( 86 | tx.put( 87 | User::put(input) 88 | .table_prefix("test-") 89 | .table_suffix("-staging"), 90 | ) 91 | .run() 92 | .await 93 | .is_ok(), 94 | true, 95 | ) 96 | } 97 | rt.block_on(example()); 98 | } 99 | 100 | use std::sync::atomic::{AtomicUsize, Ordering}; 101 | 102 | static RETRY_COUNT: AtomicUsize = AtomicUsize::new(0); 103 | struct MyRetryStrategy; 104 | 105 | impl RetryStrategy for MyRetryStrategy { 106 | fn should_retry(&self, _error: &RaidenError) -> bool { 107 | RETRY_COUNT.fetch_add(1, Ordering::Relaxed); 108 | true 109 | } 110 | 111 | fn policy(&self) -> Policy { 112 | Policy::Limit(3) 113 | } 114 | } 115 | 116 | #[test] 117 | fn test_retry() { 118 | let rt = tokio::runtime::Runtime::new().unwrap(); 119 | async fn example() { 120 | let tx = ::raiden::WriteTx::new(Region::Custom { 121 | endpoint: "http://localhost:8000".into(), 122 | name: "ap-northeast-1".into(), 123 | }); 124 | let input = User::put_item_builder() 125 | .id("testId".to_owned()) 126 | .name("bokuweb".to_owned()) 127 | .build(); 128 | assert_eq!( 129 | tx.with_retries(Box::new(MyRetryStrategy)) 130 | .put(User::put(input).table_prefix("unknown")) 131 | .run() 132 | .await 133 | .is_err(), 134 | true, 135 | ) 136 | } 137 | rt.block_on(example()); 138 | assert_eq!(RETRY_COUNT.load(Ordering::Relaxed), 4) 139 | } 140 | 141 | #[derive(Raiden, Debug, Clone, PartialEq)] 142 | pub struct TxDeleteTestData0 { 143 | #[raiden(partition_key)] 144 | id: String, 145 | name: String, 146 | } 147 | 148 | #[test] 149 | fn test_transact_delete_and_put() { 150 | let rt = tokio::runtime::Runtime::new().unwrap(); 151 | async fn example() { 152 | let tx = ::raiden::WriteTx::new(Region::Custom { 153 | endpoint: "http://localhost:8000".into(), 154 | name: "ap-northeast-1".into(), 155 | }); 156 | let input = TxDeleteTestData0::put_item_builder() 157 | .id("testId".to_owned()) 158 | .name("bokuweb".to_owned()) 159 | .build(); 160 | assert_eq!( 161 | tx.put(TxDeleteTestData0::put(input)) 162 | .delete(TxDeleteTestData0::delete("id0")) 163 | .run() 164 | .await 165 | .is_ok(), 166 | true, 167 | ); 168 | 169 | let client = TxDeleteTestData0::client(Region::Custom { 170 | endpoint: "http://localhost:8000".into(), 171 | name: "ap-northeast-1".into(), 172 | }); 173 | let res = client.get("id0").run().await; 174 | assert_eq!( 175 | res.unwrap_err(), 176 | RaidenError::ResourceNotFound("resource not found".to_owned()) 177 | ); 178 | let res = client.get("testId").run().await; 179 | assert_eq!( 180 | res.unwrap().item, 181 | TxDeleteTestData0 { 182 | id: "testId".to_owned(), 183 | name: "bokuweb".to_owned() 184 | } 185 | ); 186 | } 187 | rt.block_on(example()); 188 | } 189 | 190 | #[derive(Raiden, Debug, Clone, PartialEq)] 191 | pub struct TxConditionalCheckTestData0 { 192 | #[raiden(partition_key)] 
193 | id: String, 194 | name: String, 195 | } 196 | 197 | #[derive(Raiden, Debug, Clone, PartialEq)] 198 | pub struct TxConditionalCheckTestData1 { 199 | #[raiden(partition_key)] 200 | id: String, 201 | name: String, 202 | } 203 | 204 | #[test] 205 | fn should_succeed_to_put_when_condition_check_ok() { 206 | let rt = tokio::runtime::Runtime::new().unwrap(); 207 | async fn example() { 208 | let tx = ::raiden::WriteTx::new(Region::Custom { 209 | endpoint: "http://localhost:8000".into(), 210 | name: "ap-northeast-1".into(), 211 | }); 212 | let input = TxConditionalCheckTestData0::put_item_builder() 213 | .id("testId0".to_owned()) 214 | .name("bokuweb".to_owned()) 215 | .build(); 216 | let cond = TxConditionalCheckTestData1::condition() 217 | .attr_exists(TxConditionalCheckTestData1::id()); 218 | assert_eq!( 219 | tx.put(TxConditionalCheckTestData0::put(input)) 220 | .condition_check( 221 | TxConditionalCheckTestData1::condition_check("id1").condition(cond) 222 | ) 223 | .run() 224 | .await 225 | .is_ok(), 226 | true, 227 | ); 228 | 229 | let client = TxConditionalCheckTestData0::client(Region::Custom { 230 | endpoint: "http://localhost:8000".into(), 231 | name: "ap-northeast-1".into(), 232 | }); 233 | let res = client.get("testId0").run().await; 234 | assert_eq!( 235 | res.unwrap().item, 236 | TxConditionalCheckTestData0 { 237 | id: "testId0".to_owned(), 238 | name: "bokuweb".to_owned() 239 | } 240 | ); 241 | } 242 | rt.block_on(example()); 243 | } 244 | 245 | #[test] 246 | fn should_fail_to_put_when_condition_check_ng() { 247 | let rt = tokio::runtime::Runtime::new().unwrap(); 248 | async fn example() { 249 | let tx = ::raiden::WriteTx::new(Region::Custom { 250 | endpoint: "http://localhost:8000".into(), 251 | name: "ap-northeast-1".into(), 252 | }); 253 | let input = TxConditionalCheckTestData0::put_item_builder() 254 | .id("testId1".to_owned()) 255 | .name("bokuweb".to_owned()) 256 | .build(); 257 | let cond = TxConditionalCheckTestData1::condition() 258 | .attr_not_exists(TxConditionalCheckTestData1::id()); 259 | 260 | let res = tx 261 | .put(TxConditionalCheckTestData0::put(input)) 262 | .condition_check( 263 | TxConditionalCheckTestData1::condition_check("id1").condition(cond), 264 | ) 265 | .run() 266 | .await; 267 | assert_eq!(res.is_err(), true,); 268 | assert_eq!( 269 | res.unwrap_err(), 270 | RaidenError::TransactionCanceled { 271 | reasons: RaidenTransactionCancellationReasons(vec![ 272 | None, 273 | Some(RaidenTransactionCancellationReason::ConditionalCheckFailed), 274 | ]), 275 | } 276 | ); 277 | } 278 | rt.block_on(example()); 279 | } 280 | } 281 | -------------------------------------------------------------------------------- /raiden/tests/mod.rs: -------------------------------------------------------------------------------- 1 | mod all; 2 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "config:base" 4 | ] 5 | } 6 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | # https://rust-lang.github.io/rustup/overrides.html#the-toolchain-file 2 | [toolchain] 3 | channel = "1.80.0" 4 | components = ["cargo", "clippy", "rustfmt", "rust-analyzer"] 5 | targets = [] 6 | -------------------------------------------------------------------------------- /setup/deps.ts: 
-------------------------------------------------------------------------------- 1 | export { 2 | AttributeValue, 3 | CreateTableCommand, 4 | DynamoDBClient, 5 | PutItemCommand, 6 | } from "https://esm.sh/@aws-sdk/client-dynamodb@3.48.0"; 7 | 8 | export type { 9 | CreateTableCommandInput, 10 | PutItemCommandInput, 11 | } from "https://esm.sh/@aws-sdk/client-dynamodb@3.48.0"; 12 | -------------------------------------------------------------------------------- /setup/dynamo_util.ts: -------------------------------------------------------------------------------- 1 | import { 2 | AttributeValue, 3 | CreateTableCommand, 4 | CreateTableCommandInput, 5 | DynamoDBClient, 6 | PutItemCommand, 7 | PutItemCommandInput, 8 | } from "./deps.ts"; 9 | 10 | export type CreateAndPut = { 11 | table: CreateTableCommandInput; 12 | items: Array<{ 13 | [key: string]: AttributeValue; 14 | }>; 15 | }; 16 | 17 | export async function createTableAndPutItems( 18 | client: DynamoDBClient, 19 | { table, items }: CreateAndPut, 20 | ) { 21 | await createTable(client, table); 22 | 23 | // NOTE: Running `put` operations concurrently with `Promise.all` would lead to running out of write buffer. 24 | for (const item of items) { 25 | await put(client, { 26 | TableName: table.TableName, 27 | Item: item, 28 | }); 29 | } 30 | } 31 | 32 | export function getCredFromEnv(): { 33 | accessKeyId: string; 34 | secretAccessKey: string; 35 | } { 36 | const accessKeyId = Deno.env.get("AWS_ACCESS_KEY_ID"); 37 | const secretAccessKey = Deno.env.get("AWS_SECRET_ACCESS_KEY"); 38 | 39 | if (accessKeyId === undefined || secretAccessKey === undefined) { 40 | throw new Error( 41 | "Failed to get aws credentials. Make sure environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` are set.", 42 | ); 43 | } 44 | 45 | return { accessKeyId, secretAccessKey }; 46 | } 47 | 48 | async function createTable( 49 | client: DynamoDBClient, 50 | input: CreateTableCommandInput, 51 | ) { 52 | await client.send(new CreateTableCommand(input)); 53 | } 54 | 55 | async function put(client: DynamoDBClient, input: PutItemCommandInput) { 56 | await client.send(new PutItemCommand(input)); 57 | } 58 | -------------------------------------------------------------------------------- /setup/fixtures/batch_delete_test_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const batchDeleteTest0: CreateAndPut = { 4 | table: { 5 | TableName: "BatchDeleteTest0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [...Array(101).keys()].map((i) => { 11 | return { 12 | id: { S: `id${i}` }, 13 | name: { S: "bob" }, 14 | }; 15 | }), 16 | }; 17 | -------------------------------------------------------------------------------- /setup/fixtures/batch_delete_test_1.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const batchDeleteTest1: CreateAndPut = { 4 | table: { 5 | TableName: "BatchDeleteTest1", 6 | KeySchema: [ 7 | { AttributeName: "id", KeyType: "HASH" }, 8 | { AttributeName: "year", KeyType: "RANGE" }, 9 | ], 10 | AttributeDefinitions: [ 11 | { AttributeName: "id", AttributeType: "S" }, 12 | { AttributeName: "year", AttributeType: "N" }, 13 | ], 14 | ProvisionedThroughput: { 
ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 15 | }, 16 | items: [...Array(101).keys()].map((i) => { 17 | const year = 1999 + i; 18 | return { 19 | id: { S: `id${i}` }, 20 | name: { S: "alice" }, 21 | year: { N: year.toString() }, 22 | }; 23 | }), 24 | }; 25 | -------------------------------------------------------------------------------- /setup/fixtures/batch_test_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const batchTest0: CreateAndPut = { 4 | table: { 5 | TableName: "BatchTest0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [...Array(101).keys()].map((i) => { 11 | return { 12 | id: { S: `id${i}` }, 13 | name: { S: "bob" }, 14 | }; 15 | }), 16 | }; 17 | -------------------------------------------------------------------------------- /setup/fixtures/batch_test_1.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const batchTest1: CreateAndPut = { 4 | table: { 5 | TableName: "BatchTest1", 6 | KeySchema: [ 7 | { AttributeName: "id", KeyType: "HASH" }, 8 | { AttributeName: "year", KeyType: "RANGE" }, 9 | ], 10 | AttributeDefinitions: [ 11 | { AttributeName: "id", AttributeType: "S" }, 12 | { AttributeName: "year", AttributeType: "N" }, 13 | ], 14 | ProvisionedThroughput: { ReadCapacityUnits: 50, WriteCapacityUnits: 50 }, 15 | }, 16 | items: [...Array(250).keys()].map((i) => { 17 | return { 18 | id: { S: `id${i}` }, 19 | name: { S: "bob" }, 20 | year: { N: `${2000 + i}` }, 21 | num: { N: `${i}` }, 22 | }; 23 | }), 24 | }; 25 | -------------------------------------------------------------------------------- /setup/fixtures/batch_test_2.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const batchTest2: CreateAndPut = { 4 | table: { 5 | TableName: "BatchTest2", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 50, WriteCapacityUnits: 50 }, 9 | }, 10 | items: [...Array(250).keys()].map((i) => { 11 | return { 12 | id: { S: `id${i}` }, 13 | name: { S: [...new Array(100000)].map((_) => "test").join("") }, 14 | }; 15 | }), 16 | }; 17 | -------------------------------------------------------------------------------- /setup/fixtures/delete_test_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const deleteTest0: CreateAndPut = { 4 | table: { 5 | TableName: "DeleteTest0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { 12 | id: { S: "id0" }, 13 | name: { S: "bokuweb" }, 14 | number_set: { NS: ["1"] }, 15 | }, 16 | { 17 | id: { S: "id1" }, 18 | name: { S: "bokuweb" }, 19 | removable: { BOOL: true }, 20 | }, 21 | ], 22 | }; 23 | -------------------------------------------------------------------------------- /setup/fixtures/delete_test_1.ts: 
-------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const deleteTest1: CreateAndPut = { 4 | table: { 5 | TableName: "DeleteTest1", 6 | KeySchema: [ 7 | { AttributeName: "id", KeyType: "HASH" }, 8 | { AttributeName: "year", KeyType: "RANGE" }, 9 | ], 10 | AttributeDefinitions: [ 11 | { AttributeName: "id", AttributeType: "S" }, 12 | { AttributeName: "year", AttributeType: "N" }, 13 | ], 14 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 15 | }, 16 | items: [ 17 | { 18 | id: { S: "id0" }, 19 | name: { S: "alice" }, 20 | year: { N: "1999" }, 21 | }, 22 | ], 23 | }; 24 | -------------------------------------------------------------------------------- /setup/fixtures/empty_put_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const emptyPutTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "EmptyPutTestData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [], 11 | }; 12 | -------------------------------------------------------------------------------- /setup/fixtures/empty_set_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const emptySetTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "EmptySetTestData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { 12 | id: { S: "id0" }, 13 | nset: { NS: ["2000"] }, 14 | sset: { SS: ["Hello"] }, 15 | }, 16 | { 17 | id: { S: "id1" }, 18 | nset: { NS: ["2001"] }, 19 | sset: { SS: ["World"] }, 20 | }, 21 | ], 22 | }; 23 | -------------------------------------------------------------------------------- /setup/fixtures/empty_string_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const emptyStringTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "EmptyStringTestData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { 12 | id: { S: "id0" }, 13 | name: { NULL: true }, 14 | }, 15 | ], 16 | }; 17 | -------------------------------------------------------------------------------- /setup/fixtures/float_test.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const floatTest: CreateAndPut = { 4 | table: { 5 | TableName: "FloatTest", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { 12 | id: { S: "primary_key" }, 13 | float32: { N: "1.23" }, 14 | float64: { N: "2.34" }, 15 | }, 16 | ], 17 | }; 18 | 
-------------------------------------------------------------------------------- /setup/fixtures/last_evaluate_key_data.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const lastEvaluateKeyData: CreateAndPut = { 4 | table: { 5 | TableName: "LastEvaluateKeyData", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [ 8 | { AttributeName: "id", AttributeType: "S" }, 9 | { AttributeName: "ref_id", AttributeType: "S" }, 10 | ], 11 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 12 | GlobalSecondaryIndexes: [ 13 | { 14 | IndexName: "testGSI", 15 | KeySchema: [{ AttributeName: "ref_id", KeyType: "HASH" }], 16 | Projection: { 17 | ProjectionType: "ALL", 18 | }, 19 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 20 | }, 21 | ], 22 | }, 23 | items: [...Array(10).keys()].map((i) => { 24 | return { 25 | id: { S: `id${i}` }, 26 | ref_id: { S: `id0` }, 27 | long_text: { S: new Array(100000).fill("Test").join("") }, 28 | }; 29 | }), 30 | }; 31 | -------------------------------------------------------------------------------- /setup/fixtures/project.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const project: CreateAndPut = { 4 | table: { 5 | TableName: "Project", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [ 8 | { AttributeName: "id", AttributeType: "S" }, 9 | { AttributeName: "orgId", AttributeType: "S" }, 10 | { AttributeName: "updatedAt", AttributeType: "S" }, 11 | ], 12 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 13 | GlobalSecondaryIndexes: [ 14 | { 15 | IndexName: "orgIndex", 16 | KeySchema: [ 17 | { 18 | AttributeName: "orgId", 19 | KeyType: "HASH", 20 | }, 21 | { 22 | AttributeName: "updatedAt", 23 | KeyType: "RANGE", 24 | }, 25 | ], 26 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 27 | Projection: { 28 | ProjectionType: "ALL", 29 | }, 30 | }, 31 | ], 32 | }, 33 | items: [...Array(10).keys()].map((i) => { 34 | return { 35 | id: { S: `id${i}` }, 36 | orgId: { S: `myOrg` }, 37 | updatedAt: { S: "2019-03-11T00:00+0900" }, 38 | }; 39 | }), 40 | }; 41 | -------------------------------------------------------------------------------- /setup/fixtures/put_item_condition_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const putItemConditionData0: CreateAndPut = { 4 | table: { 5 | TableName: "PutItemConditionData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { 12 | id: { S: "id0" }, 13 | name: { S: "bokuweb" }, 14 | num: { N: "1000" }, 15 | }, 16 | ], 17 | }; 18 | -------------------------------------------------------------------------------- /setup/fixtures/query_large_data_test.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const queryLargeDataTest: CreateAndPut = { 4 | table: { 5 | TableName: "QueryLargeDataTest", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | 
AttributeDefinitions: [ 8 | { AttributeName: "id", AttributeType: "S" }, 9 | { AttributeName: "ref_id", AttributeType: "S" }, 10 | ], 11 | ProvisionedThroughput: { ReadCapacityUnits: 50, WriteCapacityUnits: 50 }, 12 | GlobalSecondaryIndexes: [ 13 | { 14 | IndexName: "testGSI", 15 | KeySchema: [{ AttributeName: "ref_id", KeyType: "HASH" }], 16 | Projection: { 17 | ProjectionType: "ALL", 18 | }, 19 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 20 | }, 21 | ], 22 | }, 23 | items: [...Array(100).keys()].map((i) => { 24 | return { 25 | id: { S: `id${i}` }, 26 | ref_id: { S: "ref" }, 27 | name: { S: [...new Array(100000)].map((_) => "test").join("") }, // 400KB 28 | }; 29 | }), 30 | }; 31 | -------------------------------------------------------------------------------- /setup/fixtures/query_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const queryTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "QueryTestData0", 6 | KeySchema: [ 7 | { AttributeName: "id", KeyType: "HASH" }, 8 | { AttributeName: "year", KeyType: "RANGE" }, 9 | ], 10 | AttributeDefinitions: [ 11 | { AttributeName: "id", AttributeType: "S" }, 12 | { AttributeName: "year", AttributeType: "N" }, 13 | ], 14 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 15 | }, 16 | items: [ 17 | { 18 | id: { S: "id0" }, 19 | name: { S: "john" }, 20 | year: { N: "1999" }, 21 | num: { N: "1000" }, 22 | }, 23 | { 24 | id: { S: "id0" }, 25 | name: { S: "john" }, 26 | year: { N: "2000" }, 27 | num: { N: "2000" }, 28 | }, 29 | { 30 | id: { S: "id1" }, 31 | name: { S: "bob" }, 32 | year: { N: "2003" }, 33 | num: { N: "300" }, 34 | }, 35 | { 36 | id: { S: "id2" }, 37 | name: { S: "alice" }, 38 | year: { N: "2013" }, 39 | num: { N: "4000" }, 40 | }, 41 | { 42 | id: { S: "id3" }, 43 | name: { S: "bar0" }, 44 | year: { N: "1987" }, 45 | num: { N: "4000" }, 46 | }, 47 | { 48 | id: { S: "id3" }, 49 | name: { S: "bar1" }, 50 | year: { N: "2000" }, 51 | num: { N: "4000" }, 52 | }, 53 | { 54 | id: { S: "id3" }, 55 | name: { S: "bar2" }, 56 | year: { N: "2029" }, 57 | num: { N: "4000" }, 58 | }, 59 | { 60 | id: { S: "id4" }, 61 | name: { S: "bar0" }, 62 | year: { N: "2029" }, 63 | num: { N: "4000" }, 64 | }, 65 | { 66 | id: { S: "id4" }, 67 | name: { S: "bar1" }, 68 | year: { N: "2000" }, 69 | num: { N: "4000" }, 70 | option: { S: "option2" }, 71 | }, 72 | { 73 | id: { S: "id4" }, 74 | name: { S: "bob" }, 75 | year: { N: "1999" }, 76 | num: { N: "4000" }, 77 | option: { S: "option2" }, 78 | }, 79 | { 80 | id: { S: "id5" }, 81 | name: { S: "bob" }, 82 | year: { N: "1999" }, 83 | num: { N: "4000" }, 84 | option: { S: "option2" }, 85 | }, 86 | { 87 | id: { S: "id5" }, 88 | name: { S: "bob0" }, 89 | year: { N: "2000" }, 90 | num: { N: "4000" }, 91 | option: { S: "option2" }, 92 | }, 93 | { 94 | id: { S: "id5" }, 95 | name: { S: "bob1" }, 96 | year: { N: "3000" }, 97 | num: { N: "4000" }, 98 | option: { S: "option2" }, 99 | }, 100 | ], 101 | }; 102 | -------------------------------------------------------------------------------- /setup/fixtures/query_test_data_1.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const queryTestData1: CreateAndPut = { 4 | table: { 5 | TableName: "QueryTestData1", 6 | KeySchema: [ 7 | { AttributeName: "id", KeyType: "HASH" }, 8 | { AttributeName: 
"name", KeyType: "RANGE" }, 9 | ], 10 | AttributeDefinitions: [ 11 | { AttributeName: "id", AttributeType: "S" }, 12 | { AttributeName: "name", AttributeType: "S" }, 13 | ], 14 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 15 | }, 16 | items: [ 17 | { id: { S: "id0" }, name: { S: "john" } }, 18 | { id: { S: "id0" }, name: { S: "jack" } }, 19 | { id: { S: "id0" }, name: { S: "bob" } }, 20 | ], 21 | }; 22 | -------------------------------------------------------------------------------- /setup/fixtures/rename_all_camel_case_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const renameAllCamelCaseTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "RenameAllCamelCaseTestData0", 6 | KeySchema: [{ AttributeName: "partitionKey", KeyType: "HASH" }], 7 | AttributeDefinitions: [ 8 | { 9 | AttributeName: "partitionKey", 10 | AttributeType: "S", 11 | }, 12 | ], 13 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 14 | }, 15 | items: [ 16 | { 17 | partitionKey: { S: "id0" }, 18 | fooBar: { S: "john" }, 19 | projectId: { N: "1" }, 20 | }, 21 | { partitionKey: { S: "id1" }, fooBar: { S: "bob" }, projectId: { N: "2" } }, 22 | ], 23 | }; 24 | -------------------------------------------------------------------------------- /setup/fixtures/rename_all_pascal_case_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const renameAllPascalCaseTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "RenameAllPascalCaseTestData0", 6 | KeySchema: [{ AttributeName: "PartitionKey", KeyType: "HASH" }], 7 | AttributeDefinitions: [ 8 | { 9 | AttributeName: "PartitionKey", 10 | AttributeType: "S", 11 | }, 12 | ], 13 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 14 | }, 15 | items: [ 16 | { 17 | PartitionKey: { S: "id0" }, 18 | FooBar: { S: "john" }, 19 | ProjectId: { N: "1" }, 20 | }, 21 | { PartitionKey: { S: "id1" }, FooBar: { S: "bob" }, ProjectId: { N: "2" } }, 22 | ], 23 | }; 24 | -------------------------------------------------------------------------------- /setup/fixtures/rename_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const renameTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "RenameTestData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { id: { S: "id0" }, name: { S: "john" }, renamed: { N: "1999" } }, 12 | { id: { S: "id1" }, name: { S: "bob" }, renamed: { N: "2003" } }, 13 | ], 14 | }; 15 | -------------------------------------------------------------------------------- /setup/fixtures/reserved_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const reservedTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "ReservedTestData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | 
items: [{ id: { S: "id0" }, type: { S: "reserved" } }], 11 | }; 12 | -------------------------------------------------------------------------------- /setup/fixtures/scan_large_data_test.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const scanLargeDataTest: CreateAndPut = { 4 | table: { 5 | TableName: "ScanLargeDataTest", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 50, WriteCapacityUnits: 50 }, 9 | }, 10 | items: [...Array(100).keys()].map((i) => { 11 | return { 12 | id: { S: `id${i}` }, 13 | ref_id: { S: "ref" }, 14 | name: { S: [...new Array(100000)].map((_) => "test").join("") }, // 400KB 15 | }; 16 | }), 17 | }; 18 | -------------------------------------------------------------------------------- /setup/fixtures/scan_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const scanTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "ScanTestData0", 6 | KeySchema: [ 7 | { AttributeName: "id", KeyType: "HASH" }, 8 | { AttributeName: "year", KeyType: "RANGE" }, 9 | ], 10 | AttributeDefinitions: [ 11 | { AttributeName: "id", AttributeType: "S" }, 12 | { AttributeName: "year", AttributeType: "N" }, 13 | ], 14 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 15 | }, 16 | items: [ 17 | { 18 | id: { S: "scanId0" }, 19 | name: { S: "scanAlice" }, 20 | year: { N: "2001" }, 21 | num: { N: "2000" }, 22 | }, 23 | ], 24 | }; 25 | -------------------------------------------------------------------------------- /setup/fixtures/scan_with_filter_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const scanWithFilterTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "ScanWithFilterTestData0", 6 | KeySchema: [ 7 | { AttributeName: "id", KeyType: "HASH" }, 8 | { AttributeName: "year", KeyType: "RANGE" }, 9 | ], 10 | AttributeDefinitions: [ 11 | { AttributeName: "id", AttributeType: "S" }, 12 | { AttributeName: "year", AttributeType: "N" }, 13 | ], 14 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 15 | }, 16 | items: [...new Array(100)].map((_, i) => { 17 | return { 18 | id: { S: `scanId${i}` }, 19 | name: { S: `scanAlice${i}` }, 20 | year: { N: "2001" }, 21 | num: { N: i % 2 ? "1000" : "2000" }, 22 | option: i % 2 ? 
{ S: `option${i}` } : null, 23 | }; 24 | }), 25 | }; 26 | -------------------------------------------------------------------------------- /setup/fixtures/test_user_staging.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const testUserStaging: CreateAndPut = { 4 | table: { 5 | TableName: "test-user-staging", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [ 8 | { AttributeName: "id", AttributeType: "S" }, 9 | { AttributeName: "orgId", AttributeType: "S" }, 10 | { AttributeName: "updatedAt", AttributeType: "S" }, 11 | ], 12 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 13 | GlobalSecondaryIndexes: [ 14 | { 15 | IndexName: "orgIndex", 16 | KeySchema: [ 17 | { 18 | AttributeName: "orgId", 19 | KeyType: "HASH", 20 | }, 21 | { 22 | AttributeName: "updatedAt", 23 | KeyType: "RANGE", 24 | }, 25 | ], 26 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 27 | Projection: { 28 | ProjectionType: "ALL", 29 | }, 30 | }, 31 | ], 32 | }, 33 | items: [], 34 | }; 35 | -------------------------------------------------------------------------------- /setup/fixtures/tx_conditional_check_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const txConditionalCheckTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "TxConditionalCheckTestData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { id: { S: "id0" }, name: { S: "john" }, renamed: { N: "1999" } }, 12 | { id: { S: "id1" }, name: { S: "bob" }, renamed: { N: "2003" } }, 13 | ], 14 | }; 15 | -------------------------------------------------------------------------------- /setup/fixtures/tx_conditional_check_test_data_1.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const txConditionalCheckTestData1: CreateAndPut = { 4 | table: { 5 | TableName: "TxConditionalCheckTestData1", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [{ id: { S: "id1" }, name: { S: "world" } }], 11 | }; 12 | -------------------------------------------------------------------------------- /setup/fixtures/tx_delete_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const txDeleteTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "TxDeleteTestData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [{ id: { S: "id0" }, name: { S: "hello" } }], 11 | }; 12 | -------------------------------------------------------------------------------- /setup/fixtures/update_add_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from 
"../dynamo_util.ts"; 2 | 3 | export const updateAddTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "UpdateAddTestData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { 12 | id: { S: "id0" }, 13 | sset: { SS: ["foo", "bar"] }, 14 | }, 15 | 16 | { 17 | id: { S: "id1" }, 18 | sset: { NULL: true }, 19 | }, 20 | 21 | { 22 | id: { S: "id2" }, 23 | }, 24 | ], 25 | }; 26 | -------------------------------------------------------------------------------- /setup/fixtures/update_delete_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const updateDeleteTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "UpdateDeleteTestData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { 12 | id: { S: "id0" }, 13 | sset: { SS: ["foo", "bar"] }, 14 | }, 15 | ], 16 | }; 17 | -------------------------------------------------------------------------------- /setup/fixtures/update_remove_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const updateRemoveTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "UpdateRemoveTestData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [{ id: { S: "id1" }, name: { S: "world" } }, { id: { S: "id2" } }], 11 | }; 12 | -------------------------------------------------------------------------------- /setup/fixtures/update_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const updateTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "UpdateTestData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { 12 | id: { S: "id0" }, 13 | name: { S: "john" }, 14 | age: { N: "12" }, 15 | num: { N: "1" }, 16 | }, 17 | { id: { S: "id1" }, name: { S: "bob" }, age: { N: "18" }, num: { N: "1" } }, 18 | ], 19 | }; 20 | -------------------------------------------------------------------------------- /setup/fixtures/update_test_data_1.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const updateTestData1: CreateAndPut = { 4 | table: { 5 | TableName: "UpdateTestData1", 6 | KeySchema: [ 7 | { AttributeName: "id", KeyType: "HASH" }, 8 | { AttributeName: "age", KeyType: "RANGE" }, 9 | ], 10 | AttributeDefinitions: [ 11 | { AttributeName: "id", AttributeType: "S" }, 12 | { AttributeName: "age", AttributeType: "N" }, 13 | ], 14 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 15 | }, 16 | items: [{ id: { S: "id0" }, name: { S: "john" }, age: { N: "36" } }], 17 | }; 18 | 
-------------------------------------------------------------------------------- /setup/fixtures/update_with_contains_in_set_condition.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const updateWithContainsInSetCondition: CreateAndPut = { 4 | table: { 5 | TableName: "UpdateWithContainsInSetCondition", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { 12 | id: { S: "id0" }, 13 | name: { S: "bokuweb" }, 14 | sset: { SS: ["Hello"] }, 15 | }, 16 | ], 17 | }; 18 | -------------------------------------------------------------------------------- /setup/fixtures/use_default_for_null_data.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const useDefaultForNull: CreateAndPut = { 4 | table: { 5 | TableName: "UseDefaultForNull", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { 12 | id: { S: "id0" }, 13 | flag: { NULL: true }, 14 | }, 15 | ], 16 | }; 17 | -------------------------------------------------------------------------------- /setup/fixtures/use_default_test_data_0.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const useDefaultTestData0: CreateAndPut = { 4 | table: { 5 | TableName: "UseDefaultTestData0", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [{ id: { S: "id0" } }], 11 | }; 12 | -------------------------------------------------------------------------------- /setup/fixtures/user.ts: -------------------------------------------------------------------------------- 1 | import type { CreateAndPut } from "../dynamo_util.ts"; 2 | 3 | export const user: CreateAndPut = { 4 | table: { 5 | TableName: "user", 6 | KeySchema: [{ AttributeName: "id", KeyType: "HASH" }], 7 | AttributeDefinitions: [{ AttributeName: "id", AttributeType: "S" }], 8 | ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 }, 9 | }, 10 | items: [ 11 | { 12 | id: { S: "user_primary_key" }, 13 | name: { S: "bokuweb" }, 14 | num_usize: { N: "42" }, 15 | num_u8: { N: "255" }, 16 | num_i8: { N: "-127" }, 17 | option_i16: { N: "-1" }, 18 | string_set: { SS: ["Hello"] }, 19 | number_set: { NS: ["1"] }, 20 | }, 21 | { 22 | id: { S: "id0" }, 23 | name: { S: "bokuweb" }, 24 | num: { N: "1000" }, 25 | }, 26 | ], 27 | }; 28 | -------------------------------------------------------------------------------- /setup/setup.ts: -------------------------------------------------------------------------------- 1 | import { DynamoDBClient } from "./deps.ts"; 2 | import { createTableAndPutItems, getCredFromEnv } from "./dynamo_util.ts"; 3 | import { user } from "./fixtures/user.ts"; 4 | import { floatTest } from "./fixtures/float_test.ts"; 5 | import { queryTestData0 } from "./fixtures/query_test_data_0.ts"; 6 | import { queryTestData1 } from 
"./fixtures/query_test_data_1.ts"; 7 | import { renameTestData0 } from "./fixtures/rename_test_data_0.ts"; 8 | import { renameAllCamelCaseTestData0 } from "./fixtures/rename_all_camel_case_test_data_0.ts"; 9 | import { renameAllPascalCaseTestData0 } from "./fixtures/rename_all_pascal_case_test_data_0.ts"; 10 | import { updateTestData0 } from "./fixtures/update_test_data_0.ts"; 11 | import { updateTestData1 } from "./fixtures/update_test_data_1.ts"; 12 | import { putItemConditionData0 } from "./fixtures/put_item_condition_data_0.ts"; 13 | import { lastEvaluateKeyData } from "./fixtures/last_evaluate_key_data.ts"; 14 | import { project } from "./fixtures/project.ts"; 15 | import { batchTest0 } from "./fixtures/batch_test_0.ts"; 16 | import { batchTest1 } from "./fixtures/batch_test_1.ts"; 17 | import { batchTest2 } from "./fixtures/batch_test_2.ts"; 18 | import { batchDeleteTest0 } from "./fixtures/batch_delete_test_0.ts"; 19 | import { batchDeleteTest1 } from "./fixtures/batch_delete_test_1.ts"; 20 | import { testUserStaging } from "./fixtures/test_user_staging.ts"; 21 | import { deleteTest0 } from "./fixtures/delete_test_0.ts"; 22 | import { deleteTest1 } from "./fixtures/delete_test_1.ts"; 23 | import { scanTestData0 } from "./fixtures/scan_test_data_0.ts"; 24 | import { scanWithFilterTestData0 } from "./fixtures/scan_with_filter_test_data_0.ts"; 25 | import { emptySetTestData0 } from "./fixtures/empty_set_test_data_0.ts"; 26 | import { emptyStringTestData0 } from "./fixtures/empty_string_test_data_0.ts"; 27 | import { useDefaultForNull } from "./fixtures/use_default_for_null_data.ts"; 28 | import { updateDeleteTestData0 } from "./fixtures/update_delete_test_data_0.ts"; 29 | import { updateAddTestData0 } from "./fixtures/update_add_test_data_0.ts"; 30 | import { emptyPutTestData0 } from "./fixtures/empty_put_test_data_0.ts"; 31 | import { reservedTestData0 } from "./fixtures/reserved_test_data_0.ts"; 32 | import { useDefaultTestData0 } from "./fixtures/use_default_test_data_0.ts"; 33 | import { txDeleteTestData0 } from "./fixtures/tx_delete_test_data_0.ts"; 34 | import { txConditionalCheckTestData0 } from "./fixtures/tx_conditional_check_test_data_0.ts"; 35 | import { txConditionalCheckTestData1 } from "./fixtures/tx_conditional_check_test_data_1.ts"; 36 | import { updateRemoveTestData0 } from "./fixtures/update_remove_test_data_0.ts"; 37 | import { updateWithContainsInSetCondition } from "./fixtures/update_with_contains_in_set_condition.ts"; 38 | import { queryLargeDataTest } from "./fixtures/query_large_data_test.ts"; 39 | import { scanLargeDataTest } from "./fixtures/scan_large_data_test.ts"; 40 | 41 | const client = new DynamoDBClient({ 42 | region: "ap-northeast-1", 43 | endpoint: "http://localhost:8000", 44 | credentials: getCredFromEnv(), 45 | }); 46 | 47 | const data = [ 48 | user, 49 | floatTest, 50 | queryTestData0, 51 | queryTestData1, 52 | renameTestData0, 53 | renameAllCamelCaseTestData0, 54 | renameAllPascalCaseTestData0, 55 | updateTestData0, 56 | updateTestData1, 57 | putItemConditionData0, 58 | lastEvaluateKeyData, 59 | project, 60 | batchTest0, 61 | batchTest1, 62 | batchTest2, 63 | batchDeleteTest0, 64 | batchDeleteTest1, 65 | testUserStaging, 66 | deleteTest0, 67 | deleteTest1, 68 | scanTestData0, 69 | scanWithFilterTestData0, 70 | emptySetTestData0, 71 | emptyStringTestData0, 72 | updateDeleteTestData0, 73 | updateAddTestData0, 74 | emptyPutTestData0, 75 | reservedTestData0, 76 | useDefaultTestData0, 77 | txDeleteTestData0, 78 | txConditionalCheckTestData0, 79 | 
txConditionalCheckTestData1, 80 | updateRemoveTestData0, 81 | updateWithContainsInSetCondition, 82 | queryLargeDataTest, 83 | scanLargeDataTest, 84 | useDefaultForNull, 85 | ]; 86 | 87 | // NOTE: Running these operations concurrently with `Promise.all` would exhaust the write buffer, so the tables are processed sequentially. 88 | for (const d of data) { 89 | console.log(`Processing ${d.table.TableName}...`); 90 | await createTableAndPutItems(client, d); 91 | } 92 | -------------------------------------------------------------------------------- /yarn.lock: -------------------------------------------------------------------------------- 1 | # THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 2 | # yarn lockfile v1 3 | 4 | 5 | --------------------------------------------------------------------------------
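
NOTE (editor's sketch): every fixture above conforms to the `CreateAndPut` shape imported from `setup/dynamo_util.ts`, and `setup/setup.ts` feeds each fixture to `createTableAndPutItems`. Since `dynamo_util.ts` is not reproduced in this section, the following is only a minimal sketch of what such a helper might look like, assuming `deps.ts` re-exports the AWS SDK v3 DynamoDB client pieces (`CreateTableCommand`, `PutItemCommand`, `waitUntilTableExists`, and the related types); the real implementation may differ.

// Hypothetical sketch of setup/dynamo_util.ts. Only CreateAndPut, createTableAndPutItems,
// and getCredFromEnv appear in the repository; everything else here is an assumption.
import {
  CreateTableCommand,
  PutItemCommand,
  waitUntilTableExists,
  type AttributeValue,
  type CreateTableCommandInput,
  type DynamoDBClient,
} from "./deps.ts";

// Table definition plus the items to seed it with, matching the fixture files above.
export type CreateAndPut = {
  table: CreateTableCommandInput;
  items: Record<string, AttributeValue>[];
};

// DynamoDB local accepts any non-empty credentials, so fall back to dummy values.
export function getCredFromEnv() {
  return {
    accessKeyId: Deno.env.get("AWS_ACCESS_KEY_ID") ?? "dummy",
    secretAccessKey: Deno.env.get("AWS_SECRET_ACCESS_KEY") ?? "dummy",
  };
}

export async function createTableAndPutItems(
  client: DynamoDBClient,
  { table, items }: CreateAndPut,
): Promise<void> {
  // Create the table, then wait until it is ACTIVE before writing items.
  await client.send(new CreateTableCommand(table));
  await waitUntilTableExists(
    { client, maxWaitTime: 30 },
    { TableName: table.TableName },
  );
  // Write items one at a time; the setup script deliberately avoids concurrent
  // writes to keep DynamoDB local from exhausting its write buffer.
  for (const item of items) {
    await client.send(
      new PutItemCommand({ TableName: table.TableName, Item: item }),
    );
  }
}

Under these assumptions, `setup.ts` runs as shown above: it constructs a `DynamoDBClient` pointed at the local endpoint (`http://localhost:8000`), then awaits `createTableAndPutItems` for each fixture in turn.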