├── .formatter.exs ├── .github ├── dependabot.yml └── workflows │ └── on-push.yml ├── .gitignore ├── .tool-versions ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── config ├── config.exs ├── dev.exs └── test.exs ├── dialyzer_ignore.exs ├── lib ├── ecto_adapters_dynamodb.ex └── ecto_adapters_dynamodb │ ├── application.ex │ ├── cache.ex │ ├── concurrent_batch.ex │ ├── dynamodbset.ex │ ├── info.ex │ ├── migration.ex │ ├── query.ex │ ├── query_info.ex │ ├── recursive_fetch.ex │ └── repo_config.ex ├── mix.exs ├── mix.lock ├── test ├── adapter_property_test.exs ├── adapter_state_eqc_test.exs ├── ecto_adapters_dynamodb │ ├── dynamodbset_test.exs │ ├── info_test.exs │ ├── migration_test.exs │ └── query_test.exs ├── ecto_adapters_dynamodb_test.exs ├── integration │ ├── ex_aws_dynamo_test.exs │ └── jason_test.exs ├── priv │ └── test_repo │ │ └── migrations │ │ ├── 20190319220335_add_dog_table.exs │ │ ├── 20190319220346_add_cat_table.exs │ │ ├── 20190320432123_add_name_index_to_dog.exs │ │ ├── 20190321234543_add_name_index_to_cat.exs │ │ ├── 20190321543456_add_rabbit_table.exs │ │ ├── 20190322123432_modify_cat_name_index.exs │ │ ├── 20190401123234_add_redundant_name_index_to_cat.exs │ │ ├── 20190401654345_add_billing_mode_test_table.exs │ │ ├── 20190401765456_add_name_index_to_billing_mode_test.exs │ │ ├── 20200519112501_add_ttl_to_cat.exs │ │ ├── 20200519112601_remove_ttl_from_dog.exs │ │ ├── 20220323102500_add_stream_table.exs │ │ ├── 20220323103900_add_stream_to_cat_table.exs │ │ └── 20220323135100_remove_stream_from_table.exs ├── support │ ├── test_repo.ex │ └── test_schema.ex └── test_helper.exs └── upgrade_guides ├── version_1_upgrade_guide.md ├── version_2_upgrade_guide.md └── version_3_upgrade_guide.md /.formatter.exs: -------------------------------------------------------------------------------- 1 | [ 2 | inputs: [ 3 | "mix.exs", 4 | "config/*.exs", 5 | "test/**/*.{exs,ex}", 6 | "lib/**/*.ex" 7 | ] 8 | ] 9 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | updates: 4 | - package-ecosystem: mix 5 | directory: "/" 6 | schedule: 7 | interval: daily 8 | open-pull-requests-limit: 10 9 | -------------------------------------------------------------------------------- /.github/workflows/on-push.yml: -------------------------------------------------------------------------------- 1 | name: on-push 2 | on: [push] 3 | env: 4 | MIX_ENV: test 5 | jobs: 6 | build: 7 | runs-on: ubuntu-24.04 8 | # See https://hexdocs.pm/elixir/compatibility-and-deprecations.html#compatibility-between-elixir-and-erlang-otp 9 | strategy: 10 | matrix: 11 | include: 12 | - pair: 13 | otp: 27.x 14 | elixir: 1.17.x 15 | lint: lint 16 | 17 | - pair: 18 | otp: 26.x 19 | elixir: 1.17.x 20 | - pair: 21 | otp: 26.x 22 | elixir: 1.16.x 23 | - pair: 24 | otp: 26.x 25 | elixir: 1.15.x 26 | 27 | - pair: 28 | otp: 25.x 29 | elixir: 1.17.x 30 | - pair: 31 | otp: 25.x 32 | elixir: 1.16.x 33 | - pair: 34 | otp: 25.x 35 | elixir: 1.15.x 36 | - pair: 37 | otp: 25.x 38 | elixir: 1.14.x 39 | 40 | - pair: 41 | otp: 24.x 42 | elixir: 1.16.x 43 | - pair: 44 | otp: 24.x 45 | elixir: 1.15.x 46 | - pair: 47 | otp: 24.x 48 | elixir: 1.14.x 49 | - pair: 50 | otp: 24.x 51 | elixir: 1.13.x 52 | steps: 53 | - uses: actions/checkout@v4 54 | name: "Checkout" 55 | - uses: erlef/setup-beam@v1 56 | name: "Setup Elixir" 57 | with: 58 | otp-version: ${{matrix.pair.otp}} 59 |
elixir-version: ${{matrix.pair.elixir}} 60 | - uses: rrainn/dynamodb-action@v4.0.0 61 | - run: mix deps.get 62 | - run: mix compile 63 | - run: mix format --check-formatted 64 | if: matrix.lint # Only check formatting with the latest version 65 | - run: mix dialyzer 66 | if: matrix.lint # Only check dialyzer with the latest version 67 | - run: mix test 68 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps 9 | 10 | # Where 3rd-party dependencies like ExDoc output generated docs. 11 | /doc 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Stuff generated by EQC 23 | .eqc-info 24 | current_counterexample.eqc 25 | 26 | # vim git ignore files 27 | *~ 28 | *.swp 29 | *.swo 30 | 31 | # DS_Store 32 | *.DS_Store 33 | 34 | mix.lock 35 | -------------------------------------------------------------------------------- /.tool-versions: -------------------------------------------------------------------------------- 1 | elixir 1.17 2 | erlang 26.2 3 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog for Ecto.Adapters.DynamoDB v3.x.x 2 | 3 | [v2.x.x -> v3.x.x upgrade guide](/upgrade_guides/version_3_upgrade_guide.md) 4 | 5 | ## v3.5.0 6 | 7 | - Add support for concurrent fetch. See README. 8 | 9 | ## v3.4.1 10 | 11 | - Update minimum Elixir to 1.13, update supported version to 1.17 12 | - Fix warnings on Elixir 1.17 13 | - Fix previously compile-time only config for maximum retry count to be runtime configurable 14 | 15 | ## v3.4.0 16 | 17 | - Support for Ecto 3.11 (note that this breaks support for earlier versions of Ecto). 18 | - Increase minimum Elixir version to 1.11 19 | 20 | ## v3.3.7 21 | 22 | - Add retries to `insert`, `update` and `delete` when a transaction conflict occurs. Default 23 | retry count is 10 but may be configured with `:max_transaction_conflict_retries` 24 | 25 | ## v3.3.6 26 | 27 | - Make `Ecto.Adapters.DynamoDB.decode_item/4` public 28 | 29 | ## v3.3.5 30 | 31 | - Move logging messages from `info` to `debug` level 32 | 33 | ## v3.3.4 34 | 35 | - Add support for logging via `Logger`. 36 | - Fix some deprecation warnings 37 | - Update github workflow to work again 38 | 39 | ## v3.3.3 40 | 41 | - Fix crash when logging binary values that aren't printable strings, by base64 42 | encoding them. 43 | 44 | ## v3.3.2 45 | 46 | - Revert "Allow `replace` and `replace_all` to work in more situations" as 47 | it prevented it from working in other situations.
48 | 49 | ## v3.3.1 50 | 51 | - Allow `replace` and `replace_all` to work in more situations 52 | - Fix reserved word names in `delete_all` 53 | - Fix removal of empty mapsets when `remove_nil_fields_on_update` is set 54 | 55 | ## v3.3.0 56 | 57 | - Add support for table stream configuration 58 | 59 | ## v3.2.0 60 | 61 | - Fix migrations support for ecto_sql 3.7.2 62 | - Fix warnings on Elixir 1.13 63 | - Raise minimum Elixir version to 1.10 64 | - Add dialyzer run to CI workflow 65 | 66 | ## v3.1.3 67 | 68 | - Support `:empty_mapset_to_nil` for `insert_all` function 69 | - Fix error decoding parameterized field on schema load 70 | 71 | ## v3.1.2 72 | 73 | - Support update operations for the `:empty_mapset_to_nil` option. 74 | 75 | ## v3.1.1 76 | 77 | - Support for `ecto_sql` version 3.6. 78 | 79 | ## v3.1.0 80 | 81 | - Add `:nil_to_empty_mapset` and `:empty_mapset_to_nil` configuration options. 82 | 83 | ## v3.0.3 84 | 85 | - Constrain ecto_sql requirement to 3.5.x. 3.6 introduces interface changes that are not yet supported. 86 | 87 | ## v3.0.2 88 | 89 | - Add handling for `nil` values in `DynamoDBSet.is_equal?` 90 | 91 | ## v3.0.1 92 | 93 | - Maintain backwards compatibility for Ecto versions 3.0 through 3.4 - all major version 3 releases of Ecto should now be supported 94 | 95 | ## v3.0.0 96 | 97 | ### Enhancements 98 | 99 | #### Configuration 100 | 101 | - Per-repo configuration support 102 | 103 | #### Dependencies 104 | 105 | - Upgrade to and support for [Ecto](https://github.com/elixir-ecto/ecto) version 3.5 or higher (lower versions not supported by this release) 106 | - Upgrade [ExAws.Dynamo](https://github.com/ex-aws/ex_aws_dynamo) to version 4 - recommend reviewing upgrade guide in that repo 107 | - Upgrade Hackney to v1.17.3 108 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation.
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at dev@circl.es. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2017 circles 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /config/config.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | import_config "#{Mix.env()}.exs" 4 | -------------------------------------------------------------------------------- /config/dev.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | -------------------------------------------------------------------------------- /config/test.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :ecto_adapters_dynamodb, Ecto.Adapters.DynamoDB.TestRepo, 4 | migration_source: "test_schema_migrations", 5 | debug_requests: true, 6 | # Unlike for prod config, we hardcode fake values for local version of DynamoDB 7 | access_key_id: "abcd", 8 | secret_access_key: "1234", 9 | region: "us-east-1", 10 | dynamodb: [ 11 | scheme: "http://", 12 | host: "localhost", 13 | port: 8000, 14 | region: "us-east-1" 15 | ], 16 | scan_tables: ["test_schema_migrations"], 17 | dynamodb_local: true 18 | 19 | config :ecto_adapters_dynamodb, 20 | log_levels: [] 21 | 22 | config :logger, 23 | backends: [:console], 24 | compile_time_purge_matching: [ 25 | [level_lower_than: :debug] 26 | ], 27 | level: :info 28 | -------------------------------------------------------------------------------- /dialyzer_ignore.exs: -------------------------------------------------------------------------------- 1 | [ 2 | # See https://github.com/elixir-lang/elixir/pull/8480 - this is an issue with the way dialyzer 3 | # and MapSet interact 4 | {"lib/ecto_adapters_dynamodb.ex", :call_without_opaque}, 5 | {"lib/ecto_adapters_dynamodb/dynamodbset.ex", :call_without_opaque}, 6 | 7 | # Type mismatch on newer Ecto versions - only exists for support of older ones 8 | {"lib/ecto_adapters_dynamodb.ex", :call} 9 | ] 10 | -------------------------------------------------------------------------------- /lib/ecto_adapters_dynamodb/application.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.Application do 2 | # See http://elixir-lang.org/docs/stable/elixir/Application.html 3 | # for more information on OTP Applications 4 | @moduledoc false 5 | 6 | use Application 7 | 8 | def start(_type, 
_args) do 9 | # Define workers and child supervisors to be supervised 10 | children = [ 11 | Ecto.Adapters.DynamoDB.QueryInfo 12 | ] 13 | 14 | opts = [strategy: :one_for_one, name: Ecto.Adapters.DynamoDB.Supervisor] 15 | Supervisor.start_link(children, opts) 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /lib/ecto_adapters_dynamodb/cache.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.Cache do 2 | @moduledoc """ 3 | An Elixir agent to cache DynamoDB table schemas and the first page of results for selected tables 4 | """ 5 | 6 | @typep table_name_t :: String.t() 7 | @typep dynamo_response_t :: %{required(String.t()) => term} 8 | 9 | alias Confex.Resolver 10 | alias Ecto.Adapters.DynamoDB 11 | alias Ecto.Repo 12 | 13 | defstruct [ 14 | :schemas, 15 | :tables, 16 | :ex_aws_config 17 | ] 18 | 19 | @type cached_table :: {String.t(), map()} 20 | @type t :: %__MODULE__{ 21 | schemas: map(), 22 | tables: [cached_table()] 23 | } 24 | 25 | def child_spec([repo]) do 26 | %{ 27 | id: repo, 28 | start: {__MODULE__, :start_link, [repo]} 29 | } 30 | end 31 | 32 | @spec start_link(Repo.t()) :: Agent.on_start() 33 | def start_link(repo) do 34 | cached_table_list = 35 | repo.config() 36 | |> Resolver.resolve!() 37 | |> Keyword.get(:cached_tables, []) 38 | 39 | Agent.start_link( 40 | fn -> 41 | %__MODULE__{ 42 | schemas: %{}, 43 | tables: for(table_name <- cached_table_list, into: %{}, do: {table_name, nil}), 44 | ex_aws_config: DynamoDB.ex_aws_config(repo) 45 | } 46 | end, 47 | name: agent(repo) 48 | ) 49 | end 50 | 51 | @doc """ 52 | Returns the cached result of a DynamoDB describe-table call. Performs the describe-table request if not yet cached and raises any errors resulting from the request. The raw JSON is presented as an Elixir map. 53 | """ 54 | @spec describe_table!(Repo.t(), table_name_t) :: dynamo_response_t | no_return 55 | def describe_table!(repo, table_name) do 56 | case describe_table(repo, table_name) do 57 | {:ok, schema} -> schema 58 | {:error, error} -> raise error.type, message: error.message 59 | end 60 | end 61 | 62 | @spec describe_table(Repo.t(), table_name_t) :: {:ok, dynamo_response_t} | {:error, term} 63 | def describe_table(repo, table_name), 64 | do: Agent.get_and_update(agent(repo), &do_describe_table(&1, table_name)) 65 | 66 | @doc """ 67 | Performs a DynamoDB describe-table request and caches (without returning) the result. Raises any errors resulting from the request 68 | """ 69 | @spec update_table_info!(Repo.t(), table_name_t) :: :ok | no_return 70 | def update_table_info!(repo, table_name) do 71 | case update_table_info(repo, table_name) do 72 | :ok -> :ok 73 | {:error, error} -> raise error.type, message: error.message 74 | end 75 | end 76 | 77 | @spec update_table_info(Repo.t(), table_name_t) :: :ok | {:error, term} 78 | def update_table_info(repo, table_name), 79 | do: Agent.get_and_update(agent(repo), &do_update_table_info(&1, table_name)) 80 | 81 | @doc """ 82 | Returns the cached first page of results for a table.
Performs a DynamoDB scan if not yet cached and raises any errors as a result of the request 83 | """ 84 | @spec scan!(Repo.t(), table_name_t) :: dynamo_response_t | no_return 85 | def scan!(repo, table_name) do 86 | case scan(repo, table_name) do 87 | {:ok, scan_result} -> scan_result 88 | {:error, error} -> raise error.type, message: error.message 89 | end 90 | end 91 | 92 | @spec scan(Repo.t(), table_name_t) :: {:ok, dynamo_response_t} | {:error, term} 93 | def scan(repo, table_name), 94 | do: Agent.get_and_update(agent(repo), &do_scan(&1, table_name)) 95 | 96 | @doc """ 97 | Performs a DynamoDB scan and caches (without returning) the first page of results. Raises any errors as a result of the request 98 | """ 99 | @spec update_cached_table!(Repo.t(), table_name_t) :: :ok | no_return 100 | def update_cached_table!(repo, table_name) do 101 | case update_cached_table(repo, table_name) do 102 | :ok -> :ok 103 | {:error, error} -> raise error.type, message: error.message 104 | end 105 | end 106 | 107 | @spec update_cached_table(Repo.t(), table_name_t) :: :ok | {:error, term} 108 | def update_cached_table(repo, table_name), 109 | do: Agent.get_and_update(agent(repo), &do_update_cached_table(&1, table_name)) 110 | 111 | @doc """ 112 | Returns the current cache of table schemas, and cache of first page of results for selected tables, as an Elixir map 113 | """ 114 | # For testing and debugging use only: 115 | def get_cache(repo), 116 | do: Agent.get(agent(repo), & &1) 117 | 118 | defp do_describe_table(cache, table_name) do 119 | case cache.schemas[table_name] do 120 | nil -> 121 | result = ExAws.Dynamo.describe_table(table_name) |> ExAws.request(cache.ex_aws_config) 122 | 123 | case result do 124 | {:ok, %{"Table" => schema}} -> 125 | updated_cache = put_in(cache.schemas[table_name], schema) 126 | {{:ok, schema}, updated_cache} 127 | 128 | {:error, error} -> 129 | {{:error, %{type: ExAws.Error, message: "ExAws Request Error! #{inspect(error)}"}}, 130 | cache} 131 | end 132 | 133 | schema -> 134 | {{:ok, schema}, cache} 135 | end 136 | end 137 | 138 | defp do_update_table_info(cache, table_name) do 139 | result = ExAws.Dynamo.describe_table(table_name) |> ExAws.request(cache.ex_aws_config) 140 | 141 | case result do 142 | {:ok, %{"Table" => schema}} -> 143 | updated_cache = put_in(cache.schemas[table_name], schema) 144 | {:ok, updated_cache} 145 | 146 | {:error, error} -> 147 | {{:error, %{type: ExAws.Error, message: "ExAws Request Error! #{inspect(error)}"}}, cache} 148 | end 149 | end 150 | 151 | defp do_scan(cache, table_name) do 152 | table_name_in_config = Map.has_key?(cache.tables, table_name) 153 | 154 | case cache.tables[table_name] do 155 | nil when table_name_in_config -> 156 | result = ExAws.Dynamo.scan(table_name) |> ExAws.request(cache.ex_aws_config) 157 | 158 | case result do 159 | {:ok, scan_result} -> 160 | updated_cache = put_in(cache.tables[table_name], scan_result) 161 | {{:ok, scan_result}, updated_cache} 162 | 163 | {:error, error} -> 164 | {{:error, %{type: ExAws.Error, message: "ExAws Request Error! #{inspect(error)}"}}, 165 | cache} 166 | end 167 | 168 | nil -> 169 | {{:error, 170 | %{ 171 | type: ArgumentError, 172 | message: 173 | "Could not confirm the table, #{inspect(table_name)}, as listed for caching in the application's configuration. Please see README file for details." 
174 | }}, cache} 175 | 176 | cached_scan -> 177 | {{:ok, cached_scan}, cache} 178 | end 179 | end 180 | 181 | defp do_update_cached_table(cache, table_name) do 182 | table_name_in_config = Map.has_key?(cache.tables, table_name) 183 | 184 | case cache.tables[table_name] do 185 | nil when not table_name_in_config -> 186 | {{:error, 187 | %{ 188 | type: ArgumentError, 189 | message: 190 | "Could not confirm the table, #{inspect(table_name)}, as listed for caching in the application's configuration. Please see README file for details." 191 | }}, cache} 192 | 193 | _ -> 194 | result = ExAws.Dynamo.scan(table_name) |> ExAws.request(cache.ex_aws_config) 195 | 196 | case result do 197 | {:ok, scan_result} -> 198 | updated_cache = put_in(cache.tables[table_name], scan_result) 199 | {:ok, updated_cache} 200 | 201 | {:error, error} -> 202 | {{:error, %{type: ExAws.Error, message: "ExAws Request Error! #{inspect(error)}"}}, 203 | cache} 204 | end 205 | end 206 | end 207 | 208 | defp agent(repo), do: Module.concat(repo, Cache) 209 | end 210 | -------------------------------------------------------------------------------- /lib/ecto_adapters_dynamodb/concurrent_batch.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.ConcurrentBatch do 2 | alias Ecto.Adapters.DynamoDB.RecursiveFetch 3 | 4 | def fetch(repo, query, table, hash_values, opts) do 5 | items = 6 | if Confex.get_env(:ecto_adapters_dynamodb, :concurrent_batch, false) do 7 | concurrent_fetch(repo, query, table, hash_values, opts) 8 | else 9 | do_fetch_recursive(repo, query, table, hash_values, opts) 10 | end 11 | 12 | %{"Responses" => %{table => items}} 13 | end 14 | 15 | def concurrent_fetch(repo, query, table, hash_values, opts) do 16 | max_fetch_concurrency = Confex.get_env(:ecto_adapters_dynamodb, :max_fetch_concurrency, 100) 17 | 18 | min_concurrent_fetch_batch = 19 | Confex.get_env(:ecto_adapters_dynamodb, :min_concurrent_fetch_batch, 10) 20 | 21 | item_count = length(hash_values) 22 | 23 | processes = min(max_fetch_concurrency, ceil(item_count / min_concurrent_fetch_batch)) 24 | 25 | if processes < 2 do 26 | do_fetch_recursive(repo, query, table, hash_values, opts) 27 | else 28 | do_concurrent_fetch(repo, query, table, hash_values, opts, processes) 29 | end 30 | end 31 | 32 | defp do_concurrent_fetch(repo, query, table, hash_values, opts, processes) do 33 | hash_values 34 | |> Enum.chunk_every(ceil(length(hash_values) / processes)) 35 | |> Enum.map(fn chunk -> 36 | Task.async(fn -> do_fetch_recursive(repo, query, table, chunk, opts) end) 37 | end) 38 | |> Enum.map(&Task.await/1) 39 | |> List.flatten() 40 | end 41 | 42 | defp do_fetch_recursive(repo, query, table, hash_values, opts) do 43 | Enum.reduce(hash_values, [], fn hash_value, acc -> 44 | # When receiving a list of values to query on, construct a custom query for each 45 | # of those values to pass into RecursiveFetch.fetch_query/4. 46 | new_query = 47 | Kernel.put_in(query, [:expression_attribute_values, :hash_key], hash_value) 48 | 49 | %{"Items" => items} = RecursiveFetch.fetch_query(repo, new_query, table, opts) 50 | acc ++ items 51 | end) 52 | end 53 | end 54 | -------------------------------------------------------------------------------- /lib/ecto_adapters_dynamodb/dynamodbset.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.DynamoDBSet do 2 | @moduledoc """ 3 | An Ecto type for handling MapSet, corresponding with DynamoDB's **set** types.
Since ExAws 4 | already encodes and decodes MapSet, we only handle casting and validation here. 5 | """ 6 | 7 | @behaviour Ecto.Type 8 | 9 | @doc """ 10 | This type is actually a MapSet 11 | """ 12 | @impl Ecto.Type 13 | def type, do: MapSet 14 | 15 | @doc """ 16 | Confirm the type is a MapSet and its elements are of one type, number or binary 17 | """ 18 | @impl Ecto.Type 19 | def cast(mapset) do 20 | case mapset do 21 | %MapSet{} -> if valid?(mapset), do: {:ok, mapset}, else: :error 22 | _ -> :error 23 | end 24 | end 25 | 26 | @doc """ 27 | Load as is 28 | """ 29 | @impl Ecto.Type 30 | def load(mapset), do: {:ok, mapset} 31 | 32 | @doc """ 33 | Dump as is 34 | """ 35 | @impl Ecto.Type 36 | def dump(mapset), do: {:ok, mapset} 37 | 38 | @doc """ 39 | Check if two terms are semantically equal 40 | """ 41 | @impl Ecto.Type 42 | def equal?(%MapSet{} = term_a, %MapSet{} = term_b), do: MapSet.equal?(term_a, term_b) 43 | def equal?(nil, %MapSet{}), do: false 44 | def equal?(%MapSet{}, nil), do: false 45 | def equal?(nil, nil), do: true 46 | 47 | @doc """ 48 | Dictates how the type should be treated inside embeds 49 | """ 50 | @impl Ecto.Type 51 | def embed_as(_), do: :self 52 | 53 | defp valid?(mapset) do 54 | Enum.all?(mapset, fn x -> is_number(x) end) or 55 | Enum.all?(mapset, fn x -> is_binary(x) end) 56 | end 57 | end 58 | -------------------------------------------------------------------------------- /lib/ecto_adapters_dynamodb/info.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.Info do 2 | @moduledoc """ 3 | Get information on dynamo tables and schema 4 | """ 5 | 6 | alias Ecto.Adapters.DynamoDB 7 | alias Ecto.Repo 8 | alias ExAws.Dynamo 9 | 10 | @typep table_name_t :: String.t() 11 | @typep dynamo_response_t :: %{required(String.t()) => term} 12 | 13 | @doc """ 14 | Returns the raw amazon dynamo DB table schema information. The raw json is presented as an elixir map. 
14 | 15 | 16 | Here is an example of what it may look like 17 | ``` 18 | %{"AttributeDefinitions" => [%{"AttributeName" => "id", 19 | "AttributeType" => "S"}, 20 | %{"AttributeName" => "person_id", "AttributeType" => "S"}], 21 | "CreationDateTime" => 1489615412.651, 22 | "GlobalSecondaryIndexes" => [%{"IndexArn" => "arn:aws:dynamodb:ddblocal:000000000000:table/circle_members/index/person_id", 23 | "IndexName" => "person_id", "IndexSizeBytes" => 7109, 24 | "IndexStatus" => "ACTIVE", "ItemCount" => 146, 25 | "KeySchema" => [%{"AttributeName" => "person_id", "KeyType" => "HASH"}], 26 | "Projection" => %{"ProjectionType" => "ALL"}, 27 | "ProvisionedThroughput" => %{"ReadCapacityUnits" => 100, 28 | "WriteCapacityUnits" => 50}}], "ItemCount" => 146, 29 | "KeySchema" => [%{"AttributeName" => "id", "KeyType" => "HASH"}, 30 | %{"AttributeName" => "person_id", "KeyType" => "RANGE"}], 31 | "ProvisionedThroughput" => %{"LastDecreaseDateTime" => 0.0, 32 | "LastIncreaseDateTime" => 0.0, "NumberOfDecreasesToday" => 0, 33 | "ReadCapacityUnits" => 100, "WriteCapacityUnits" => 50}, 34 | "TableArn" => "arn:aws:dynamodb:ddblocal:000000000000:table/circle_members", 35 | "TableName" => "circle_members", "TableSizeBytes" => 7109, 36 | "TableStatus" => "ACTIVE"} 37 | ``` 38 | """ 39 | @spec table_info(Repo.t(), table_name_t) :: dynamo_response_t | no_return 40 | def table_info(repo, tablename) do 41 | # Fetch and cache the raw schema definition from DynamoDB 42 | Ecto.Adapters.DynamoDB.Cache.describe_table!(repo, tablename) 43 | end 44 | 45 | @doc "Get all the raw information on indexes for a given table, returning as a map." 46 | @spec index_details(Repo.t(), table_name_t) :: %{primary: [map], secondary: [map]} 47 | def index_details(repo, tablename) do 48 | # Extract the primary key data (required) and the optional secondary global or local indexes 49 | %{"KeySchema" => primary_key} = schema = table_info(repo, tablename) 50 | 51 | indexes = 52 | Map.get(schema, "GlobalSecondaryIndexes", []) ++ 53 | Map.get(schema, "LocalSecondaryIndexes", []) 54 | 55 | # return only the relevant index/key data 56 | %{:primary => primary_key, :secondary => indexes} 57 | end 58 | 59 | @doc """ 60 | Get a list of the available indexes on a table. The format of this list is described in normalise_dynamo_index! 61 | """ 62 | @spec indexes(Repo.t(), table_name_t) :: [{:primary | String.t(), [String.t()]}] 63 | def indexes(repo, tablename) do 64 | [primary_key!(repo, tablename) | secondary_indexes(repo, tablename)] 65 | end 66 | 67 | @doc """ 68 | Returns the primary key/ID for a table. It may be a single field that is a HASH, or 69 | it may be the DynamoDB {HASH, SORT} type of index. We return 70 | \{:primary, [index]} 71 | in a format described in normalise_dynamo_index!
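For example, the `circle_members` table in the `table_info/2` example above yields \{:primary, ["id", "person_id"]}.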
72 | """ 73 | @spec primary_key!(Repo.t(), table_name_t) :: {:primary, [String.t()]} | no_return 74 | def primary_key!(repo, tablename) do 75 | indexes = index_details(repo, tablename) 76 | {:primary, normalise_dynamo_index!(indexes[:primary])} 77 | end 78 | 79 | @spec repo_primary_key(module()) :: String.t() | no_return 80 | def repo_primary_key(repo) do 81 | case repo.__schema__(:primary_key) do 82 | [pkey] -> 83 | Atom.to_string(pkey) 84 | 85 | [] -> 86 | error("DynamoDB repos must have a primary key, but repo #{repo} has none") 87 | 88 | _ -> 89 | error("DynamoDB repos must have a single primary key, but repo #{repo} has more than one") 90 | end 91 | end 92 | 93 | # @doc "return true if this HASH key/{HASH/SORT} key is the table primary key" 94 | # def primary_key?(tablename, key) do 95 | # case primary_key!(tablename) do 96 | # {:primary, ^key} -> true 97 | # _ -> false 98 | # end 99 | # end 100 | 101 | # @doc "return true is this is a secondary key (HASH/{HASH,SORT}) for the table" 102 | # def secondary_key?(tablename, key) do 103 | # indexes = secondary_indexes(tablename) 104 | # Enum.member?(indexes, key) 105 | # end 106 | 107 | @doc """ 108 | returns a simple list of the secondary indexes (global and local) for the table. Uses same format 109 | for each member of the list as 'primary_key!'. 110 | """ 111 | @spec secondary_indexes(Repo.t(), table_name_t) :: [{String.t(), [String.t()]}] | no_return 112 | def secondary_indexes(repo, tablename) do 113 | # Extract the secondary index value from the index_details map 114 | %{:secondary => indexes} = index_details(repo, tablename) 115 | for index <- indexes, do: {index["IndexName"], normalise_dynamo_index!(index["KeySchema"])} 116 | end 117 | 118 | def ttl_info(repo, tablename) do 119 | tablename 120 | |> Dynamo.describe_time_to_live() 121 | |> ExAws.request(DynamoDB.ex_aws_config(repo)) 122 | end 123 | 124 | @doc """ 125 | returns a list of any indexed attributes in the table 126 | """ 127 | @spec indexed_attributes(Repo.t(), table_name_t) :: [String.t()] 128 | def indexed_attributes(repo, table_name) do 129 | indexes(repo, table_name) 130 | |> Enum.map(fn {_, fields} -> fields end) 131 | |> List.flatten() 132 | |> Enum.uniq() 133 | end 134 | 135 | # dynamo raw index data is complex, and can contain either one or two fields along with their type (hash or range) 136 | # This parses it and returns a simple list format. The first element of the list is the HASH key, the second 137 | # (optional) is the range/sort key. eg: 138 | # [hash_field_name, sort_field_name] or [hash_field_name] 139 | 140 | @spec normalise_dynamo_index!([%{required(String.t()) => String.t()}]) :: 141 | [String.t()] | no_return 142 | defp normalise_dynamo_index!(index_fields) do 143 | # The data structure can look a little like these examples: 144 | # [%{"AttributeName" => "person_id", "KeyType" => "HASH"}] 145 | # [%{"AttributeName" => "id", "KeyType" => "HASH"}, %{"AttributeName" => "person_id", "KeyType" => "RANGE"}] 146 | case index_fields do 147 | # Just one entry in the fields list; it must be a simple hash. 148 | [%{"AttributeName" => fieldname}] -> 149 | [fieldname] 150 | 151 | # Two entries, it's a HASH + SORT - but they might not be returned in order - So figure out 152 | # which is the hash and which is the sort by matching for the "HASH" attribute in the first, 153 | # then second element of the list. Match explicitly as we want a crash if we get anything else. 
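      # For example, both orderings of
      #   [%{"AttributeName" => "id", "KeyType" => "HASH"}, %{"AttributeName" => "person_id", "KeyType" => "RANGE"}]
      # normalise to ["id", "person_id"].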
154 | [ 155 | %{"AttributeName" => fieldname_hash, "KeyType" => "HASH"}, 156 | %{"AttributeName" => fieldname_sort} 157 | ] -> 158 | [fieldname_hash, fieldname_sort] 159 | 160 | [ 161 | %{"AttributeName" => fieldname_sort}, 162 | %{"AttributeName" => fieldname_hash, "KeyType" => "HASH"} 163 | ] -> 164 | [fieldname_hash, fieldname_sort] 165 | end 166 | end 167 | 168 | defp error(msg) do 169 | raise ArgumentError, message: msg 170 | end 171 | end 172 | -------------------------------------------------------------------------------- /lib/ecto_adapters_dynamodb/migration.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.Migration do 2 | import Ecto.Adapters.DynamoDB, only: [ecto_dynamo_log: 2, ecto_dynamo_log: 3, ex_aws_config: 1] 3 | 4 | alias ExAws.Dynamo 5 | alias Ecto.Adapters.DynamoDB.RepoConfig 6 | 7 | @moduledoc """ 8 | Implements Ecto migrations for `create table` and `alter table`. 9 | 10 | The functions, `add`, `remove` and `modify` correspond to indexes on the DynamoDB table. Using `add`, the second parameter, field type (which corresponds with the DynamoDB attribute) must be specified. Use the third parameter to specify a primary key not already specified. For a HASH-only primary key, use `primary_key: true` as the third parameter. For a composite primary key (HASH and RANGE), in addition to the `primary_key` specification, set the third parameter on the range key attribute to `range_key: true`. There should be only one primary key (hash or composite) specified per table. 11 | 12 | To specify index details, such as provisioned throughput, create_if_not_exists/drop_if_exists, billing_mode, and global and local indexes, use the `options` keyword in `create table` and `alter table`, please see the examples below for greater detail. 13 | 14 | *Please note that `change` may not work as expected on rollback. 
We recommend specifying `up` and `down` instead.* 15 | 16 | ``` 17 | Example: 18 | 19 | # Migration file 1: 20 | 21 | def change do 22 | create table(:post, 23 | primary_key: false, 24 | options: [ 25 | global_indexes: [ 26 | [index_name: "email_content", 27 | keys: [:email, :content], 28 | provisioned_throughput: [100, 100]] # [read_capacity, write_capacity] 29 | ], 30 | provisioned_throughput: [20,20] 31 | ]) do 32 | 33 | add :email, :string, primary_key: true # primary composite key 34 | add :title, :string, range_key: true # primary composite key 35 | add :content, :string 36 | end 37 | end 38 | 39 | 40 | # Migration file 2: 41 | 42 | def up do 43 | create_if_not_exists table(:rabbit, 44 | primary_key: false, 45 | options: [ 46 | billing_mode: :pay_per_request, 47 | global_indexes: [ 48 | [index_name: "name", 49 | keys: [:name]] 50 | ] 51 | ]) do 52 | 53 | add :id, :string, primary_key: true 54 | add :name, :string, hash_key: true 55 | end 56 | end 57 | 58 | def down do 59 | drop_if_exists table(:rabbit) 60 | end 61 | 62 | 63 | # Migration file 3: 64 | 65 | def up do 66 | alter table(:post, 67 | options: [ 68 | global_indexes: [ 69 | [index_name: "content", 70 | keys: [:content], 71 | create_if_not_exists: true, 72 | provisioned_throughput: [1,1], 73 | projection: [projection_type: :include, non_key_attributes: [:email]]] 74 | ] 75 | ]) do 76 | 77 | add :content, :string 78 | end 79 | end 80 | 81 | def down do 82 | alter table(:post, 83 | options: [ 84 | global_indexes: [ 85 | [index_name: "content", 86 | drop_if_exists: true]] 87 | ] 88 | ) do 89 | remove :content 90 | end 91 | end 92 | 93 | 94 | # Migration file 4: 95 | 96 | def up do 97 | alter table(:post) do 98 | # modify will not be processed in a rollback if 'change' is used 99 | modify :"email_content", :string, provisioned_throughput: [2,2] 100 | remove :content 101 | end 102 | end 103 | 104 | def down do 105 | alter table(:post, 106 | options: [ 107 | global_indexes: [ 108 | [index_name: "content", 109 | keys: [:content], 110 | projection: [projection_type: :include, non_key_attributes: [:email]]] 111 | ] 112 | ]) do 113 | 114 | modify :"email_content", :string, provisioned_throughput: [100,100] 115 | add :content, :string 116 | end 117 | end 118 | ``` 119 | """ 120 | 121 | # DynamoDB has restrictions on what can be done while tables are being created or 122 | # updated so we allow for a custom wait between requests if certain resource-access 123 | # errors are returned 124 | defp initial_wait(repo), do: RepoConfig.config_val(repo, :migration_initial_wait, 1000) 125 | defp wait_exponent(repo), do: RepoConfig.config_val(repo, :migration_wait_exponent, 1.05) 126 | # 10 minutes 127 | defp max_wait(repo), do: RepoConfig.config_val(repo, :migration_max_wait, 10 * 60 * 1000) 128 | 129 | # Adapted from line 620, https://github.com/michalmuskala/mongodb_ecto/blob/master/lib/mongo_ecto.ex 130 | def execute_ddl(_repo_meta, string, _opts) when is_binary(string) do 131 | raise ArgumentError, 132 | message: "Ecto.Adapters.Dynamodb does not support SQL statements in `execute`" 133 | end 134 | 135 | def execute_ddl(%{repo: repo, migration_source: migration_source}, command, options) do 136 | ecto_dynamo_log(:debug, "#{inspect(__MODULE__)}.execute_ddl", %{ 137 | "#{inspect(__MODULE__)}.execute_ddl-params" => %{ 138 | repo: repo, 139 | command: command, 140 | options: options 141 | } 142 | }) 143 | 144 | # We provide a configuration option for migration_table_capacity 145 | updated_command = maybe_add_schema_migration_table_capacity(repo,
migration_source, command) 146 | execute_ddl(repo, updated_command) 147 | end 148 | 149 | defp execute_ddl(repo, {:create_if_not_exists, %Ecto.Migration.Table{} = table, field_clauses}) do 150 | # :schema_migrations might be provided as an atom, while 'table.name' is now usually a binary 151 | table_name = if is_atom(table.name), do: Atom.to_string(table.name), else: table.name 152 | %{"TableNames" => table_list} = Dynamo.list_tables() |> ExAws.request!(ex_aws_config(repo)) 153 | 154 | ecto_dynamo_log(:debug, "#{inspect(__MODULE__)}.execute_ddl: :create_if_not_exists (table)") 155 | 156 | if not Enum.member?(table_list, table_name) do 157 | ecto_dynamo_log( 158 | :debug, 159 | "#{inspect(__MODULE__)}.execute_ddl: create_if_not_exist: creating table", 160 | %{table_name: table.name} 161 | ) 162 | 163 | create_table(repo, table_name, field_clauses, table.options) 164 | else 165 | ecto_dynamo_log( 166 | :debug, 167 | "#{inspect(__MODULE__)}.execute_ddl: create_if_not_exists: table already exists.", 168 | %{table_name: table.name} 169 | ) 170 | end 171 | 172 | {:ok, []} 173 | end 174 | 175 | defp execute_ddl(repo, {:create, %Ecto.Migration.Table{} = table, field_clauses}) do 176 | ecto_dynamo_log(:debug, "#{inspect(__MODULE__)}.execute_ddl: create table: creating table", %{ 177 | table_name: table.name 178 | }) 179 | 180 | create_table(repo, table.name, field_clauses, table.options) 181 | 182 | {:ok, []} 183 | end 184 | 185 | defp execute_ddl(_repo, {command, %Ecto.Migration.Index{}}) do 186 | raise ArgumentError, 187 | message: 188 | "Ecto.Adapters.Dynamodb migration does not support '" <> 189 | to_string(command) <> " index', please use 'alter table' instead, see README.md" 190 | end 191 | 192 | defp execute_ddl(repo, {:drop, %Ecto.Migration.Table{} = table}) do 193 | execute_ddl(repo, {:drop, table, []}) 194 | end 195 | 196 | defp execute_ddl(repo, {:drop, %Ecto.Migration.Table{} = table, opts}) do 197 | ecto_dynamo_log( 198 | :debug, 199 | "#{inspect(__MODULE__)}.execute_ddl: drop: removing table, opts (ignored): #{inspect(opts)}", 200 | %{ 201 | table_name: table.name 202 | } 203 | ) 204 | 205 | Dynamo.delete_table(table.name) |> ExAws.request!(ex_aws_config(repo)) 206 | 207 | {:ok, []} 208 | end 209 | 210 | defp execute_ddl(repo, {:drop_if_exists, %Ecto.Migration.Table{} = table}) do 211 | execute_ddl(repo, {:drop_if_exists, table, []}) 212 | end 213 | 214 | defp execute_ddl(repo, {:drop_if_exists, %Ecto.Migration.Table{} = table, opts}) do 215 | %{"TableNames" => table_list} = Dynamo.list_tables() |> ExAws.request!(ex_aws_config(repo)) 216 | 217 | ecto_dynamo_log( 218 | :debug, 219 | "#{inspect(__MODULE__)}.execute_ddl: drop_if_exists (table) opts (ignored): #{inspect(opts)}" 220 | ) 221 | 222 | if Enum.member?(table_list, table.name) do 223 | ecto_dynamo_log( 224 | :debug, 225 | "#{inspect(__MODULE__)}.execute_ddl: drop_if_exists: removing table", 226 | %{table_name: table.name} 227 | ) 228 | 229 | Dynamo.delete_table(table.name) |> ExAws.request!(ex_aws_config(repo)) 230 | else 231 | ecto_dynamo_log( 232 | :debug, 233 | "#{inspect(__MODULE__)}.execute_ddl: drop_if_exists (table): table does not exist.", 234 | %{table_name: table.name} 235 | ) 236 | end 237 | 238 | {:ok, []} 239 | end 240 | 241 | defp execute_ddl(repo, {:alter, %Ecto.Migration.Table{} = table, field_clauses}) do 242 | ecto_dynamo_log(:debug, "#{inspect(__MODULE__)}.execute_ddl: :alter (table)") 243 | 244 | {delete, update, key_list} = build_delete_and_update(field_clauses) 245 | 246 | attribute_definitions = 247 | for 
{field, type} <- key_list do 248 | %{ 249 | attribute_name: field, 250 | attribute_type: Dynamo.Encoder.atom_to_dynamo_type(convert_type(type)) 251 | } 252 | end 253 | 254 | to_create = 255 | case table.options[:global_indexes] do 256 | nil -> 257 | nil 258 | 259 | global_indexes -> 260 | Enum.filter(global_indexes, fn index -> 261 | if index[:keys], 262 | do: index[:keys] |> Enum.all?(fn key -> Keyword.has_key?(key_list, key) end) 263 | end) 264 | end 265 | 266 | create = build_secondary_indexes(to_create) |> Enum.map(fn index -> %{create: index} end) 267 | 268 | data = 269 | %{global_secondary_index_updates: create ++ delete ++ update} 270 | |> Map.merge( 271 | if create == [], do: %{}, else: %{attribute_definitions: attribute_definitions} 272 | ) 273 | |> maybe_add_opt(:stream_enabled, table.options[:stream_enabled]) 274 | |> maybe_add_opt(:stream_view_type, table.options[:stream_view_type]) 275 | 276 | result = update_table_recursive(repo, table, data, initial_wait(repo), 0) 277 | set_ttl(repo, table.name, table.options) 278 | result 279 | end 280 | 281 | defp execute_ddl(_repo, {command, struct, _}), 282 | do: 283 | raise(ArgumentError, 284 | message: 285 | "#{inspect(__MODULE__)}.execute_ddl error: '" <> 286 | to_string(command) <> 287 | " #{extract_ecto_migration_type(inspect(struct.__struct__))}' is not supported" 288 | ) 289 | 290 | defp execute_ddl(_repo, {command, struct}), 291 | do: 292 | raise(ArgumentError, 293 | message: 294 | "#{inspect(__MODULE__)}.execute_ddl error: '" <> 295 | to_string(command) <> 296 | " #{extract_ecto_migration_type(inspect(struct.__struct__))}' is not supported" 297 | ) 298 | 299 | # We provide a configuration option for migration_table_capacity 300 | defp maybe_add_schema_migration_table_capacity( 301 | repo, 302 | migration_source, 303 | {:create_if_not_exists, %Ecto.Migration.Table{} = table, field_clauses} = command 304 | ) do 305 | if to_string(table.name) == migration_source do 306 | migration_table_capacity = RepoConfig.config_val(repo, :migration_table_capacity, [1, 1]) 307 | 308 | updated_table_options = 309 | case table.options do 310 | nil -> [provisioned_throughput: migration_table_capacity] 311 | opts -> Keyword.put(opts, :provisioned_throughput, migration_table_capacity) 312 | end 313 | 314 | {:create_if_not_exists, Map.put(table, :options, updated_table_options), field_clauses} 315 | else 316 | command 317 | end 318 | end 319 | 320 | defp maybe_add_schema_migration_table_capacity(_repo, _migration_source, command), do: command 321 | 322 | defp poll_table(repo, table_name) do 323 | table_info = Dynamo.describe_table(table_name) |> ExAws.request(ex_aws_config(repo)) 324 | 325 | case table_info do 326 | {:ok, %{"Table" => table}} -> 327 | ecto_dynamo_log(:debug, "#{inspect(__MODULE__)}.poll_table: table", %{ 328 | "#{inspect(__MODULE__)}.poll_table-table" => %{table_name: table_name, table: table} 329 | }) 330 | 331 | table 332 | 333 | {:error, error_tuple} -> 334 | ecto_dynamo_log( 335 | :debug, 336 | "#{inspect(__MODULE__)}.poll_table: error attempting to poll table. Stopping...", 337 | %{ 338 | "#{inspect(__MODULE__)}.poll_table-error" => %{ 339 | table_name: table_name, 340 | error_tuple: error_tuple 341 | } 342 | } 343 | ) 344 | 345 | raise ExAws.Error, message: "ExAws Request Error! 
#{inspect(error_tuple)}" 345 | end 346 | end 347 | 348 | defp list_non_active_statuses(table_info) do 349 | secondary_index_statuses = 350 | (table_info["GlobalSecondaryIndexes"] || []) 351 | |> Enum.map(fn index -> {index["IndexName"], index["IndexStatus"]} end) 352 | 353 | ([{"TableStatus", table_info["TableStatus"]}] ++ secondary_index_statuses) 354 | |> Enum.filter(fn {_, y} -> y != "ACTIVE" end) 355 | end 356 | 357 | defp update_table_recursive(repo, table, data, wait_interval, time_waited) do 358 | ecto_dynamo_log(:debug, "#{inspect(__MODULE__)}.update_table_recursive: polling table", %{ 359 | table_name: table.name 360 | }) 361 | 362 | table_info = poll_table(repo, table.name) 363 | non_active_statuses = list_non_active_statuses(table_info) 364 | 365 | if non_active_statuses != [] do 366 | ecto_dynamo_log( 367 | :debug, 368 | "#{inspect(__MODULE__)}.update_table_recursive: non-active status found in table", 369 | %{ 370 | "#{inspect(__MODULE__)}.update_table_recursive-non_active_status" => %{ 371 | table_name: table.name, 372 | non_active_statuses: non_active_statuses 373 | } 374 | } 375 | ) 376 | 377 | to_wait = 378 | if time_waited == 0, 379 | do: wait_interval, 380 | else: round(:math.pow(wait_interval, wait_exponent(repo))) 381 | 382 | if time_waited + to_wait <= max_wait(repo) do 383 | ecto_dynamo_log( 384 | :debug, 385 | "#{inspect(__MODULE__)}.update_table_recursive: waiting #{inspect(to_wait)} milliseconds (waited so far: #{inspect(time_waited)} ms)" 386 | ) 387 | 388 | :timer.sleep(to_wait) 389 | update_table_recursive(repo, table, data, to_wait, time_waited + to_wait) 390 | else 391 | raise "Wait exceeding configured max wait time, stopping migration at update table #{inspect(table.name)}...\nData: #{inspect(data)}" 392 | end 393 | else 394 | # Before passing the index data to Dynamo, do a little extra preparation: 395 | # - filter the data based on the presence of :create_if_not_exists or :drop_if_exists options 396 | # - if the user is running against Dynamo's local development version (in config, dynamodb_local: true), 397 | # we may need to add provisioned_throughput to indexes to handle situations where the local table is provisioned 398 | # but the index will be added to a production table that is on-demand.
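      # (If the alter yields no creates, deletes, updates, or stream changes, prepared_data
      # is emptied by the Enum.reject below and the update_table request is skipped.)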
400 | prepared_data = 401 | data 402 | |> make_safe_index_requests(repo, table) 403 | |> maybe_add_opt(:stream_enabled, data[:stream_enabled]) 404 | |> maybe_add_opt(:stream_view_type, data[:stream_view_type]) 405 | |> maybe_default_throughput_local(repo, table_info) 406 | |> Enum.reject(fn {_, v} -> Enum.member?([[], %{}, nil], v) end) 407 | 408 | if Enum.count(prepared_data) != 0 do 409 | result = 410 | table.name 411 | |> Dynamo.update_table(prepared_data) 412 | |> ExAws.request(ex_aws_config(repo)) 413 | 414 | ecto_dynamo_log( 415 | :debug, 416 | "#{inspect(__MODULE__)}.update_table_recursive: DynamoDB/ExAws response", 417 | %{"#{inspect(__MODULE__)}.update_table_recursive-result" => inspect(result)} 418 | ) 419 | 420 | case result do 421 | {:ok, _} -> 422 | ecto_dynamo_log( 423 | :debug, 424 | "#{inspect(__MODULE__)}.update_table_recursive: table altered successfully.", 425 | %{table_name: table.name} 426 | ) 427 | 428 | {:ok, []} 429 | 430 | {:error, {error, _message}} 431 | when error in [ 432 | "LimitExceededException", 433 | "ProvisionedThroughputExceededException", 434 | "ThrottlingException" 435 | ] -> 436 | to_wait = 437 | if time_waited == 0, 438 | do: wait_interval, 439 | else: round(:math.pow(wait_interval, wait_exponent(repo))) 440 | 441 | if time_waited + to_wait <= max_wait(repo) do 442 | ecto_dynamo_log( 443 | :debug, 444 | "#{inspect(__MODULE__)}.update_table_recursive: #{inspect(error)} ... waiting #{inspect(to_wait)} milliseconds (waited so far: #{inspect(time_waited)} ms)" 445 | ) 446 | 447 | :timer.sleep(to_wait) 448 | update_table_recursive(repo, table, data, to_wait, time_waited + to_wait) 449 | else 450 | raise "#{inspect(error)} ... wait exceeding configured max wait time, stopping migration at update table #{inspect(table.name)}...\nData: #{inspect(data)}" 451 | end 452 | 453 | {:error, error_tuple} -> 454 | ecto_dynamo_log( 455 | :debug, 456 | "#{inspect(__MODULE__)}.update_table_recursive: error attempting to update table. Stopping...", 457 | %{ 458 | "#{inspect(__MODULE__)}.update_table_recursive-error" => %{ 459 | table_name: table.name, 460 | error_tuple: error_tuple, 461 | data: inspect(data) 462 | } 463 | } 464 | ) 465 | 466 | raise ExAws.Error, message: "ExAws Request Error! #{inspect(error_tuple)}" 467 | end 468 | else 469 | {:ok, []} 470 | end 471 | end 472 | end 473 | 474 | # When running against local Dynamo, we may need to perform some additional special handling for indexes. 475 | defp maybe_default_throughput_local(data, repo, table_info), 476 | do: 477 | do_maybe_default_throughput_local( 478 | RepoConfig.config_val(repo, :dynamodb_local), 479 | data, 480 | table_info 481 | ) 482 | 483 | # When running against production Dynamo, don't alter the index data. Production DDB will reject the migration if there's 484 | # disagreement between the table's billing mode and the options specified in the index migration. 485 | defp do_maybe_default_throughput_local(false, data, _table_info), do: data 486 | 487 | # However, when running against the local dev version of Dynamo, it will hang on index migrations 488 | # that attempt to add an index to a provisioned table without specifying throughput. The problem doesn't exist 489 | # the other way around; local Dynamo will ignore throughput specified for indexes where the table is on-demand. 
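  # For example, the local mode that triggers this special handling is enabled in the host
  # application's repo configuration (app and repo names below are hypothetical):
  #
  #     config :my_app, MyApp.Repo,
  #       dynamodb_local: true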
490 | defp do_maybe_default_throughput_local(_using_ddb_local, data, table_info) do 491 | # As of spring 2020, production and local DDB (version 1.11.478) no longer return a "BillingModeSummary" key 492 | # for provisioned tables. In order to allow for backwards compatibility, we've retained the original condition 493 | # following the or in the if statement below, but that can probably be removed in the future. 494 | if not Map.has_key?(table_info, "BillingModeSummary") or 495 | table_info["BillingModeSummary"]["BillingMode"] == "PROVISIONED" do 496 | updated_global_secondary_index_updates = 497 | for index_update <- data.global_secondary_index_updates, 498 | {action, index_info} <- index_update do 499 | if action in [:create, :update] do 500 | # If the table is provisioned but the index_info lacks :provisioned_throughput, add a map of "default" values. 501 | %{ 502 | action => 503 | Map.put_new(index_info, :provisioned_throughput, %{ 504 | read_capacity_units: 1, 505 | write_capacity_units: 1 506 | }) 507 | } 508 | else 509 | index_update 510 | end 511 | end 512 | 513 | Map.replace!(data, :global_secondary_index_updates, updated_global_secondary_index_updates) 514 | else 515 | data 516 | end 517 | end 518 | 519 | defp create_table(repo, table_name, field_clauses, options) do 520 | {key_schema, key_definitions} = 521 | build_key_schema_and_definitions(table_name, field_clauses, options) 522 | 523 | [read_capacity, write_capacity] = options[:provisioned_throughput] || [nil, nil] 524 | 525 | opts = 526 | [ 527 | global_indexes: build_secondary_indexes(options[:global_indexes]), 528 | local_indexes: build_secondary_indexes(options[:local_indexes]), 529 | billing_mode: options[:billing_mode] || :provisioned 530 | ] 531 | |> maybe_add_opt(:read_capacity, read_capacity) 532 | |> maybe_add_opt(:write_capacity, write_capacity) 533 | |> maybe_add_opt(:stream_enabled, options[:stream_enabled]) 534 | |> maybe_add_opt(:stream_view_type, options[:stream_view_type]) 535 | 536 | create_table_recursive( 537 | repo, 538 | table_name, 539 | key_schema, 540 | key_definitions, 541 | opts, 542 | initial_wait(repo), 543 | 0 544 | ) 545 | 546 | set_ttl(repo, table_name, options) 547 | end 548 | 549 | defp maybe_add_opt(opts, _opt, nil), do: opts 550 | defp maybe_add_opt(opts, opt, value) when is_list(opts), do: Keyword.put(opts, opt, value) 551 | defp maybe_add_opt(opts, opt, value) when is_map(opts), do: Map.put(opts, opt, value) 552 | 553 | defp create_table_recursive( 554 | repo, 555 | table_name, 556 | key_schema, 557 | key_definitions, 558 | opts, 559 | wait_interval, 560 | time_waited 561 | ) do 562 | result = 563 | Dynamo.create_table( 564 | table_name, 565 | key_schema, 566 | key_definitions, 567 | opts 568 | ) 569 | |> ExAws.request(ex_aws_config(repo)) 570 | 571 | ecto_dynamo_log( 572 | :debug, 573 | "#{inspect(__MODULE__)}.create_table_recursive: DynamoDB/ExAws response", 574 | %{"#{inspect(__MODULE__)}.create_table_recursive-result" => inspect(result)} 575 | ) 576 | 577 | case result do 578 | {:ok, _} -> 579 | ecto_dynamo_log( 580 | :debug, 581 | "#{inspect(__MODULE__)}.create_table_recursive: table created successfully.", 582 | %{table_name: table_name} 583 | ) 584 | 585 | :ok 586 | 587 | {:error, {error, _message}} 588 | when error in [ 589 | "LimitExceededException", 590 | "ProvisionedThroughputExceededException", 591 | "ThrottlingException" 592 | ] -> 593 | to_wait = 594 | if time_waited == 0, 595 | do: wait_interval, 596 | else: round(:math.pow(wait_interval, wait_exponent(repo))) 597 | 598 | 
if time_waited + to_wait <= max_wait(repo) do 599 | ecto_dynamo_log( 600 | :debug, 601 | "#{inspect(__MODULE__)}.create_table_recursive: #{inspect(error)} ... waiting #{inspect(to_wait)} milliseconds (waited so far: #{inspect(time_waited)} ms)" 602 | ) 603 | 604 | :timer.sleep(to_wait) 605 | 606 | create_table_recursive( 607 | repo, 608 | table_name, 609 | key_schema, 610 | key_definitions, 611 | opts, 612 | to_wait, 613 | time_waited + to_wait 614 | ) 615 | else 616 | raise "#{inspect(error)} ... wait exceeding configured max wait time, stopping migration at create table #{inspect(table_name)}..." 617 | end 618 | 619 | {:error, error_tuple} -> 620 | ecto_dynamo_log( 621 | :debug, 622 | "#{inspect(__MODULE__)}.create_table_recursive: error attempting to create table. Stopping...", 623 | %{ 624 | "#{inspect(__MODULE__)}.create_table_recursive-error" => %{ 625 | table_name: table_name, 626 | error_tuple: error_tuple 627 | } 628 | } 629 | ) 630 | 631 | raise ExAws.Error, message: "ExAws Request Error! #{inspect(error_tuple)}" 632 | end 633 | end 634 | 635 | defp set_ttl(_repo, _table_name, nil), do: :ok 636 | 637 | defp set_ttl(repo, table_name, table_options) do 638 | if Keyword.has_key?(table_options, :ttl_attribute) do 639 | do_set_ttl(repo, table_name, table_options[:ttl_attribute]) 640 | end 641 | end 642 | 643 | defp do_set_ttl(repo, table_name, nil), do: do_set_ttl(repo, table_name, "ttl", false) 644 | 645 | defp do_set_ttl(repo, table_name, attribute, enabled? \\ true) do 646 | result = 647 | table_name 648 | |> Dynamo.update_time_to_live(attribute, enabled?) 649 | |> ExAws.request(ex_aws_config(repo)) 650 | 651 | case result do 652 | {:error, {"ValidationException", "TimeToLive is already disabled"}} when not enabled? -> :ok 653 | {:ok, _} -> :ok 654 | end 655 | end 656 | 657 | defp build_key_schema_and_definitions(table_name, field_clauses, options) do 658 | secondary_index_atoms = 659 | ((options[:global_indexes] || []) ++ (options[:local_indexes] || [])) 660 | |> Enum.flat_map(fn indexes -> indexes[:keys] || [] end) 661 | 662 | {hash_key, range_key, key_list} = 663 | Enum.reduce(field_clauses, {nil, nil, []}, fn {cmd, field, type, opts}, 664 | {hash, range, key_list} -> 665 | cond do 666 | cmd == :add and opts[:primary_key] == true -> 667 | {field, range, [{field, type} | key_list]} 668 | 669 | cmd == :add and opts[:range_key] == true -> 670 | {hash, field, [{field, type} | key_list]} 671 | 672 | cmd == :add and Enum.member?(secondary_index_atoms, field) -> 673 | {hash, range, [{field, type} | key_list]} 674 | 675 | true -> 676 | {hash, range, key_list} 677 | end 678 | end) 679 | 680 | if is_nil(hash_key), 681 | do: 682 | raise( 683 | "#{inspect(__MODULE__)}.build_key_schema error: no primary key was found for table #{inspect(table_name)}. Please specify one primary key in migration." 
684 | ) 685 | 686 | key_definitions = for {field, type} <- key_list, do: {field, convert_type(type)} 687 | 688 | case range_key do 689 | nil -> 690 | {[{hash_key, :hash}], key_definitions} 691 | 692 | range_key -> 693 | {[{hash_key, :hash}, {range_key, :range}], key_definitions} 694 | end 695 | end 696 | 697 | defp build_secondary_indexes(nil), do: [] 698 | 699 | defp build_secondary_indexes(global_indexes) do 700 | Enum.map(global_indexes, fn index -> 701 | %{ 702 | index_name: index[:index_name], 703 | key_schema: build_secondary_key_schema(index[:keys]), 704 | projection: build_secondary_projection(index[:projection]) 705 | } 706 | |> maybe_add_throughput(index[:provisioned_throughput]) 707 | end) 708 | end 709 | 710 | defp build_secondary_key_schema(keys) do 711 | case keys do 712 | [hash] -> 713 | [%{attribute_name: Atom.to_string(hash), key_type: "HASH"}] 714 | 715 | [hash, range] -> 716 | [ 717 | %{attribute_name: Atom.to_string(hash), key_type: "HASH"}, 718 | %{attribute_name: Atom.to_string(range), key_type: "RANGE"} 719 | ] 720 | end 721 | end 722 | 723 | defp build_secondary_projection(nil), do: %{projection_type: "ALL"} 724 | 725 | defp build_secondary_projection(projection) do 726 | case projection[:projection_type] do 727 | :include -> 728 | %{projection_type: "INCLUDE", non_key_attributes: projection[:non_key_attributes]} 729 | 730 | type when type in [:all, :keys_only] -> 731 | %{projection_type: ExAws.Utils.upcase(type)} 732 | end 733 | end 734 | 735 | defp build_delete_and_update(field_clauses) do 736 | Enum.reduce(proper_list(field_clauses), {[], [], []}, fn field_clause, 737 | {delete, update, key_list} -> 738 | case field_clause do 739 | {:remove, field} -> 740 | {[%{delete: %{index_name: field}} | delete], update, key_list} 741 | 742 | {:modify, field, _type, opts} -> 743 | {delete, 744 | [ 745 | %{ 746 | update: %{index_name: field} |> maybe_add_throughput(opts[:provisioned_throughput]) 747 | } 748 | | update 749 | ], key_list} 750 | 751 | {:add, field, type, _opts} -> 752 | {delete, update, [{field, type} | key_list]} 753 | 754 | _ -> 755 | {delete, update, key_list} 756 | end 757 | end) 758 | end 759 | 760 | # Include provisioned_throughput only when it has been explicitly provided. 761 | defp maybe_add_throughput(index_map, nil), do: Map.merge(index_map, %{}) 762 | 763 | defp maybe_add_throughput(index_map, [read_capacity, write_capacity]), 764 | do: 765 | Map.merge(index_map, %{ 766 | provisioned_throughput: %{ 767 | read_capacity_units: read_capacity, 768 | write_capacity_units: write_capacity 769 | } 770 | }) 771 | 772 | defp convert_type(type) do 773 | case type do 774 | :bigint -> :number 775 | :serial -> :number 776 | :binary -> :blob 777 | :binary_id -> :blob 778 | _ -> type 779 | end 780 | end 781 | 782 | # Compare the list of existing global secondary indexes with the indexes flagged with 783 | # :create_if_not_exists and/or :drop_if_exists options and filter them accordingly - 784 | # skipping any that already exist or do not exist, respectively. 
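  # A sketch of how these options might be declared in a migration (table, index, and
  # attribute names here are hypothetical):
  #
  #     alter table(:person,
  #             options: [
  #               global_indexes: [
  #                 [index_name: "email", keys: [:email], create_if_not_exists: true]
  #               ]
  #             ]
  #           ) do
  #       add(:email, :string)
  #     end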
785 | defp make_safe_index_requests(data, repo, table) do 786 | existing_index_names = list_existing_global_secondary_index_names(repo, table.name) 787 | {create_if_not_exist_indexes, drop_if_exists_indexes} = get_existence_options(table.options) 788 | 789 | filter_fun = 790 | &assess_conditional_index_operations( 791 | &1, 792 | existing_index_names, 793 | create_if_not_exist_indexes, 794 | drop_if_exists_indexes 795 | ) 796 | 797 | filtered_global_secondary_index_updates = 798 | Enum.filter(data[:global_secondary_index_updates], filter_fun) 799 | 800 | # In the case of creating an index, the data will have an :attribute_definitions key, 801 | # which has additional info about the index being created. If that index has been removed 802 | # in this filtering process, remove its :attribute_definitions as well. 803 | # Note that this is not technically necessary and does not affect the behavior of the adapter. 804 | # If the index is missing from filtered_global_secondary_index_updates, unmatched data[:attribute_definitions] 805 | # will be overlooked in the call to Dynamo.update_table(). However, to avoid passing around unused data, 806 | # we have opted to filter the attribute_definitions to match the global_secondary_index_updates. 807 | filtered_attribute_definitions = 808 | case data[:attribute_definitions] do 809 | nil -> 810 | nil 811 | 812 | _ -> 813 | Enum.filter(data[:attribute_definitions], fn attribute_definition -> 814 | attribute_name = Atom.to_string(attribute_definition.attribute_name) 815 | 816 | if attribute_name not in create_if_not_exist_indexes, 817 | do: true, 818 | else: attribute_name not in existing_index_names 819 | end) 820 | end 821 | 822 | %{global_secondary_index_updates: filtered_global_secondary_index_updates} 823 | |> Map.merge( 824 | if is_nil(filtered_attribute_definitions), 825 | do: %{}, 826 | else: %{attribute_definitions: filtered_attribute_definitions} 827 | ) 828 | end 829 | 830 | # Check for the presence/absence of the option and assess its relationship to the list of existing indexes 831 | defp assess_conditional_index_operations( 832 | global_secondary_index_update, 833 | existing_index_names, 834 | create_if_not_exist_indexes, 835 | drop_if_exists_indexes 836 | ) do 837 | [{operation, index_info}] = Map.to_list(global_secondary_index_update) 838 | 839 | index_name = 840 | if Kernel.is_atom(index_info.index_name), 841 | do: Atom.to_string(index_info.index_name), 842 | else: index_info.index_name 843 | 844 | assess_index_operation( 845 | operation, 846 | index_name, 847 | index_name in create_if_not_exist_indexes, 848 | index_name in drop_if_exists_indexes, 849 | existing_index_names 850 | ) 851 | end 852 | 853 | # If an existence option has not been provided, or if the action is an update, return 'true' so 854 | # the index is included in the results of Enum.filter(). Otherwise, compare :create_if_not_exists 855 | # and :drop_if_exists with the list of existing indexes and decide how to proceed. 856 | defp assess_index_operation( 857 | :create, 858 | index_name, 859 | in_create_if_not_exist_indexes, 860 | _in_drop_if_exists_indexes, 861 | existing_index_names 862 | ) 863 | when in_create_if_not_exist_indexes do 864 | if index_name not in existing_index_names do 865 | true 866 | else 867 | ecto_dynamo_log( 868 | :debug, 869 | "#{inspect(__MODULE__)}.assess_index_operation: index already exists. 
Skipping create...", 870 | %{"#{inspect(__MODULE__)}.assess_index_operation_skip-create-index" => index_name} 871 | ) 872 | 873 | false 874 | end 875 | end 876 | 877 | defp assess_index_operation( 878 | :delete, 879 | index_name, 880 | _in_create_if_not_exist_indexes, 881 | in_drop_if_exists_indexes, 882 | existing_index_names 883 | ) 884 | when in_drop_if_exists_indexes do 885 | if index_name in existing_index_names do 886 | true 887 | else 888 | ecto_dynamo_log( 889 | :debug, 890 | "#{inspect(__MODULE__)}.assess_index_operation: index does not exist. Skipping drop...", 891 | %{"#{inspect(__MODULE__)}.assess_index_operation_skip-drop-index" => index_name} 892 | ) 893 | 894 | false 895 | end 896 | end 897 | 898 | defp assess_index_operation( 899 | _operation, 900 | _index_name, 901 | _in_create_if_not_exist_indexes, 902 | _in_drop_if_exists_indexes, 903 | _existing_index_names 904 | ), 905 | do: true 906 | 907 | defp list_existing_global_secondary_index_names(repo, table_name) do 908 | case poll_table(repo, table_name)["GlobalSecondaryIndexes"] do 909 | nil -> 910 | [] 911 | 912 | existing_indexes -> 913 | Enum.map(existing_indexes, fn existing_index -> existing_index["IndexName"] end) 914 | end 915 | end 916 | 917 | # Return a tuple with all of the indexes flagged with :create_if_not_exists or :drop_if_exists options 918 | defp get_existence_options(table_options) do 919 | case table_options do 920 | nil -> 921 | {[], []} 922 | 923 | _ -> 924 | global_index_options = Keyword.get(table_options, :global_indexes, []) 925 | 926 | {parse_existence_options(global_index_options, :create_if_not_exists), 927 | parse_existence_options(global_index_options, :drop_if_exists)} 928 | end 929 | end 930 | 931 | # Sort the existence options based on the option provided 932 | defp parse_existence_options(global_index_options, option) do 933 | for global_index_option <- global_index_options, 934 | Keyword.has_key?(global_index_option, option), 935 | do: global_index_option[:index_name] 936 | end 937 | 938 | defp proper_list(l), do: proper_list(l, []) 939 | defp proper_list([], res), do: Enum.reverse(res) 940 | defp proper_list([a | b], res) when not is_list(b), do: Enum.reverse([a | res]) 941 | defp proper_list([a | b], res), do: proper_list(b, [a | res]) 942 | 943 | defp extract_ecto_migration_type(str), 944 | do: str |> String.split(".") |> List.last() |> String.downcase() 945 | end 946 | -------------------------------------------------------------------------------- /lib/ecto_adapters_dynamodb/query.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.Query do 2 | @moduledoc """ 3 | Some query wrapper functions for helping us query dynamo db. Selects indexes to use, etc. 4 | Not to be confused with `Ecto.Query`. 
5 | """ 6 | 7 | import Ecto.Adapters.DynamoDB.Info 8 | 9 | alias Ecto.Adapters.DynamoDB 10 | alias Ecto.Adapters.DynamoDB.ConcurrentBatch 11 | alias Ecto.Adapters.DynamoDB.RecursiveFetch 12 | alias Ecto.Adapters.DynamoDB.RepoConfig 13 | alias Ecto.Repo 14 | 15 | @type table_name :: String.t() 16 | @type dynamo_response :: %{required(String.t()) => term} 17 | @typep key :: String.t() 18 | @typep query_op :: :== | :> | :< | :>= | :<= | :is_nil | :between | :begins_with | :in 19 | @typep boolean_op :: :and | :or 20 | @typep match_clause :: {term, query_op} 21 | @typep search_clause :: {key, match_clause} | {boolean_op, [search_clause]} 22 | @typep search :: [search_clause] 23 | @typep query_opts :: [{atom(), any()}] 24 | 25 | # DynamoDB will reject an entire batch get query if the query is for more than 100 records, so these need to be batched. 26 | # https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchGetItem.html 27 | @batch_get_item_limit 100 28 | @logical_ops [:and, :or] 29 | 30 | # parameters for get_item: 31 | # TABLE_NAME::string, 32 | # [{LOGICAL_OP::atom, [{ATTRIBUTE::string, {VALUE::string, OPERATOR::atom}}]} | {ATTRIBUTE::string, {VALUE::string, OPERATOR::atom}}] 33 | # 34 | 35 | # Repo.all(model), provide cached results for tables designated in :cached_tables 36 | @spec get_item(Repo.t(), table_name, search, keyword) :: dynamo_response | no_return 37 | def get_item(repo, table, search, opts) when search == [] do 38 | maybe_scan(repo, table, search, opts) 39 | end 40 | 41 | # Regular queries 42 | def get_item(repo, table, search, opts) do 43 | parsed_index = 44 | case get_best_index!(repo, table, search, opts) do 45 | # Primary key without range 46 | {:primary, [_idx] = idxs} -> 47 | {:primary, idxs} 48 | 49 | {:primary, idxs} -> 50 | {_, op2} = deep_find_key(search, Enum.at(idxs, 1)) 51 | # Maybe query on composite primary 52 | if op2 in [:between, :begins_with, :<, :>, :<=, :>=] do 53 | {:primary_partial, idxs} 54 | else 55 | {:primary, idxs} 56 | end 57 | 58 | parsed -> 59 | parsed 60 | end 61 | 62 | results = 63 | case parsed_index do 64 | # primary key based lookup uses the efficient 'get_item' operation 65 | {:primary, indexes} = index -> 66 | {hash_values, op} = deep_find_key(search, hd(indexes)) 67 | 68 | if op == :in do 69 | responses_element = "Responses" 70 | unprocessed_keys_element = "UnprocessedKeys" 71 | # The default format of the response from Dynamo. 72 | response_map = %{responses_element => %{table => []}, unprocessed_keys_element => %{}} 73 | 74 | Enum.chunk_every(hash_values, @batch_get_item_limit) 75 | |> Enum.reduce(response_map, fn hash_batch, acc -> 76 | # Modify the 'search' arg so that it only contains values from the current hash_batch. 
77 | batched_search = make_batched_search(search, hash_batch) 78 | 79 | %{ 80 | ^responses_element => %{^table => results}, 81 | ^unprocessed_keys_element => unprocessed_key_map 82 | } = 83 | ExAws.Dynamo.batch_get_item( 84 | construct_batch_get_item_query( 85 | table, 86 | indexes, 87 | hash_batch, 88 | batched_search, 89 | construct_opts(:get_item, opts) 90 | ) 91 | ) 92 | |> ExAws.request!(DynamoDB.ex_aws_config(repo)) 93 | 94 | Kernel.put_in( 95 | acc, 96 | [responses_element, table], 97 | acc[responses_element][table] ++ results 98 | ) 99 | |> maybe_put_unprocessed_keys(unprocessed_key_map, table, unprocessed_keys_element) 100 | end) 101 | else 102 | # https://hexdocs.pm/ex_aws/ExAws.Dynamo.html#get_item/3 103 | query = construct_search(index, search, opts) 104 | 105 | ExAws.Dynamo.get_item(table, query, construct_opts(:get_item, opts)) 106 | |> ExAws.request!(DynamoDB.ex_aws_config(repo)) 107 | end 108 | 109 | # secondary index based lookups need the query functionality. 110 | index when is_tuple(index) -> 111 | index_fields = get_hash_range_key_list(index) 112 | {hash_values, op} = deep_find_key(search, hd(index_fields)) 113 | 114 | # https://hexdocs.pm/ex_aws/ExAws.Dynamo.html#query/2 115 | 116 | query = construct_search(index, search, opts) 117 | 118 | if op == :in do 119 | ConcurrentBatch.fetch(repo, query, table, hash_values, opts) 120 | else 121 | RecursiveFetch.fetch_query(repo, query, table, opts) 122 | end 123 | 124 | :scan -> 125 | maybe_scan(repo, table, search, opts) 126 | end 127 | 128 | results 129 | end 130 | 131 | # In the case of a partial query on a composite key secondary index, the value of index in get_item/2 will be a three-element tuple, ex. {:secondary_partial, "person_id_entity", ["person_id"]}. 132 | # Otherwise, we can expect it to be a two-element tuple. 133 | defp get_hash_range_key_list({_index_type, index_name, index_fields}), 134 | do: get_hash_range_key_list({index_name, index_fields}) 135 | 136 | defp get_hash_range_key_list({_index_name, index_fields}), do: index_fields 137 | 138 | # If a batch_get_item request returns unprocessed keys, update the accumulator with those values. 139 | defp maybe_put_unprocessed_keys(acc, unprocessed_key_map, _table, _unprocessed_keys_element) 140 | when unprocessed_key_map == %{}, 141 | do: acc 142 | 143 | defp maybe_put_unprocessed_keys(acc, unprocessed_key_map, table, unprocessed_keys_element) do 144 | if Map.has_key?(acc[unprocessed_keys_element], table) do 145 | keys_element = "Keys" 146 | 147 | Kernel.put_in( 148 | acc, 149 | [unprocessed_keys_element, table, keys_element], 150 | acc[unprocessed_keys_element][table][keys_element] ++ 151 | unprocessed_key_map[table][keys_element] 152 | ) 153 | else 154 | Map.put(acc, unprocessed_keys_element, unprocessed_key_map) 155 | end 156 | end 157 | 158 | # The initial 'search' arg will have a list of all of the values being queried for; 159 | # when passing this data to construct_batch_get_item_query/5 during a batched operation, 160 | # use a modified form of the 'search' arg that contains only the values from the current batch. 161 | defp make_batched_search([and: [range_query, {hash_key, {_vals, op}}]], hash_batch), 162 | do: [{hash_key, {hash_batch, op}}, range_query] 163 | 164 | defp make_batched_search([{index, {_vals, op}}], hash_batch), do: [{index, {hash_batch, op}}] 165 | 166 | @doc """ 167 | Returns an atom, :scan or :query, specifying whether the current search will be a DynamoDB scan or a query. 
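
  For example (table and attribute hypothetical), a search on an attribute that no
  index covers:

      scan_or_query?(MyApp.Repo, "person", [{"first_name", {"Jerry", :==}}])
      #=> :scan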
168 | """ 169 | def scan_or_query?(repo, table, search) do 170 | if get_best_index!(repo, table, search) == :scan, do: :scan, else: :query 171 | end 172 | 173 | # we've a list of fields from an index that matches (some of) the search fields, so construct 174 | # a DynamoDB search criteria map with only the given fields and their search objects! 175 | @spec construct_search( 176 | {:primary | :primary_partial | nil | String.t(), [String.t()]}, 177 | search, 178 | keyword 179 | ) :: keyword 180 | @spec construct_search({:secondary_partial, String.t(), [String.t()]}, search, keyword) :: 181 | keyword 182 | def construct_search({:primary, index_fields}, search, opts), 183 | do: construct_search(%{}, index_fields, search, opts) 184 | 185 | def construct_search({:primary_partial, index_fields}, search, opts) do 186 | # do not provide index_name for primary partial 187 | construct_search({nil, index_fields}, search, opts) 188 | end 189 | 190 | def construct_search({index_name, index_fields}, search, opts) do 191 | # Construct a DynamoDB FilterExpression (since it cannot be provided blank but may be, 192 | # we merge it with the full query) 193 | {filter_expression_tuple, expression_attribute_names, expression_attribute_values} = 194 | construct_filter_expression(search, index_fields) 195 | 196 | updated_ops = construct_opts(:query, opts) 197 | 198 | # :primary_partial might not provide an index name 199 | criteria = if index_name != nil, do: [index_name: index_name], else: [] 200 | 201 | criteria ++ 202 | case index_fields do 203 | [hash, range] -> 204 | {hash_val, _op} = deep_find_key(search, hash) 205 | 206 | {range_expression, range_attribute_names, range_attribute_values} = 207 | construct_range_params(range, deep_find_key(search, range)) 208 | 209 | [ 210 | # We need ExpressionAttributeNames when field-names are reserved, for example "name" or "role" 211 | key_condition_expression: "##{hash} = :hash_key AND #{range_expression}", 212 | expression_attribute_names: 213 | Enum.reduce( 214 | [%{"##{hash}" => hash}, range_attribute_names, expression_attribute_names], 215 | &Map.merge/2 216 | ), 217 | expression_attribute_values: 218 | [hash_key: hash_val] ++ range_attribute_values ++ expression_attribute_values 219 | ] ++ filter_expression_tuple ++ updated_ops 220 | 221 | [hash] -> 222 | {hash_val, _op} = deep_find_key(search, hash) 223 | 224 | [ 225 | key_condition_expression: "##{hash} = :hash_key", 226 | expression_attribute_names: 227 | Map.merge(%{"##{hash}" => hash}, expression_attribute_names), 228 | expression_attribute_values: [hash_key: hash_val] ++ expression_attribute_values 229 | ] ++ filter_expression_tuple ++ updated_ops 230 | end 231 | end 232 | 233 | def construct_search({:secondary_partial, index_name, index_fields}, search, opts) do 234 | construct_search({index_name, index_fields}, search, opts) 235 | end 236 | 237 | defp construct_search(criteria, [], _, _), do: criteria 238 | 239 | defp construct_search(criteria, [index_field | index_fields], search, opts) do 240 | Map.put(criteria, index_field, elem(deep_find_key(search, index_field), 0)) 241 | |> construct_search(index_fields, search, opts) 242 | end 243 | 244 | @spec construct_range_params(key, match_clause) :: 245 | {String.t(), %{required(String.t()) => key}, keyword()} 246 | defp construct_range_params(range, {[range_start, range_end], :between}) do 247 | {"##{range} between :range_start and :range_end", %{"##{range}" => range}, 248 | [range_start: range_start, range_end: range_end]} 249 | end 250 | 251 | defp 
construct_range_params(range, {prefix, :begins_with}) do 252 | {"begins_with(##{range}, :prefix)", %{"##{range}" => range}, [prefix: prefix]} 253 | end 254 | 255 | defp construct_range_params(range, {range_val, :==}) do 256 | {"##{range} = :range_key", %{"##{range}" => range}, [range_key: range_val]} 257 | end 258 | 259 | defp construct_range_params(range, {range_val, op}) when op in [:<, :>, :<=, :>=] do 260 | {"##{range} #{to_string(op)} :range_key", %{"##{range}" => range}, [range_key: range_val]} 261 | end 262 | 263 | @spec construct_opts(atom, keyword) :: keyword 264 | defp construct_opts(query_type, opts) do 265 | take_opts = 266 | case query_type do 267 | :get_item -> [:consistent_read] 268 | :query -> [:exclusive_start_key, :limit, :scan_index_forward, :consistent_read] 269 | end 270 | 271 | case opts[:projection_expression] do 272 | nil -> [select: opts[:select] || :all_attributes] 273 | _ -> Keyword.take(opts, [:projection_expression]) 274 | end ++ Keyword.take(opts, take_opts) 275 | end 276 | 277 | # returns a tuple: {filter_expression_tuple, expression_attribute_names, expression_attribute_values} 278 | @spec construct_filter_expression(search, [String.t()]) :: 279 | {[filter_expression: String.t()], map, keyword} 280 | defp construct_filter_expression(search, index_fields) do 281 | # We can only construct a FilterExpression on attributes not in key-conditions. 282 | non_indexed_filters = collect_non_indexed_search(search, index_fields, []) 283 | 284 | case non_indexed_filters do 285 | [] -> 286 | {[], %{}, []} 287 | 288 | _ -> 289 | {filter_expression_list, expression_attribute_names, expression_attribute_values} = 290 | build_filter_expression_data(non_indexed_filters, {[], %{}, %{}}) 291 | 292 | {[filter_expression: Enum.join(filter_expression_list, " and ")], 293 | expression_attribute_names, Enum.into(expression_attribute_values, [])} 294 | end 295 | end 296 | 297 | # Recursively strip out the fields for key-conditions; they could be mixed with non key-conditions. 
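  # e.g. (illustrative) with index_fields ["id"], a search of
  # [{"id", {"123", :==}}, {"age", {21, :>}}] collects only [{"age", {21, :>}}].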
298 | # TODO: this may be redundant - the indexed fields can just be skipped during the expression construction 299 | @spec collect_non_indexed_search(search, [String.t()], search) :: search 300 | defp collect_non_indexed_search([], _index_fields, acc), do: acc 301 | 302 | defp collect_non_indexed_search([search_clause | search_clauses], index_fields, acc) do 303 | case search_clause do 304 | {field, {_val, _op}} = complete_tuple when field not in @logical_ops -> 305 | if Enum.member?(index_fields, field), 306 | do: collect_non_indexed_search(search_clauses, index_fields, acc), 307 | else: collect_non_indexed_search(search_clauses, index_fields, [complete_tuple | acc]) 308 | 309 | {logical_op, deeper_clauses} when logical_op in @logical_ops -> 310 | filtered_clauses = collect_non_indexed_search(deeper_clauses, index_fields, []) 311 | # don't keep empty logical_op groups 312 | if filtered_clauses == [], 313 | do: collect_non_indexed_search(search_clauses, index_fields, acc), 314 | else: 315 | collect_non_indexed_search(search_clauses, index_fields, [ 316 | {logical_op, filtered_clauses} | acc 317 | ]) 318 | end 319 | end 320 | 321 | # Recursively reconstruct parentheticals 322 | @type expression_data_acc :: {[String.t()], map, map} 323 | @spec build_filter_expression_data(search, expression_data_acc) :: expression_data_acc 324 | defp build_filter_expression_data([], acc), do: acc 325 | 326 | defp build_filter_expression_data([expr | exprs], {filter_exprs, attr_names, attr_values}) do 327 | case expr do 328 | # a list of lookup fields; iterate. 329 | {field, {val, op} = val_op_tuple} = complete_tuple when is_tuple(val_op_tuple) -> 330 | updated_filter_exprs = [construct_conditional_statement(complete_tuple) | filter_exprs] 331 | updated_attr_names = Map.merge(%{"##{field}" => field}, attr_names) 332 | 333 | updated_attr_values = 334 | Map.merge(format_expression_attribute_value(field, val, op), attr_values) 335 | 336 | build_filter_expression_data( 337 | exprs, 338 | {updated_filter_exprs, updated_attr_names, updated_attr_values} 339 | ) 340 | 341 | {logical_op, exprs_list} when is_list(exprs_list) -> 342 | {deeper_filter_exprs, deeper_attr_names, deeper_attr_values} = 343 | build_filter_expression_data(exprs_list, {[], %{}, %{}}) 344 | 345 | # We don't parenthesize only one expression 346 | updated_filter_exprs = 347 | case deeper_filter_exprs do 348 | [one_expression] -> 349 | [one_expression | filter_exprs] 350 | 351 | many_expressions -> 352 | [ 353 | "(" <> Enum.join(many_expressions, " #{to_string(logical_op)} ") <> ")" 354 | | filter_exprs 355 | ] 356 | end 357 | 358 | updated_attr_names = Map.merge(deeper_attr_names, attr_names) 359 | updated_attr_values = Map.merge(deeper_attr_values, attr_values) 360 | 361 | build_filter_expression_data( 362 | exprs, 363 | {updated_filter_exprs, updated_attr_names, updated_attr_values} 364 | ) 365 | end 366 | end 367 | 368 | @spec format_expression_attribute_value(key, term, query_op | [query_op]) :: map 369 | defp format_expression_attribute_value(field, _val, :is_nil), 370 | do: %{String.to_atom(field <> "_val") => nil} 371 | 372 | defp format_expression_attribute_value(field, val, :in) do 373 | {result, _count} = 374 | Enum.reduce(val, {%{}, 1}, fn v, {acc, count} -> 375 | {Map.merge(acc, %{String.to_atom(field <> "_val#{to_string(count)}") => v}), count + 1} 376 | end) 377 | 378 | result 379 | end 380 | 381 | # double op 382 | defp format_expression_attribute_value(field, [val1, val2], [_op1, _op2]) do 383 | %{String.to_atom(field <> "_val1") 
=> val1, String.to_atom(field <> "_val2") => val2} 384 | end 385 | 386 | defp format_expression_attribute_value(field, [start_val, end_val], :between) do 387 | %{ 388 | String.to_atom(field <> "_start_val") => start_val, 389 | String.to_atom(field <> "_end_val") => end_val 390 | } 391 | end 392 | 393 | defp format_expression_attribute_value(field, val, _op), 394 | do: %{String.to_atom(field <> "_val") => val} 395 | 396 | # double op (neither of them ought to be :==) 397 | @spec construct_conditional_statement({key, {term, query_op} | {[term], [query_op]}}) :: 398 | String.t() 399 | defp construct_conditional_statement({field, {[_val1, _val2], [op1, op2]}}) do 400 | "##{field} #{to_string(op1)} :#{field <> "_val1"} and ##{field} #{to_string(op2)} :#{field <> "_val2"}" 401 | end 402 | 403 | defp construct_conditional_statement({field, {_val, :is_nil}}) do 404 | "(##{field} = :#{field <> "_val"} or attribute_not_exists(##{field}))" 405 | end 406 | 407 | defp construct_conditional_statement({field, {val, :in}}) do 408 | {result, _count} = 409 | Enum.reduce(val, {[], 1}, fn _val, {acc, count} -> 410 | {[":#{field}_val#{to_string(count)}" | acc], count + 1} 411 | end) 412 | 413 | "(##{field} in (#{Enum.join(result, ",")}))" 414 | end 415 | 416 | defp construct_conditional_statement({field, {_val, :==}}) do 417 | "##{field} = :#{field <> "_val"}" 418 | end 419 | 420 | defp construct_conditional_statement({field, {_val, op}}) when op in [:<, :>, :<=, :>=] do 421 | "##{field} #{to_string(op)} :#{field <> "_val"}" 422 | end 423 | 424 | defp construct_conditional_statement({field, {[_start_val, _end_val], :between}}) do 425 | "##{field} between :#{field <> "_start_val"} and :#{field <> "_end_val"}" 426 | end 427 | 428 | defp construct_conditional_statement({field, {_val, :begins_with}}) do 429 | "begins_with(##{field}, :#{field}_val)" 430 | end 431 | 432 | defp construct_batch_get_item_query(table, indexes, hash_values, search, opts) do 433 | take_opts = Keyword.take(opts, [:consistent_read, :projection_expression]) 434 | 435 | keys = 436 | case indexes do 437 | [hash_key] -> 438 | Enum.map(hash_values, fn hash_value -> [{String.to_atom(hash_key), hash_value}] end) 439 | 440 | [hash_key, range_key] -> 441 | {range_values, :in} = deep_find_key(search, range_key) 442 | zipped = Enum.zip(hash_values, range_values) 443 | 444 | Enum.map(zipped, fn {hash_value, range_value} -> 445 | [{String.to_atom(hash_key), hash_value}, {String.to_atom(range_key), range_value}] 446 | end) 447 | end 448 | 449 | %{table => [keys: keys] ++ take_opts} 450 | end 451 | 452 | @doc """ 453 | Given a list of search clauses, finds the best index to match the search against. 454 | Returns a tuple indicating whether it's a primary key index, or a secondary index. 455 | To query against a secondary index in Dynamo, we NEED to have its index name, 456 | so secondary indexes are returned as a tuple with the field name, whilst 457 | the primary key uses the atom :primary to distinguish it. 458 | 459 | {:primary, [indexed_fields_list]} | {"index_name", [indexed_fields_list]} 460 | 461 | Raises an exception if the index doesn't exist. 
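
  For example (table and key names hypothetical, assuming "id" is the table's primary hash key):

      get_best_index(MyApp.Repo, "person", [{"id", {"person-1234", :==}}], [])
      #=> {:primary, ["id"]}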
462 | """ 463 | @spec get_best_index(Repo.t(), table_name, search, query_opts) :: 464 | :not_found 465 | | {:primary, [String.t()]} 466 | | {:primary_partial, [String.t()]} 467 | | {String.t(), [String.t()]} 468 | | {:secondary_partial, String.t(), [String.t()]} 469 | | no_return 470 | def get_best_index(repo, tablename, search, opts) do 471 | case get_matching_primary_index(repo, tablename, search) do 472 | # if we found a primary index with hash+range match, it's probably the best index. 473 | {:primary, _} = index -> 474 | index 475 | 476 | # we've found a primary hash index, but lets check if there's a more specific 477 | # secondary index with hash+sort available... 478 | {:primary_partial, _primary_hash} = index -> 479 | case get_matching_secondary_index(repo, tablename, search, opts) do 480 | # we've found a better, more specific index. 481 | {_, [_, _]} = sec_index -> sec_index 482 | # :not_found, or any other hash? default back to the primary. 483 | _ -> index 484 | end 485 | 486 | # no primary found, so try for a secondary. 487 | :not_found -> 488 | get_matching_secondary_index(repo, tablename, search, opts) 489 | end 490 | end 491 | 492 | @doc """ 493 | Same as get_best_index, but refers to a scan option on failure 494 | """ 495 | def get_best_index!(repo, tablename, search, opts \\ []) do 496 | case get_best_index(repo, tablename, search, opts) do 497 | :not_found -> :scan 498 | index -> index 499 | end 500 | end 501 | 502 | @doc """ 503 | Given a search criteria of 1 or more fields, we try find out if the primary key is a 504 | good match and can be used to forfill this search. Returns the tuple 505 | {:primary, [hash] | [hash, range]} 506 | or 507 | :not_found 508 | """ 509 | def get_matching_primary_index(repo, tablename, search) do 510 | primary_key = primary_key!(repo, tablename) 511 | 512 | case match_index(primary_key, search) do 513 | # We found a full primary index 514 | {:primary, _} = index -> index 515 | # We might be able to use a range query for all results with 516 | # the hash part, such as all circle_member with a specific person_id (all a user's circles). 517 | :not_found -> match_index_hash_part(primary_key, search) 518 | end 519 | end 520 | 521 | @doc """ 522 | Given a keyword list containing search field, value and operator to search for (which may also be nested under logical operators, e.g., `[{"id", {"franko", :==}}]`, or `[{:and, [{"circle_id", {"123", :==}}, {"person_id", {"abc", :>}}]}`), will return the dynamo db index description that will help us match this search. return :not_found if no index is found. 523 | 524 | Returns a tuple of {"index_name", [ hash_key or hash,range_key]]} or :not_found 525 | TODO: Does not help with range queries. -> The match_index_hash_part function is 526 | beginning to address this. 527 | """ 528 | def get_matching_secondary_index(repo, tablename, search, opts) do 529 | secondary_indexes = secondary_indexes(repo, tablename) 530 | 531 | # A user may provide an :index opt in a query, in which case we will prioritize choosing that index. 
532 | case opts[:index] do 533 | nil -> 534 | find_best_match(secondary_indexes, search, :not_found) 535 | 536 | index_option -> 537 | case maybe_select_index_option(index_option, secondary_indexes) do 538 | nil -> index_option_error(index_option, secondary_indexes) 539 | index -> index 540 | end 541 | end 542 | end 543 | 544 | defp maybe_select_index_option(index_option, secondary_indexes) 545 | when is_atom(index_option) do 546 | index_option 547 | |> Atom.to_string() 548 | |> maybe_select_index_option(secondary_indexes) 549 | end 550 | 551 | defp maybe_select_index_option(index_option, secondary_indexes), 552 | do: Enum.find(secondary_indexes, fn {name, _keys} -> name == index_option end) 553 | 554 | @spec index_option_error(String.t() | atom(), [{String.t(), [String.t()]}]) :: no_return 555 | defp index_option_error(_index_option, []) do 556 | raise ArgumentError, 557 | message: 558 | "#{inspect(__MODULE__)}.get_matching_secondary_index/4 error: :index option does not match existing secondary index names." 559 | end 560 | 561 | defp index_option_error(index_option, secondary_indexes) do 562 | index_option = if is_atom(index_option), do: Atom.to_string(index_option), else: index_option 563 | 564 | {nearest_index_name, jaro_distance} = 565 | secondary_indexes 566 | |> Enum.map(fn {name, _keys} -> {name, String.jaro_distance(index_option, name)} end) 567 | |> Enum.max_by(fn {_name, jaro_distance} -> jaro_distance end) 568 | 569 | case jaro_distance >= 0.75 do 570 | true -> 571 | raise ArgumentError, 572 | message: 573 | "#{inspect(__MODULE__)}.get_matching_secondary_index/4 error: :index option does not match existing secondary index names. Did you mean #{nearest_index_name}?" 574 | 575 | false -> 576 | index_option_error(index_option, []) 577 | end 578 | end 579 | 580 | defp find_best_match([], _search, best), do: best 581 | 582 | defp find_best_match([index | indexes], search, best) do 583 | case match_index(index, search) do 584 | # Matching on both hash + sort makes this the best index (as well as we can tell) 585 | {_, [_, _]} -> 586 | index 587 | 588 | {_, [hash]} -> 589 | case search do 590 | # If we're only querying on a single field and we find a matching hash-only index, that's the index to use, no need to check others. 591 | [{field, {_, _}}] -> 592 | if field == hash, do: index, else: find_best_match(indexes, search, best) 593 | 594 | # we have a candidate for best match, though it's a hash key only. Look for better. 595 | _ -> 596 | find_best_match(indexes, search, index) 597 | end 598 | 599 | :not_found -> 600 | case match_index_hash_part(index, search) do 601 | # haven't found anything good, keep looking, retain our previous best match. 602 | :not_found -> 603 | find_best_match(indexes, search, best) 604 | 605 | index_partial -> 606 | # If the current best is a hash-only index (formatted like {"idx_name", ["hash"]}), always choose it over a partial secondary. 607 | # Note that this default behavior would cause a hash-only key to be selected over a partial with a different hash 608 | # in cases where a query might be ambiguous - for example, a query on a user's first_name and last_name where 609 | # a hash-only index exists on first_name and a composite index exists on last_name_something_else. 
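              # e.g. (hypothetical indexes) a best of {"first_name", ["first_name"]} is kept over an
              # index_partial of {:secondary_partial, "last_name_created_at", ["last_name"]}.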
610 | case best do 611 | {_, [_]} -> find_best_match(indexes, search, best) 612 | _ -> find_best_match(indexes, search, index_partial) 613 | end 614 | end 615 | end 616 | end 617 | 618 | # The parameter 'search' is a list of clauses of the form {field_name::string, {value, operator::atom}}, possibly nested under the logical operators :and/:or 619 | defp match_index(index, search) do 620 | case index do 621 | {_, [hash, range]} -> 622 | # Part of the query could be a nil filter on an indexed attribute; 623 | # in that case, we need a check in addition to the key, so we check the operator. 624 | # Also, the hash part can only accept an :== operator. 625 | hash_key = deep_find_key(search, hash) 626 | range_key = deep_find_key(search, range) 627 | 628 | if hash_key != nil and elem(hash_key, 1) in [:==, :in] and 629 | range_key != nil and elem(range_key, 1) != :is_nil, 630 | do: index, 631 | else: :not_found 632 | 633 | {_, [hash]} -> 634 | hash_key = deep_find_key(search, hash) 635 | if hash_key != nil and elem(hash_key, 1) in [:==, :in], do: index, else: :not_found 636 | end 637 | end 638 | 639 | @spec match_index_hash_part({:primary, [key]}, search) :: {:primary_partial, [key]} | :not_found 640 | @spec match_index_hash_part({String.t(), [key]}, search) :: 641 | {:secondary_partial, String.t(), [key]} | :not_found 642 | defp match_index_hash_part(index, search) do 643 | case index do 644 | {:primary, [hash, _range]} -> 645 | hash_key = deep_find_key(search, hash) 646 | 647 | if hash_key != nil and elem(hash_key, 1) in [:==, :in], 648 | do: {:primary_partial, [hash]}, 649 | else: :not_found 650 | 651 | {index_name, [hash, _range]} -> 652 | hash_key = deep_find_key(search, hash) 653 | 654 | if hash_key != nil and elem(hash_key, 1) in [:==, :in], 655 | do: {:secondary_partial, index_name, [hash]}, 656 | else: :not_found 657 | 658 | _ -> 659 | :not_found 660 | end 661 | end 662 | 663 | # TODO: multiple use of deep_find_key could be avoided by using the recursion in the main module to provide a set of indexed attributes in addition to the nested logical clauses. 664 | @spec deep_find_key(search, key) :: nil | {term, query_op} 665 | defp deep_find_key([], _), do: nil 666 | 667 | defp deep_find_key([clause | clauses], key) do 668 | case clause do 669 | {field, {val, op}} when field not in @logical_ops -> 670 | if field == key, do: {val, op}, else: deep_find_key(clauses, key) 671 | 672 | {logical_op, deeper_clauses} when logical_op in @logical_ops -> 673 | found = deep_find_key(deeper_clauses, key) 674 | if found != nil, do: found, else: deep_find_key(clauses, key) 675 | end 676 | end 677 | 678 | @doc """ 679 | Formats the recursive option according to whether the query is a DynamoDB scan or query. (The adapter defaults to recursive fetch in the case of the latter but not the former.) 680 | """ 681 | def parse_recursive_option(scan_or_query, opts) do 682 | case opts[:page_limit] do 683 | page_limit when is_integer(page_limit) and page_limit > 0 -> 684 | page_limit 685 | 686 | page_limit when is_integer(page_limit) and page_limit < 1 -> 687 | raise ArgumentError, 688 | message: 689 | "#{inspect(__MODULE__)}.parse_recursive_option/2 error: :page_limit option must be greater than 0." 
690 | 691 | _ when scan_or_query == :scan -> 692 | # scan defaults to no recursion, opts[:recursive] must equal true to enable it 693 | opts[:recursive] == true 694 | 695 | _ when scan_or_query == :query -> 696 | # query defaults to recursion, opts[:recursive] must equal false to disable it 697 | opts[:recursive] != false 698 | end 699 | end 700 | 701 | # scan 702 | defp maybe_scan(repo, table, [], opts) do 703 | scan_enabled = 704 | opts[:scan] == true || RepoConfig.config_val(repo, :scan_all) == true || 705 | scannable_table?(repo, table) 706 | 707 | cond do 708 | # TODO: we could use the cached scan and apply the search filters 709 | # ourselves when they are provided. 710 | cached_table?(repo, table) and opts[:scan] != true -> 711 | Ecto.Adapters.DynamoDB.Cache.scan!(repo, table) 712 | 713 | scan_enabled -> 714 | limit_option = opts[:limit] || RepoConfig.config_val(repo, :scan_limit) 715 | scan_limit = if is_integer(limit_option), do: [limit: limit_option], else: [] 716 | updated_opts = Keyword.drop(opts, [:recursive, :limit, :scan]) ++ scan_limit 717 | 718 | RecursiveFetch.fetch( 719 | repo, 720 | &ExAws.Dynamo.scan/2, 721 | table, 722 | updated_opts, 723 | parse_recursive_option(:scan, opts), 724 | %{} 725 | ) 726 | 727 | true -> 728 | maybe_scan_error(table) 729 | end 730 | end 731 | 732 | defp maybe_scan(repo, table, search, opts) do 733 | scan_enabled = 734 | opts[:scan] == true || RepoConfig.config_val(repo, :scan_all) == true || 735 | scannable_table?(repo, table) 736 | 737 | limit_option = opts[:limit] || RepoConfig.config_val(repo, :scan_limit) 738 | scan_limit = if is_integer(limit_option), do: [limit: limit_option], else: [] 739 | updated_opts = Keyword.drop(opts, [:recursive, :limit, :scan]) ++ scan_limit 740 | 741 | if scan_enabled do 742 | {filter_expression_tuple, expression_attribute_names, expression_attribute_values} = 743 | construct_filter_expression(search, []) 744 | 745 | expressions = 746 | [ 747 | expression_attribute_names: expression_attribute_names, 748 | expression_attribute_values: expression_attribute_values 749 | ] ++ updated_opts ++ filter_expression_tuple 750 | 751 | RecursiveFetch.fetch( 752 | repo, 753 | &ExAws.Dynamo.scan/2, 754 | table, 755 | expressions, 756 | parse_recursive_option(:scan, opts), 757 | %{} 758 | ) 759 | else 760 | maybe_scan_error(table) 761 | end 762 | end 763 | 764 | @spec maybe_scan_error(table_name) :: no_return 765 | defp maybe_scan_error(table) do 766 | raise ArgumentError, 767 | message: 768 | "#{inspect(__MODULE__)}.maybe_scan/3 error: :scan option or configuration have not been specified, and could not confirm the table, #{inspect(table)}, as listed for scan or caching in the application's configuration. Please see README file for details." 769 | end 770 | 771 | defp scannable_table?(repo, table), do: RepoConfig.table_in_list?(repo, table, :scan_tables) 772 | defp cached_table?(repo, table), do: RepoConfig.table_in_list?(repo, table, :cached_tables) 773 | end 774 | -------------------------------------------------------------------------------- /lib/ecto_adapters_dynamodb/query_info.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.QueryInfo do 2 | @moduledoc """ 3 | An Elixir agent to optionally record DynamoDB query information (like LastEvaluatedKey) that's not part of expected Ecto return values. 
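
  A minimal sketch of the agent API itself (values illustrative):

      key = Ecto.Adapters.DynamoDB.QueryInfo.get_key()
      Ecto.Adapters.DynamoDB.QueryInfo.put(key, %{"LastEvaluatedKey" => %{}})
      Ecto.Adapters.DynamoDB.QueryInfo.get(key)
      #=> %{"LastEvaluatedKey" => %{}} (fetching also removes the key from the map)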
4 | """ 5 | 6 | def child_spec(_) do 7 | %{ 8 | id: __MODULE__, 9 | start: {__MODULE__, :start_link, []} 10 | } 11 | end 12 | 13 | def start_link, do: Agent.start_link(fn -> %{} end, name: __MODULE__) 14 | 15 | @doc """ 16 | Provides a random 32 character, base 64 encoded string. 17 | """ 18 | def get_key, do: :crypto.strong_rand_bytes(32) |> Base.url_encode64() 19 | 20 | @doc """ 21 | Updates the value of a given key in the Agent map. 22 | """ 23 | def put(key, val), do: Agent.update(__MODULE__, fn map -> Map.put(map, key, val) end) 24 | 25 | @doc """ 26 | Updates the value of a given key in the Agent map according to a specific function. 27 | """ 28 | def update(key, initial, fun), 29 | do: Agent.update(__MODULE__, fn map -> Map.update(map, key, initial, fun) end) 30 | 31 | @doc """ 32 | Returns the value (query info) in the QueryInfo agent associated with the provided key. 33 | """ 34 | def get(key), 35 | do: Agent.get_and_update(__MODULE__, fn map -> {map[key], Map.delete(map, key)} end) 36 | 37 | @doc """ 38 | Returns the complete current map recorded by the agent. 39 | """ 40 | def get_map, do: Agent.get(__MODULE__, fn map -> map end) 41 | end 42 | -------------------------------------------------------------------------------- /lib/ecto_adapters_dynamodb/recursive_fetch.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.RecursiveFetch do 2 | alias Ecto.Adapters.DynamoDB 3 | alias Ecto.Adapters.DynamoDB.Query 4 | 5 | @typep fetch_func :: (Query.table_name(), keyword -> ExAws.Operation.JSON.t()) 6 | 7 | def fetch_query(repo, query, table, opts) do 8 | fetch( 9 | repo, 10 | &ExAws.Dynamo.query/2, 11 | table, 12 | query, 13 | Query.parse_recursive_option(:query, opts), 14 | %{} 15 | ) 16 | end 17 | 18 | @spec fetch(Repo.t(), fetch_func, Query.table_name(), keyword, boolean | number, map) :: 19 | Query.dynamo_response() 20 | def fetch(repo, func, table, expressions, recursive, result) do 21 | updated_expressions = 22 | if recursive == true, do: Keyword.delete(expressions, :limit), else: expressions 23 | 24 | fetch_result = 25 | func.(table, updated_expressions) |> ExAws.request!(DynamoDB.ex_aws_config(repo)) 26 | 27 | # recursive can be a boolean or a page limit 28 | updated_recursive = update_recursive_option(recursive) 29 | 30 | if fetch_result["LastEvaluatedKey"] != nil and updated_recursive.continue do 31 | fetch( 32 | repo, 33 | func, 34 | table, 35 | updated_expressions ++ [exclusive_start_key: fetch_result["LastEvaluatedKey"]], 36 | updated_recursive.new_value, 37 | combine_results(result, fetch_result) 38 | ) 39 | else 40 | combine_results(result, fetch_result) 41 | end 42 | end 43 | 44 | @doc """ 45 | Updates the recursive option during a recursive fetch, according to whether the option is a 46 | boolean or an integer (as in the case of page_limit) 47 | """ 48 | def update_recursive_option(r) when is_boolean(r), do: %{continue: r, new_value: r} 49 | def update_recursive_option(r) when is_integer(r), do: %{continue: r > 1, new_value: r - 1} 50 | 51 | @spec combine_results(map, map) :: map 52 | defp combine_results(result, scan_result) do 53 | if result == %{} do 54 | scan_result 55 | else 56 | %{"Count" => result_count, "Items" => result_items, "ScannedCount" => result_scanned_count} = 57 | result 58 | 59 | %{ 60 | "Count" => scanned_count, 61 | "Items" => scanned_items, 62 | "ScannedCount" => scanned_scanned_count 63 | } = scan_result 64 | 65 | %{ 66 | "Count" => result_count + scanned_count, 67 | "Items" => 
result_items ++ scanned_items, 68 | "ScannedCount" => result_scanned_count + scanned_scanned_count 69 | } 70 | end 71 | end 72 | end 73 | -------------------------------------------------------------------------------- /lib/ecto_adapters_dynamodb/repo_config.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.RepoConfig do 2 | alias Confex.Resolver 3 | 4 | def table_in_list?(repo, table, list) do 5 | repo.config() 6 | |> Resolver.resolve!() 7 | |> Keyword.get(list, []) 8 | |> Enum.member?(table) 9 | end 10 | 11 | def config_val(repo, key, default \\ nil) do 12 | repo.config() 13 | |> Resolver.resolve!() 14 | |> Keyword.get(key, default) 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.Mixfile do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :ecto_adapters_dynamodb, 7 | version: "3.5.0", 8 | elixir: "~> 1.13", 9 | build_embedded: Mix.env() == :prod, 10 | start_permanent: Mix.env() == :prod, 11 | deps: deps(), 12 | test_coverage: [tool: ExCoveralls], 13 | dialyzer: [ 14 | ignore_warnings: "dialyzer_ignore.exs", 15 | plt_add_apps: [:ecto] 16 | ], 17 | docs: [main: "readme", extras: ["README.md"]], 18 | description: 19 | "A DynamoDB adapter for Ecto supporting basic queries. See https://github.com/circles-learning-labs/ecto_adapters_dynamodb for detailed instructions.", 20 | package: package(), 21 | source_url: "https://github.com/circles-learning-labs/ecto_adapters_dynamodb", 22 | preferred_cli_env: [ 23 | coveralls: :test, 24 | "coveralls.detail": :test, 25 | "coveralls.post": :test, 26 | "coveralls.html": :test 27 | ] 28 | ] 29 | end 30 | 31 | # Configuration for the OTP application 32 | # 33 | # Type "mix help compile.app" for more information 34 | def application do 35 | # Specify extra applications you'll use from Erlang/Elixir 36 | [ 37 | extra_applications: [:logger], 38 | mod: {Ecto.Adapters.DynamoDB.Application, []}, 39 | env: [ 40 | cached_tables: [], 41 | insert_nil_fields: true, 42 | dynamodb_local: false, 43 | log_levels: [:info], 44 | log_colours: %{info: :green, debug: :normal}, 45 | log_in_colour: System.get_env("MIX_ENV") == "dev", 46 | log_path: "", 47 | remove_nil_fields_on_update: false, 48 | scan_all: false, 49 | scan_limit: 100, 50 | scan_tables: [] 51 | ] 52 | ] 53 | end 54 | 55 | # Dependencies can be Hex packages: 56 | # 57 | # {:my_dep, "~> 0.3.0"} 58 | # 59 | # Or git/path repositories: 60 | # 61 | # {:my_dep, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"} 62 | # 63 | # Type "mix help deps" for more examples and options 64 | defp deps do 65 | [ 66 | {:confex, "~> 3.5.0"}, 67 | {:ecto_sql, "~> 3.11"}, 68 | {:ex_aws_dynamo, "~> 4.1"}, 69 | {:excoveralls, "~> 0.18", only: :test}, 70 | {:jason, "~> 1.0"}, 71 | {:hackney, "~> 1.6"}, 72 | {:dialyxir, "~> 1.0", only: [:dev, :test], runtime: false}, 73 | {:ex_doc, "~> 0.35.1", only: :dev, runtime: false}, 74 | {:mock, "~> 0.3.0", only: :test, runtime: false} 75 | ] 76 | end 77 | 78 | defp package do 79 | [ 80 | # files: ["lib", "priv", "mix.exs", "README*", "readme*", "LICENSE*", "license*"], 81 | maintainers: ["Franko Franicevich", "Darren Klein", "Gilad Barkan", "Nick Marino"], 82 | licenses: ["Apache 2.0"], 83 | exclude_patterns: [~r/.*~/, ~r/src\/.*\.erl/], 84 | links: %{"GitHub" => 
"https://github.com/circles-learning-labs/ecto_adapters_dynamodb"} 85 | ] 86 | end 87 | end 88 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "certifi": {:hex, :certifi, "2.12.0", "2d1cca2ec95f59643862af91f001478c9863c2ac9cb6e2f89780bfd8de987329", [:rebar3], [], "hexpm", "ee68d85df22e554040cdb4be100f33873ac6051387baf6a8f6ce82272340ff1c"}, 3 | "confex": {:hex, :confex, "3.5.1", "c29ea741715de06afe6294ea2e127ee7a54382f2b33656b9ed7eedf3e7b6fcaf", [:mix], [], "hexpm", "c72824a267edc96c09a35f6556f48c139d00393dd1b726b18faeb8efee62c0b0"}, 4 | "db_connection": {:hex, :db_connection, "2.7.0", "b99faa9291bb09892c7da373bb82cba59aefa9b36300f6145c5f201c7adf48ec", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "dcf08f31b2701f857dfc787fbad78223d61a32204f217f15e881dd93e4bdd3ff"}, 5 | "decimal": {:hex, :decimal, "2.1.1", "5611dca5d4b2c3dd497dec8f68751f1f1a54755e8ed2a966c2633cf885973ad6", [:mix], [], "hexpm", "53cfe5f497ed0e7771ae1a475575603d77425099ba5faef9394932b35020ffcc"}, 6 | "dialyxir": {:hex, :dialyxir, "1.4.5", "ca1571ac18e0f88d4ab245f0b60fa31ff1b12cbae2b11bd25d207f865e8ae78a", [:mix], [{:erlex, ">= 0.2.7", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "b0fb08bb8107c750db5c0b324fa2df5ceaa0f9307690ee3c1f6ba5b9eb5d35c3"}, 7 | "earmark_parser": {:hex, :earmark_parser, "1.4.41", "ab34711c9dc6212dda44fcd20ecb87ac3f3fce6f0ca2f28d4a00e4154f8cd599", [:mix], [], "hexpm", "a81a04c7e34b6617c2792e291b5a2e57ab316365c2644ddc553bb9ed863ebefa"}, 8 | "ecto": {:hex, :ecto, "3.11.2", "e1d26be989db350a633667c5cda9c3d115ae779b66da567c68c80cfb26a8c9ee", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "3c38bca2c6f8d8023f2145326cc8a80100c3ffe4dcbd9842ff867f7fc6156c65"}, 9 | "ecto_sql": {:hex, :ecto_sql, "3.11.1", "e9abf28ae27ef3916b43545f9578b4750956ccea444853606472089e7d169470", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.11.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.16.0 or ~> 0.17.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "ce14063ab3514424276e7e360108ad6c2308f6d88164a076aac8a387e1fea634"}, 10 | "erlex": {:hex, :erlex, "0.2.7", "810e8725f96ab74d17aac676e748627a07bc87eb950d2b83acd29dc047a30595", [:mix], [], "hexpm", "3ed95f79d1a844c3f6bf0cea61e0d5612a42ce56da9c03f01df538685365efb0"}, 11 | "ex_aws": {:hex, :ex_aws, "2.5.5", "5dc378eff99c3c46c917b7a96a75ad0d4c300ab7250df668d0819bcd18c0213d", [:mix], [{:configparser_ex, "~> 4.0", [hex: :configparser_ex, repo: "hexpm", optional: true]}, {:hackney, "~> 1.16", [hex: :hackney, repo: "hexpm", optional: true]}, {:jason, "~> 1.1", [hex: :jason, repo: "hexpm", optional: true]}, {:jsx, "~> 2.8 or ~> 3.0", [hex: :jsx, repo: "hexpm", optional: true]}, {:mime, "~> 1.2 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:req, "~> 0.3", [hex: :req, repo: "hexpm", optional: true]}, {:sweet_xml, "~> 0.7", [hex: :sweet_xml, repo: 
"hexpm", optional: true]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "ed7ee39c56012c56600e021953c6487ecce9c49320ec3b4655a15d785f221ca6"}, 12 | "ex_aws_dynamo": {:hex, :ex_aws_dynamo, "4.2.2", "7f7975b14f9999749b1dfb5bfff87fd80367dffcc2fe2dfea5a540ac216f5fe3", [:mix], [{:ex_aws, ">= 2.4.0", [hex: :ex_aws, repo: "hexpm", optional: false]}], "hexpm", "e61ee3e6b9e25794592059cd81356ebfc57676d9ff82755316925bf7feca672e"}, 13 | "ex_doc": {:hex, :ex_doc, "0.35.1", "de804c590d3df2d9d5b8aec77d758b00c814b356119b3d4455e4b8a8687aecaf", [:mix], [{:earmark_parser, "~> 1.4.39", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "2121c6402c8d44b05622677b761371a759143b958c6c19f6558ff64d0aed40df"}, 14 | "excoveralls": {:hex, :excoveralls, "0.18.3", "bca47a24d69a3179951f51f1db6d3ed63bca9017f476fe520eb78602d45f7756", [:mix], [{:castore, "~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "746f404fcd09d5029f1b211739afb8fb8575d775b21f6a3908e7ce3e640724c6"}, 15 | "hackney": {:hex, :hackney, "1.20.1", "8d97aec62ddddd757d128bfd1df6c5861093419f8f7a4223823537bad5d064e2", [:rebar3], [{:certifi, "~>2.12.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~>6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~>1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~>1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.4.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~>1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "fe9094e5f1a2a2c0a7d10918fee36bfec0ec2a979994cff8cfe8058cd9af38e3"}, 16 | "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, 17 | "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, 18 | "makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"}, 19 | "makeup_elixir": {:hex, :makeup_elixir, "1.0.0", "74bb8348c9b3a51d5c589bf5aebb0466a84b33274150e3b6ece1da45584afc82", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "49159b7d7d999e836bedaf09dcf35ca18b312230cf901b725a64f3f42e407983"}, 20 | "makeup_erlang": {:hex, :makeup_erlang, "1.0.1", "c7f58c120b2b5aa5fd80d540a89fdf866ed42f1f3994e4fe189abebeab610839", [:mix], [{:makeup, "~> 1.0", 
[hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "8a89a1eeccc2d798d6ea15496a6e4870b75e014d1af514b1b71fa33134f57814"}, 21 | "meck": {:hex, :meck, "0.9.2", "85ccbab053f1db86c7ca240e9fc718170ee5bda03810a6292b5306bf31bae5f5", [:rebar3], [], "hexpm", "81344f561357dc40a8344afa53767c32669153355b626ea9fcbc8da6b3045826"}, 22 | "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, 23 | "mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"}, 24 | "mimerl": {:hex, :mimerl, "1.3.0", "d0cd9fc04b9061f82490f6581e0128379830e78535e017f7780f37fea7545726", [:rebar3], [], "hexpm", "a1e15a50d1887217de95f0b9b0793e32853f7c258a5cd227650889b38839fe9d"}, 25 | "mock": {:hex, :mock, "0.3.8", "7046a306b71db2488ef54395eeb74df0a7f335a7caca4a3d3875d1fc81c884dd", [:mix], [{:meck, "~> 0.9.2", [hex: :meck, repo: "hexpm", optional: false]}], "hexpm", "7fa82364c97617d79bb7d15571193fc0c4fe5afd0c932cef09426b3ee6fe2022"}, 26 | "nimble_parsec": {:hex, :nimble_parsec, "1.4.0", "51f9b613ea62cfa97b25ccc2c1b4216e81df970acd8e16e8d1bdc58fef21370d", [:mix], [], "hexpm", "9c565862810fb383e9838c1dd2d7d2c437b3d13b267414ba6af33e50d2d1cf28"}, 27 | "parse_trans": {:hex, :parse_trans, "3.4.1", "6e6aa8167cb44cc8f39441d05193be6e6f4e7c2946cb2759f015f8c56b76e5ff", [:rebar3], [], "hexpm", "620a406ce75dada827b82e453c19cf06776be266f5a67cff34e1ef2cbb60e49a"}, 28 | "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"}, 29 | "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, 30 | "unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", "25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"}, 31 | } 32 | -------------------------------------------------------------------------------- /test/adapter_property_test.exs: -------------------------------------------------------------------------------- 1 | # Skip EQC testing if we don't have it installed: 2 | if Code.ensure_compiled(:eqc) == {:module, :eqc} do 3 | defmodule AdapterPropertyTest do 4 | use ExUnit.Case 5 | use EQC.ExUnit 6 | 7 | alias Ecto.Adapters.DynamoDB.TestRepo 8 | alias Ecto.Adapters.DynamoDB.TestSchema.Person 9 | 10 | setup_all do 11 | TestHelper.setup_all() 12 | end 13 | 14 | property "test insert/get returns the same value" do 15 | forall person <- TestGenerators.person_generator() do 16 | when_fail(IO.puts("Failed for person #{inspect(person)}")) do 17 | TestRepo.insert!(Person.changeset(person), on_conflict: :replace_all) 18 | result = TestRepo.get(Person, person.id) 19 | ensure(person == result) 20 | end 21 | end 22 | end 23 | end 24 | end 25 | -------------------------------------------------------------------------------- /test/adapter_state_eqc_test.exs: -------------------------------------------------------------------------------- 1 | # Skip EQC testing if we don't have it installed: 2 | if Code.ensure_compiled(:eqc) == {:module, :eqc} do 3 | defmodule AdapterStateEqcTest do 4 | 
# This test runs a series of database operations through the adapter 5 | # and checks that everything appears to stay consistent and follow 6 | # all the rules the way we expect it to. 7 | 8 | use ExUnit.Case 9 | use EQC.ExUnit 10 | use EQC.StateM 11 | 12 | import Ecto.Query 13 | import TestGenerators 14 | 15 | alias Ecto.Adapters.DynamoDB.TestRepo 16 | alias Ecto.Adapters.DynamoDB.TestSchema.Person 17 | 18 | @keys ~w[a b c d e] 19 | 20 | setup_all do 21 | TestHelper.setup_all() 22 | end 23 | 24 | defmodule State do 25 | defstruct db: %{} 26 | end 27 | 28 | # Generators 29 | def key, do: oneof(@keys) 30 | def value, do: person_with_id(key()) 31 | 32 | def key_list, do: @keys |> Enum.shuffle() |> sublist 33 | 34 | def value_list do 35 | # Generates a list of people, all with different keys: 36 | let keys <- key_list() do 37 | for k <- keys, do: person_with_id(k) 38 | end 39 | end 40 | 41 | def change_list do 42 | let fs <- fields() do 43 | for {name, type} <- fs, into: %{}, do: {name, gen_field_val(type)} 44 | end 45 | end 46 | 47 | def fields do 48 | Person.get_fields() 49 | |> Enum.filter(&(elem(&1, 1) != :binary_id)) 50 | # order probably doesn't matter here, but can't hurt to mix it up! 51 | |> Enum.shuffle() 52 | |> sublist 53 | end 54 | 55 | def gen_field_val(:string), do: nonempty_str() 56 | def gen_field_val(:integer), do: int() 57 | def gen_field_val({:array, type}), do: type |> gen_field_val |> list |> non_empty 58 | def gen_field_val({:embed, %Ecto.Embedded{cardinality: :many}}), do: [] 59 | 60 | def insert_opts do 61 | oneof([ 62 | [on_conflict: :nothing], 63 | [on_conflict: :replace_all], 64 | [on_conflict: :raise], 65 | # Should have same behavior as :raise, per Ecto docs 66 | [] 67 | ]) 68 | end 69 | 70 | # Properties 71 | property "stateful adapter test" do 72 | forall cmds <- commands(__MODULE__) do 73 | for k <- @keys, do: delete_row(k) 74 | 75 | results = run_commands(cmds) 76 | pretty_commands(cmds, results, results[:result] == :ok) 77 | end 78 | end 79 | 80 | # Helper functions 81 | 82 | def delete_row(id) do 83 | TestRepo.delete_all(from(p in Person, where: p.id == ^id)) 84 | end 85 | 86 | def cmp_people(a, b) do 87 | a = Map.delete(a, :__meta__) 88 | b = Map.delete(b, :__meta__) 89 | a == b 90 | end 91 | 92 | # StateM callbacks 93 | 94 | # We'll keep a simple map as our state which represents 95 | # the expected contents of the database 96 | def initial_state, do: %State{} 97 | 98 | # INSERT 99 | 100 | def insert_args(_s) do 101 | [value(), insert_opts()] 102 | end 103 | 104 | def insert(value, opts) do 105 | value |> Person.changeset() |> TestRepo.insert(opts) 106 | end 107 | 108 | def insert_post(s, [value, opts], result) do 109 | on_conflict = Keyword.get(opts, :on_conflict, :raise) 110 | value_exists = Map.has_key?(s.db, value.id) 111 | 112 | case {on_conflict, value_exists, result} do 113 | {:raise, true, {:error, %Ecto.Changeset{errors: [id: {"has already been taken", []}]}}} -> 114 | true 115 | 116 | {:nothing, true, {:ok, []}} -> 117 | true 118 | 119 | {_, _, {:ok, result_value}} -> 120 | cmp_people(value, result_value) 121 | end 122 | end 123 | 124 | def insert_next(s, _result, [value, opts]) do 125 | on_conflict = Keyword.get(opts, :on_conflict, :raise) 126 | 127 | new_db = 128 | case on_conflict do 129 | :replace_all -> 130 | Map.put(s.db, value.id, value) 131 | 132 | _ -> 133 | Map.put_new(s.db, value.id, value) 134 | end 135 | 136 | %State{s | db: new_db} 137 | end 138 | 139 | # INSERT_ALL 140 | 141 | def insert_all_args(_s) do 142 | [value_list()] 143 | 
end 144 | 145 | def insert_all(values) do 146 | map_values = for v <- values, do: Map.drop(v, [:__meta__, :__struct__]) 147 | TestRepo.insert_all(Person, map_values) 148 | end 149 | 150 | def insert_all_post(_s, [values], result) do 151 | result == {length(values), nil} 152 | end 153 | 154 | def insert_all_next(s, _result, [values]) do 155 | new_db = for v <- values, into: s.db, do: {v.id, v} 156 | %State{s | db: new_db} 157 | end 158 | 159 | # GET 160 | 161 | def get_args(_s) do 162 | [key()] 163 | end 164 | 165 | def get(key) do 166 | TestRepo.get(Person, key) 167 | end 168 | 169 | def get_post(s, [key], result) do 170 | case Map.get(s.db, key) do 171 | nil -> 172 | result == nil 173 | 174 | value -> 175 | result == value 176 | end 177 | end 178 | 179 | # UPDATE 180 | 181 | def update_args(_s) do 182 | [key(), change_list()] 183 | end 184 | 185 | def update(key, change_list) do 186 | case TestRepo.get(Person, key) do 187 | nil -> 188 | :not_found 189 | 190 | res -> 191 | res 192 | |> Person.changeset(change_list) 193 | |> TestRepo.update!() 194 | end 195 | end 196 | 197 | def update_post(s, [key, change_list], result) do 198 | case Map.get(s.db, key) do 199 | nil -> 200 | result == :not_found 201 | 202 | state_val -> 203 | next_val = Map.merge(state_val, change_list) 204 | cmp_people(next_val, result) 205 | end 206 | end 207 | 208 | def update_next(s, _result, [key, change_list]) do 209 | case Map.get(s.db, key) do 210 | nil -> 211 | s 212 | 213 | val -> 214 | new_val = Map.merge(val, change_list) 215 | new_db = %{s.db | key => new_val} 216 | %State{s | db: new_db} 217 | end 218 | end 219 | 220 | # DELETE 221 | 222 | def delete_args(_s) do 223 | [key()] 224 | end 225 | 226 | def delete(key) do 227 | try do 228 | TestRepo.delete(%Person{id: key}) 229 | rescue 230 | # Raising a "StaleEntryError" sure seems like a weird, unintuitive way 231 | # to signal that we tried to delete a non-existent value, but this is 232 | # also the way it works for other adapters so I'm assuming this is normal... 233 | Ecto.StaleEntryError -> :not_found 234 | end 235 | end 236 | 237 | def delete_post(s, [key], {:ok, _}) do 238 | Map.has_key?(s.db, key) 239 | end 240 | 241 | def delete_post(s, [key], :not_found) do 242 | !Map.has_key?(s.db, key) 243 | end 244 | 245 | def delete_next(s, _result, [key]) do 246 | new_db = Map.delete(s.db, key) 247 | %State{s | db: new_db} 248 | end 249 | end 250 | end 251 | -------------------------------------------------------------------------------- /test/ecto_adapters_dynamodb/dynamodbset_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.DynamoDBSet.Test do 2 | @moduledoc """ 3 | Unit tests for Ecto.Adapters.DynamoDB.DynamoDBSet 4 | """ 5 | 6 | use ExUnit.Case 7 | 8 | import Ecto.Adapters.DynamoDB.DynamoDBSet 9 | 10 | test "type" do 11 | assert type() == MapSet 12 | end 13 | 14 | test "cast" do 15 | valid_mapset = MapSet.new([1, 2, 3]) 16 | invalid_mapset = MapSet.new([1, 2, :foo]) 17 | 18 | assert cast(valid_mapset) == {:ok, valid_mapset} 19 | assert cast(invalid_mapset) == :error 20 | assert cast(%{foo: :bar}) == :error 21 | end 22 | 23 | test "load" do 24 | mapset = MapSet.new([1, 2, 3]) 25 | 26 | assert load(mapset) == {:ok, mapset} 27 | end 28 | 29 | test "dump" do 30 | mapset = MapSet.new([1, 2, 3]) 31 | 32 | assert dump(mapset) == {:ok, mapset} 33 | end 34 | 35 | test "equal?" 
do 36 | mapset_a = MapSet.new([1, 2, 3]) 37 | mapset_b = MapSet.new([1, 2, 3]) 38 | mapset_c = MapSet.new([:a, :b, :c]) 39 | 40 | assert equal?(mapset_a, mapset_b) 41 | refute equal?(mapset_b, mapset_c) 42 | end 43 | 44 | test "embed_as" do 45 | assert embed_as(MapSet) == :self 46 | end 47 | end 48 | -------------------------------------------------------------------------------- /test/ecto_adapters_dynamodb/info_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.Info.Test do 2 | @moduledoc """ 3 | Unit tests for Ecto.Adapters.DynamoDB.Info 4 | """ 5 | 6 | use ExUnit.Case 7 | 8 | import Ecto.Adapters.DynamoDB.Info 9 | 10 | alias Ecto.Adapters.DynamoDB.TestRepo 11 | 12 | setup_all do 13 | TestHelper.setup_all() 14 | 15 | on_exit(fn -> 16 | TestHelper.on_exit() 17 | end) 18 | end 19 | 20 | test "table_info" do 21 | info = normalise_info(table_info(TestRepo, "test_planet")) 22 | 23 | assert normalise_info(%{ 24 | "AttributeDefinitions" => [ 25 | %{"AttributeName" => "id", "AttributeType" => "S"}, 26 | %{"AttributeName" => "mass", "AttributeType" => "N"}, 27 | %{"AttributeName" => "name", "AttributeType" => "S"} 28 | ], 29 | "CreationDateTime" => info["CreationDateTime"], 30 | "DeletionProtectionEnabled" => false, 31 | "GlobalSecondaryIndexes" => [ 32 | %{ 33 | "IndexArn" => 34 | "arn:aws:dynamodb:ddblocal:000000000000:table/test_planet/index/name_mass", 35 | "IndexName" => "name_mass", 36 | "IndexSizeBytes" => 0, 37 | "IndexStatus" => "ACTIVE", 38 | "ItemCount" => 0, 39 | "KeySchema" => [ 40 | %{"AttributeName" => "name", "KeyType" => "HASH"}, 41 | %{"AttributeName" => "mass", "KeyType" => "RANGE"} 42 | ], 43 | "Projection" => %{"ProjectionType" => "ALL"}, 44 | "ProvisionedThroughput" => %{ 45 | "ReadCapacityUnits" => 100, 46 | "WriteCapacityUnits" => 100 47 | } 48 | } 49 | ], 50 | "ItemCount" => 0, 51 | "KeySchema" => [ 52 | %{"AttributeName" => "id", "KeyType" => "HASH"}, 53 | %{"AttributeName" => "name", "KeyType" => "RANGE"} 54 | ], 55 | "ProvisionedThroughput" => %{ 56 | "LastDecreaseDateTime" => 0.0, 57 | "LastIncreaseDateTime" => 0.0, 58 | "NumberOfDecreasesToday" => 0, 59 | "ReadCapacityUnits" => 100, 60 | "WriteCapacityUnits" => 100 61 | }, 62 | "TableArn" => "arn:aws:dynamodb:ddblocal:000000000000:table/test_planet", 63 | "TableName" => "test_planet", 64 | "TableSizeBytes" => 0, 65 | "TableStatus" => "ACTIVE" 66 | }) == info 67 | end 68 | 69 | test "index_details" do 70 | info = normalise_info(index_details(TestRepo, "test_person")) 71 | 72 | assert normalise_info(%{ 73 | primary: [%{"AttributeName" => "id", "KeyType" => "HASH"}], 74 | secondary: [ 75 | %{ 76 | "IndexArn" => 77 | "arn:aws:dynamodb:ddblocal:000000000000:table/test_person/index/first_name_age", 78 | "IndexName" => "first_name_age", 79 | "IndexSizeBytes" => 0, 80 | "IndexStatus" => "ACTIVE", 81 | "ItemCount" => 0, 82 | "KeySchema" => [ 83 | %{"AttributeName" => "first_name", "KeyType" => "HASH"}, 84 | %{"AttributeName" => "age", "KeyType" => "RANGE"} 85 | ], 86 | "Projection" => %{"ProjectionType" => "ALL"}, 87 | "ProvisionedThroughput" => %{ 88 | "ReadCapacityUnits" => 100, 89 | "WriteCapacityUnits" => 100 90 | } 91 | }, 92 | %{ 93 | "IndexArn" => 94 | "arn:aws:dynamodb:ddblocal:000000000000:table/test_person/index/age_first_name", 95 | "IndexName" => "age_first_name", 96 | "IndexSizeBytes" => 0, 97 | "IndexStatus" => "ACTIVE", 98 | "ItemCount" => 0, 99 | "KeySchema" => [ 100 | %{"AttributeName" => "age", "KeyType" => "HASH"}, 101 | 
%{"AttributeName" => "first_name", "KeyType" => "RANGE"} 102 | ], 103 | "Projection" => %{"ProjectionType" => "ALL"}, 104 | "ProvisionedThroughput" => %{ 105 | "ReadCapacityUnits" => 100, 106 | "WriteCapacityUnits" => 100 107 | } 108 | }, 109 | %{ 110 | "IndexArn" => 111 | "arn:aws:dynamodb:ddblocal:000000000000:table/test_person/index/first_name", 112 | "IndexName" => "first_name", 113 | "IndexSizeBytes" => 0, 114 | "IndexStatus" => "ACTIVE", 115 | "ItemCount" => 0, 116 | "KeySchema" => [%{"AttributeName" => "first_name", "KeyType" => "HASH"}], 117 | "Projection" => %{"ProjectionType" => "ALL"}, 118 | "ProvisionedThroughput" => %{ 119 | "ReadCapacityUnits" => 100, 120 | "WriteCapacityUnits" => 100 121 | } 122 | }, 123 | %{ 124 | "IndexArn" => 125 | "arn:aws:dynamodb:ddblocal:000000000000:table/test_person/index/first_name_email", 126 | "IndexName" => "first_name_email", 127 | "IndexSizeBytes" => 0, 128 | "IndexStatus" => "ACTIVE", 129 | "ItemCount" => 0, 130 | "KeySchema" => [ 131 | %{"AttributeName" => "first_name", "KeyType" => "HASH"}, 132 | %{"AttributeName" => "email", "KeyType" => "RANGE"} 133 | ], 134 | "Projection" => %{"ProjectionType" => "ALL"}, 135 | "ProvisionedThroughput" => %{ 136 | "ReadCapacityUnits" => 100, 137 | "WriteCapacityUnits" => 100 138 | } 139 | }, 140 | %{ 141 | "IndexArn" => 142 | "arn:aws:dynamodb:ddblocal:000000000000:table/test_person/index/email", 143 | "IndexName" => "email", 144 | "IndexSizeBytes" => 0, 145 | "IndexStatus" => "ACTIVE", 146 | "ItemCount" => 0, 147 | "KeySchema" => [%{"AttributeName" => "email", "KeyType" => "HASH"}], 148 | "Projection" => %{"ProjectionType" => "ALL"}, 149 | "ProvisionedThroughput" => %{ 150 | "ReadCapacityUnits" => 100, 151 | "WriteCapacityUnits" => 100 152 | } 153 | } 154 | ] 155 | }) == info 156 | end 157 | 158 | test "indexes" do 159 | assert indexes(TestRepo, "test_person") == [ 160 | {:primary, ["id"]}, 161 | {"first_name_age", ["first_name", "age"]}, 162 | {"age_first_name", ["age", "first_name"]}, 163 | {"first_name", ["first_name"]}, 164 | {"first_name_email", ["first_name", "email"]}, 165 | {"email", ["email"]} 166 | ] 167 | end 168 | 169 | test "primary_key!" 
do 170 | assert primary_key!(TestRepo, "test_planet") == {:primary, ["id", "name"]} 171 | end 172 | 173 | test "repo_primary_key" do 174 | assert repo_primary_key(Ecto.Adapters.DynamoDB.TestSchema.Person) == "id" 175 | 176 | assert_raise ArgumentError, 177 | "DynamoDB repos must have a single primary key, but repo Elixir.Ecto.Adapters.DynamoDB.TestSchema.BookPage has more than one", 178 | fn -> 179 | repo_primary_key(Ecto.Adapters.DynamoDB.TestSchema.BookPage) 180 | end 181 | end 182 | 183 | test "secondary_indexes" do 184 | assert secondary_indexes(TestRepo, "test_person") == [ 185 | {"first_name_age", ["first_name", "age"]}, 186 | {"age_first_name", ["age", "first_name"]}, 187 | {"first_name", ["first_name"]}, 188 | {"first_name_email", ["first_name", "email"]}, 189 | {"email", ["email"]} 190 | ] 191 | end 192 | 193 | test "indexed_attributes" do 194 | assert indexed_attributes(TestRepo, "test_planet") == ["id", "name", "mass"] 195 | end 196 | 197 | defp normalise_info(info) when is_map(info) do 198 | info 199 | |> Enum.map(fn {k, v} -> {k, normalise_info(v)} end) 200 | |> Map.new() 201 | end 202 | 203 | defp normalise_info(info) when is_list(info) do 204 | info 205 | |> Enum.map(&normalise_info/1) 206 | |> Enum.sort() 207 | end 208 | 209 | defp normalise_info(info), do: info 210 | end 211 | -------------------------------------------------------------------------------- /test/ecto_adapters_dynamodb/migration_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.Migration.Test do 2 | @moduledoc """ 3 | Unit tests for migrations. 4 | 5 | Test migrations will be tracked in the test_schema_migrations table (see config/test.exs) 6 | 7 | Note that migration tests must be run in order, so in test_helper.exs, we use the command `ExUnit.start(seed: 0)` 8 | 9 | The order of tests in this file MUST match the order of execution of the files in test/priv/test_repo/migrations 10 | """ 11 | 12 | # When the "down" tests are run at the end, suppress the "redefining modules" warnings. 
13 | # https://stackoverflow.com/questions/36926388/how-can-i-avoid-the-warning-redefining-module-foo-when-running-exunit-tests-m 14 | Code.compiler_options(ignore_module_conflict: true) 15 | 16 | use ExUnit.Case 17 | 18 | alias Ecto.Adapters.DynamoDB 19 | alias Ecto.Adapters.DynamoDB.TestRepo 20 | 21 | @migration_path Path.expand("test/priv/test_repo/migrations") 22 | 23 | setup_all do 24 | TestHelper.setup_all(:migration) 25 | 26 | on_exit(fn -> 27 | TestHelper.on_exit(:migration) 28 | end) 29 | end 30 | 31 | describe "execute_ddl" do 32 | test "create_if_not_exists: on-demand table" do 33 | result = Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 1) 34 | table_info = Ecto.Adapters.DynamoDB.Info.table_info(TestRepo, "dog") 35 | 36 | assert length(result) == 1 37 | assert table_info["BillingModeSummary"]["BillingMode"] == "PAY_PER_REQUEST" 38 | 39 | {:ok, ttl_description} = Ecto.Adapters.DynamoDB.Info.ttl_info(TestRepo, "dog") 40 | 41 | assert %{ 42 | "TimeToLiveDescription" => %{ 43 | "AttributeName" => "ttl", 44 | "TimeToLiveStatus" => "ENABLED" 45 | } 46 | } == ttl_description 47 | end 48 | 49 | test "create: provisioned table" do 50 | result = Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 1) 51 | 52 | assert length(result) == 1 53 | end 54 | 55 | test "alter table: add index to on-demand table" do 56 | result = Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 1) 57 | {:ok, table_info} = ExAws.Dynamo.describe_table("dog") |> request() 58 | [index] = table_info["Table"]["GlobalSecondaryIndexes"] 59 | 60 | assert length(result) == 1 61 | assert index["IndexName"] == "name" 62 | assert index["OnDemandThroughput"]["MaxReadRequestUnits"] == -1 63 | assert index["OnDemandThroughput"]["MaxWriteRequestUnits"] == -1 64 | 65 | {:ok, ttl_description} = Ecto.Adapters.DynamoDB.Info.ttl_info(TestRepo, "dog") 66 | 67 | assert %{ 68 | "TimeToLiveDescription" => %{ 69 | "AttributeName" => "ttl", 70 | "TimeToLiveStatus" => "ENABLED" 71 | } 72 | } == ttl_description 73 | end 74 | 75 | test "alter table: add index to provisioned table" do 76 | result = Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 1) 77 | {:ok, table_info} = ExAws.Dynamo.describe_table("cat") |> request() 78 | [index] = table_info["Table"]["GlobalSecondaryIndexes"] 79 | 80 | assert length(result) == 1 81 | assert index["IndexName"] == "name" 82 | assert index["ProvisionedThroughput"]["ReadCapacityUnits"] == 2 83 | assert index["ProvisionedThroughput"]["WriteCapacityUnits"] == 1 84 | end 85 | 86 | test "create_if_not_exists: on-demand table with index" do 87 | result = Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 1) 88 | {:ok, table_info} = ExAws.Dynamo.describe_table("rabbit") |> request() 89 | [foo_index, name_index] = table_info["Table"]["GlobalSecondaryIndexes"] 90 | 91 | assert length(result) == 1 92 | assert name_index["IndexName"] == "name" 93 | assert foo_index["IndexName"] == "foo" 94 | end 95 | 96 | test "alter table: modify index throughput" do 97 | result = Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 1) 98 | {:ok, table_info} = ExAws.Dynamo.describe_table("cat") |> request() 99 | [index] = table_info["Table"]["GlobalSecondaryIndexes"] 100 | 101 | assert length(result) == 1 102 | assert index["IndexName"] == "name" 103 | assert index["ProvisionedThroughput"]["ReadCapacityUnits"] == 3 104 | assert index["ProvisionedThroughput"]["WriteCapacityUnits"] == 2 105 | end 106 | 107 | test "alter table: attempt to add an index that already exists" do 108 | result = 
Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 1)
109 |       {:ok, table_info} = ExAws.Dynamo.describe_table("cat") |> request()
110 |       [index] = table_info["Table"]["GlobalSecondaryIndexes"]
111 | 
112 |       assert length(result) == 1
113 |       assert index["IndexName"] == "name"
114 | 
115 |       # If the migration is successful, the throughput specified by the preceding migration will not have been altered.
116 |       assert index["ProvisionedThroughput"]["ReadCapacityUnits"] == 3
117 |       assert index["ProvisionedThroughput"]["WriteCapacityUnits"] == 2
118 |     end
119 |   end
120 | 
121 |   describe "execute_ddl - local vs. production discrepancies" do
122 |     # In the pair of migrations in this test, we create a provisioned table and then attempt to add an index with no specified throughput, as you would for an on-demand table.
123 |     # This is meant to replicate a scenario where a provisioned table is set to on-demand via the AWS dashboard...
124 |     # some time later, a developer writes a migration to add an index to the (now on-demand) table in production, but her local table is still provisioned.
125 |     # The index migration will not specify provisioned_throughput, but this will fail locally - the dev version of DDB will just hang rather than raising an error.
126 |     # When Mix.env is :dev or :test, we'll need to quietly add provisioned_throughput to the index so that the migration can be run.
127 |     # The logic associated with this can be found in lib/migration.ex, under the private function maybe_default_throughput/3.
128 |     test "create_if_not_exists and alter table: add an index to a table where the billing mode has been manually changed to on-demand in production" do
129 |       result = Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 2)
130 | 
131 |       assert length(result) == 2
132 |     end
133 |   end
134 | 
135 |   describe "TTL modification" do
136 |     test "add TTL" do
137 |       result = Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 1)
138 |       assert length(result) == 1
139 | 
140 |       {:ok, ttl_description} = Ecto.Adapters.DynamoDB.Info.ttl_info(TestRepo, "cat")
141 | 
142 |       assert %{
143 |                "TimeToLiveDescription" => %{
144 |                  "AttributeName" => "ttl",
145 |                  "TimeToLiveStatus" => "ENABLED"
146 |                }
147 |              } == ttl_description
148 |     end
149 | 
150 |     test "remove TTL" do
151 |       result = Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 1)
152 |       assert length(result) == 1
153 | 
154 |       {:ok, ttl_description} = Ecto.Adapters.DynamoDB.Info.ttl_info(TestRepo, "dog")
155 | 
156 |       assert %{
157 |                "TimeToLiveDescription" => %{
158 |                  "TimeToLiveStatus" => "DISABLED"
159 |                }
160 |              } == ttl_description
161 |     end
162 |   end
163 | 
164 |   describe "Stream-enabled table" do
165 |     test "Create table with streaming" do
166 |       result = Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 1)
167 |       assert length(result) == 1
168 | 
169 |       table_description = Ecto.Adapters.DynamoDB.Info.table_info(TestRepo, "stream")
170 | 
171 |       assert %{
172 |                "StreamSpecification" => %{
173 |                  "StreamEnabled" => true,
174 |                  "StreamViewType" => "KEYS_ONLY"
175 |                }
176 |              } = table_description
177 |     end
178 | 
179 |     test "Add streaming to existing table" do
180 |       result = Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 1)
181 |       assert length(result) == 1
182 | 
183 |       table_description = Ecto.Adapters.DynamoDB.Info.table_info(TestRepo, "cat")
184 | 
185 |       assert %{
186 |                "StreamSpecification" => %{
187 |                  "StreamEnabled" => true,
188 |                  "StreamViewType" => "NEW_IMAGE"
189 |                }
190 |              } = table_description
191 |     end
192 | 
193 |     @tag :skip
194 |     # It looks like
this request currently crashes DDB local. As far as I can tell 195 | # the request itself is being generated correctly 196 | test "remove streaming from existing table" do 197 | result = Ecto.Migrator.run(TestRepo, @migration_path, :up, step: 1) |> IO.inspect() 198 | assert length(result) == 1 199 | 200 | table_description = Ecto.Adapters.DynamoDB.Info.table_info(TestRepo, "stream") 201 | 202 | assert %{ 203 | "StreamSpecification" => %{ 204 | "StreamEnabled" => false 205 | } 206 | } = table_description 207 | end 208 | end 209 | 210 | test "run migrations down" do 211 | migrations = 212 | @migration_path 213 | |> File.ls!() 214 | |> Enum.filter(&(Path.extname(&1) == ".exs")) 215 | 216 | result = Ecto.Migrator.run(TestRepo, @migration_path, :down, all: true) |> IO.inspect() 217 | 218 | # -1 required because we're skipping one test for now 219 | assert length(result) == length(migrations) - 1 220 | end 221 | 222 | defp request(request), do: ExAws.request(request, DynamoDB.ex_aws_config(TestRepo)) 223 | end 224 | -------------------------------------------------------------------------------- /test/ecto_adapters_dynamodb/query_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.Query.Test do 2 | @moduledoc """ 3 | Unit tests for the query module. 4 | """ 5 | use ExUnit.Case 6 | 7 | import Ecto.Adapters.DynamoDB.Query, only: [get_matching_secondary_index: 4] 8 | 9 | alias Ecto.Adapters.DynamoDB.TestRepo 10 | 11 | setup_all do 12 | TestHelper.setup_all() 13 | 14 | on_exit(fn -> 15 | TestHelper.on_exit() 16 | end) 17 | end 18 | 19 | # When we have a hash-only key that also appears as the hash part of a composite key, 20 | # query on the key that best matches the situation. In the example below, we have two indexes 21 | # on the test_person table, first_name and first_name_email. If we just query on a hash indexed field 22 | # (either on its own, or with additional conditions), use the hash-only key rather than the composite key; 23 | # otherwise, querying with the composite key would fail to return records where a first_name was provided but email was nil. 24 | test "get_matching_secondary_index/4" do 25 | tablename = "test_person" 26 | 27 | hash_idx_result = 28 | get_matching_secondary_index(TestRepo, tablename, [{"first_name", {"Jerry", :==}}], []) 29 | 30 | composite_idx_result = 31 | get_matching_secondary_index( 32 | TestRepo, 33 | tablename, 34 | [and: [{"first_name", {"Jerry", :==}}, {"email", {"jerry@test.com", :==}}]], 35 | [] 36 | ) 37 | 38 | multi_cond_hash_idx_result = 39 | get_matching_secondary_index( 40 | TestRepo, 41 | tablename, 42 | [and: [{"first_name", {"Jerry", :==}}, {"last_name", {"Garcia", :==}}]], 43 | [] 44 | ) 45 | 46 | # If a user provides an explicit :index option, select that index if it is available. 
47 | string_idx_option_result = 48 | get_matching_secondary_index( 49 | TestRepo, 50 | tablename, 51 | [and: [{"first_name", {"Jerry", :==}}, {"last_name", {"Garcia", :==}}]], 52 | index: "email" 53 | ) 54 | 55 | atom_idx_option_result = 56 | get_matching_secondary_index( 57 | TestRepo, 58 | tablename, 59 | [and: [{"first_name", {"Jerry", :==}}, {"last_name", {"Garcia", :==}}]], 60 | index: :email 61 | ) 62 | 63 | assert hash_idx_result == {"first_name", ["first_name"]} 64 | assert composite_idx_result == {"first_name_email", ["first_name", "email"]} 65 | assert multi_cond_hash_idx_result == {"first_name", ["first_name"]} 66 | assert string_idx_option_result == {"email", ["email"]} 67 | assert atom_idx_option_result == {"email", ["email"]} 68 | 69 | assert_raise( 70 | ArgumentError, 71 | "Ecto.Adapters.DynamoDB.Query.get_matching_secondary_index/4 error: :index option does not match existing secondary index names. Did you mean email?", 72 | fn -> 73 | get_matching_secondary_index(TestRepo, tablename, [{"first_name", {"Jerry", :==}}], 74 | index: "emai" 75 | ) 76 | end 77 | ) 78 | 79 | assert_raise( 80 | ArgumentError, 81 | "Ecto.Adapters.DynamoDB.Query.get_matching_secondary_index/4 error: :index option does not match existing secondary index names.", 82 | fn -> 83 | get_matching_secondary_index(TestRepo, tablename, [{"first_name", {"Jerry", :==}}], 84 | index: :foobar 85 | ) 86 | end 87 | ) 88 | end 89 | end 90 | -------------------------------------------------------------------------------- /test/integration/ex_aws_dynamo_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.Integration.ExAws.Dynamo.Test do 2 | @moduledoc """ 3 | Integration tests for ExAws.Dynamo. 4 | """ 5 | 6 | use ExUnit.Case 7 | 8 | alias Ecto.Adapters.DynamoDB 9 | alias Ecto.Adapters.DynamoDB.TestRepo 10 | alias ExAws.Dynamo 11 | 12 | @ex_aws_dynamo_test_table_name "ex_aws_dynamo_test_table" 13 | 14 | test "create_table" do 15 | Dynamo.create_table( 16 | @ex_aws_dynamo_test_table_name, 17 | [email: :hash, age: :range], 18 | [email: :string, age: :number], 19 | 1, 20 | 1 21 | ) 22 | |> request!() 23 | 24 | {:ok, table_info} = ExAws.Dynamo.describe_table(@ex_aws_dynamo_test_table_name) |> request() 25 | 26 | assert table_info["Table"]["TableName"] == @ex_aws_dynamo_test_table_name 27 | end 28 | 29 | test "update_table" do 30 | Dynamo.update_table(@ex_aws_dynamo_test_table_name, billing_mode: :pay_per_request) 31 | |> request!() 32 | 33 | {:ok, table_info} = ExAws.Dynamo.describe_table(@ex_aws_dynamo_test_table_name) |> request() 34 | 35 | assert table_info["Table"]["BillingModeSummary"]["BillingMode"] == "PAY_PER_REQUEST" 36 | end 37 | 38 | test "delete_table" do 39 | result = Dynamo.delete_table(@ex_aws_dynamo_test_table_name) |> request!() 40 | 41 | assert result["TableDescription"]["TableName"] == @ex_aws_dynamo_test_table_name 42 | end 43 | 44 | test "Decoder.decode()" do 45 | assert Dynamo.Decoder.decode(%{"BOOL" => true}) == true 46 | assert Dynamo.Decoder.decode(%{"BOOL" => false}) == false 47 | assert Dynamo.Decoder.decode(%{"BOOL" => "true"}) == true 48 | assert Dynamo.Decoder.decode(%{"BOOL" => "false"}) == false 49 | assert Dynamo.Decoder.decode(%{"NULL" => true}) == nil 50 | assert Dynamo.Decoder.decode(%{"NULL" => "true"}) == nil 51 | assert Dynamo.Decoder.decode(%{"B" => "Zm9vYmFy"}) == "foobar" 52 | assert Dynamo.Decoder.decode(%{"S" => "foo"}) == "foo" 53 | assert Dynamo.Decoder.decode(%{"M" => %{"M" => %{foo: %{"S" => 
"bar"}}}}) == %{foo: "bar"} 54 | 55 | assert Dynamo.Decoder.decode(%{"BS" => ["U3Vubnk=", "UmFpbnk=", "U25vd3k="]}) == 56 | MapSet.new(["U3Vubnk=", "UmFpbnk=", "U25vd3k="]) 57 | 58 | assert Dynamo.Decoder.decode(%{"SS" => ["foo", "bar", "baz"]}) == 59 | MapSet.new(["foo", "bar", "baz"]) 60 | 61 | assert Dynamo.Decoder.decode(%{"NS" => [1, 2, 3]}) == MapSet.new([1, 2, 3]) 62 | assert Dynamo.Decoder.decode(%{"NS" => ["1", "2", "3"]}) == MapSet.new([1, 2, 3]) 63 | assert Dynamo.Decoder.decode(%{"L" => [%{"S" => "asdf"}, %{"N" => "1"}]}) == ["asdf", 1] 64 | end 65 | 66 | defp request(request), do: ExAws.request(request, DynamoDB.ex_aws_config(TestRepo)) 67 | defp request!(request), do: ExAws.request!(request, DynamoDB.ex_aws_config(TestRepo)) 68 | end 69 | -------------------------------------------------------------------------------- /test/integration/jason_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.Integration.Jason.Test do 2 | @moduledoc """ 3 | Integration tests for Jason. 4 | """ 5 | 6 | use ExUnit.Case 7 | 8 | test "encode" do 9 | assert Jason.encode(%{foo: "bar"}) == {:ok, "{\"foo\":\"bar\"}"} 10 | end 11 | end 12 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20190319220335_add_dog_table.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.AddDogTable do 2 | @moduledoc """ 3 | Used when testing migrations. 4 | 5 | Create a dog table, set to pay_per_request (AKA on-demand) billing mode. 6 | """ 7 | use Ecto.Migration 8 | 9 | def up do 10 | create_if_not_exists table(:dog, 11 | primary_key: false, 12 | options: [ 13 | billing_mode: :pay_per_request, 14 | ttl_attribute: "ttl" 15 | ] 16 | ) do 17 | add(:id, :string, primary_key: true) 18 | 19 | timestamps() 20 | end 21 | end 22 | 23 | def down do 24 | drop_if_exists(table(:dog)) 25 | end 26 | end 27 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20190319220346_add_cat_table.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.AddCatTable do 2 | @moduledoc """ 3 | Used when testing migrations. 4 | 5 | Create a cat table, set to provisioned billing mode. 6 | """ 7 | use Ecto.Migration 8 | 9 | def up do 10 | create table(:cat, 11 | primary_key: false, 12 | options: [ 13 | provisioned_throughput: [1, 1] 14 | ] 15 | ) do 16 | add(:id, :string, primary_key: true) 17 | 18 | timestamps() 19 | end 20 | end 21 | 22 | def down do 23 | drop(table(:cat)) 24 | end 25 | end 26 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20190320432123_add_name_index_to_dog.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.AddNameIndexToDog do 2 | @moduledoc """ 3 | Used when testing migrations. 4 | 5 | Add an index on name to the dog table. That table is set as an on-demand table in a previous 6 | migration, so we don't need to specify any throughput here. 
7 | """ 8 | use Ecto.Migration 9 | 10 | def up do 11 | alter table(:dog, 12 | options: [ 13 | global_indexes: [ 14 | [index_name: "name", keys: [:name], create_if_not_exists: true] 15 | ] 16 | ] 17 | ) do 18 | add(:name, :string, hash_key: true) 19 | end 20 | end 21 | 22 | def down do 23 | alter table(:dog, 24 | options: [ 25 | global_indexes: [ 26 | [index_name: "name", drop_if_exists: true] 27 | ] 28 | ] 29 | ) do 30 | remove(:name) 31 | end 32 | end 33 | end 34 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20190321234543_add_name_index_to_cat.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.AddNameIndexToCat do 2 | @moduledoc """ 3 | Used when testing migrations. 4 | 5 | Add an index on name to the cat table. The table's provisioned throughput is [1,1], 6 | so here we'll apply different settings to the index. 7 | """ 8 | use Ecto.Migration 9 | 10 | def up do 11 | alter table(:cat, 12 | options: [ 13 | global_indexes: [ 14 | [ 15 | index_name: "name", 16 | keys: [:name], 17 | provisioned_throughput: [2, 1], 18 | create_if_not_exists: true 19 | ] 20 | ] 21 | ] 22 | ) do 23 | add(:name, :string, hash_key: true) 24 | end 25 | end 26 | 27 | def down do 28 | alter table(:cat, 29 | options: [ 30 | global_indexes: [ 31 | [index_name: "name", drop_if_exists: true] 32 | ] 33 | ] 34 | ) do 35 | remove(:name) 36 | end 37 | end 38 | end 39 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20190321543456_add_rabbit_table.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.AddRabbitTable do 2 | @moduledoc """ 3 | Used when testing migrations. 4 | 5 | Create a rabbit table with an index, set to pay_per_request (AKA on-demand) billing mode. 6 | """ 7 | use Ecto.Migration 8 | 9 | def up do 10 | create_if_not_exists table(:rabbit, 11 | primary_key: false, 12 | options: [ 13 | billing_mode: :pay_per_request, 14 | global_indexes: [ 15 | [index_name: "name", keys: [:name]], 16 | [index_name: "foo", keys: [:foo]] 17 | ] 18 | ] 19 | ) do 20 | add(:id, :string, primary_key: true) 21 | add(:name, :string, hash_key: true) 22 | add(:foo, :string, hash_key: true) 23 | 24 | timestamps() 25 | end 26 | end 27 | 28 | def down do 29 | drop_if_exists(table(:rabbit)) 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20190322123432_modify_cat_name_index.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.ModifyCatNameIndex do 2 | @moduledoc """ 3 | Used when testing migrations. 4 | 5 | Modify the throughput on the name index for the cat table. 
6 | """ 7 | use Ecto.Migration 8 | 9 | def up do 10 | alter table(:cat) do 11 | modify(:name, :string, provisioned_throughput: [3, 2]) 12 | end 13 | end 14 | 15 | def down do 16 | alter table(:cat) do 17 | modify(:name, :string, provisioned_throughput: [2, 1]) 18 | end 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20190401123234_add_redundant_name_index_to_cat.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.AddRedundantNameIndexToCat do 2 | @moduledoc """ 3 | Used when testing migrations. 4 | 5 | Attempt to add a redundant index to the cat table. 6 | """ 7 | use Ecto.Migration 8 | 9 | def up do 10 | alter table(:cat, 11 | options: [ 12 | global_indexes: [ 13 | [ 14 | index_name: "name", 15 | keys: [:name], 16 | provisioned_throughput: [2, 1], 17 | create_if_not_exists: true 18 | ] 19 | ] 20 | ] 21 | ) do 22 | add(:name, :string, hash_key: true) 23 | end 24 | end 25 | 26 | def down do 27 | # For testing, we'll skip dropping the index here, as that would break the down 28 | # function in the preceding migration, which undoes a modification made to the name index. 29 | # This would normally cause an error, but we'll ignore that here, it does us no good. 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20190401654345_add_billing_mode_test_table.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.AddBillingModeTestTable do 2 | @moduledoc """ 3 | Used when testing migrations. 4 | 5 | Create a billing_mode_test table. This will be used with the following migration to test a particular scenario... 6 | 7 | A table's billing mode (on-demand or provisioned) can be set either through migrations or through the AWS dashboard; 8 | it's possible to have a scenario where a developer would create a provisioned table via migration which an admin 9 | then flips to pay_per_request via the dashboard. The dev may then create a migration to add an index to that table, 10 | which is now on-demand in production but provisioned locally; the migration would lack a specified provisioned throughput, 11 | which would work in production but would fail locally. 12 | 13 | This migration and the following one aim to replicate such a scenario - the table is created as provisioned, but the index does not specify a provisioned throughput. 14 | 15 | In production, this kind of discrepancy produces one of the following errors, depending on the disagreement: 16 | 17 | (ExAws.Error) ExAws Request Error! {"ValidationException", "One or more parameter values were invalid: Both ReadCapacityUnits and WriteCapacityUnits must be specified for index: name"} 18 | 19 | (ExAws.Error) ExAws Request Error! {"ValidationException", "One or more parameter values were invalid: Neither ReadCapacityUnits nor WriteCapacityUnits can be specified for index: name when BillingMode is PAY_PER_REQUEST"} 20 | 21 | However, in local development, the first error won't be thrown, the migration will just hang until it times out; 22 | the second won't occur at all, local dev DDB will just ignore any specified provisioned throughput. 23 | 24 | The logic associated with this can be found in lib/migration.ex, under the private method maybe_default_throughput/3. 
25 | """ 26 | use Ecto.Migration 27 | 28 | def up do 29 | create_if_not_exists table(:billing_mode_test, 30 | primary_key: false, 31 | options: [ 32 | provisioned_throughput: [1, 1] 33 | ] 34 | ) do 35 | add(:id, :string, primary_key: true) 36 | 37 | timestamps() 38 | end 39 | end 40 | 41 | def down do 42 | drop_if_exists(table(:billing_mode_test)) 43 | end 44 | end 45 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20190401765456_add_name_index_to_billing_mode_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.AddNameIndexToBillingModeTest do 2 | @moduledoc """ 3 | Used when testing migrations. 4 | 5 | See the moduledoc for the previous migration for an explanation of this migration's purpose. 6 | """ 7 | use Ecto.Migration 8 | 9 | def up do 10 | alter table(:billing_mode_test, 11 | options: [ 12 | global_indexes: [ 13 | [index_name: "name", keys: [:name], create_if_not_exists: true] 14 | ] 15 | ] 16 | ) do 17 | add(:name, :string, hash_key: true) 18 | end 19 | end 20 | 21 | def down do 22 | alter table(:billing_mode_test, 23 | options: [ 24 | global_indexes: [ 25 | [index_name: "name", drop_if_exists: true] 26 | ] 27 | ] 28 | ) do 29 | remove(:name) 30 | end 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20200519112501_add_ttl_to_cat.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.AddTTLToCat do 2 | @moduledoc """ 3 | Used when testing migrations. 4 | 5 | See the moduledoc for the previous migration for an explanation of this migration's purpose. 6 | """ 7 | use Ecto.Migration 8 | 9 | def up do 10 | alter table(:cat, 11 | options: [ 12 | ttl_attribute: "ttl" 13 | ] 14 | ) do 15 | :ok 16 | end 17 | end 18 | 19 | def down do 20 | alter table(:cat, 21 | options: [ 22 | ttl_attribute: nil 23 | ] 24 | ) do 25 | :ok 26 | end 27 | end 28 | end 29 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20200519112601_remove_ttl_from_dog.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.RemoveTTLFromDog do 2 | @moduledoc """ 3 | Used when testing migrations. 4 | 5 | See the moduledoc for the previous migration for an explanation of this migration's purpose. 6 | """ 7 | use Ecto.Migration 8 | 9 | def up do 10 | alter table(:dog, 11 | options: [ 12 | ttl_attribute: nil 13 | ] 14 | ) do 15 | :ok 16 | end 17 | end 18 | 19 | def down do 20 | alter table(:dog, 21 | options: [ 22 | ttl_attribute: "ttl" 23 | ] 24 | ) do 25 | :ok 26 | end 27 | end 28 | end 29 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20220323102500_add_stream_table.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.AddStreamTable do 2 | @moduledoc """ 3 | Used when testing migrations. 
4 | 
5 |   Create a table which has streaming enabled.
6 |   """
7 |   use Ecto.Migration
8 | 
9 |   def up do
10 |     create_if_not_exists table(:stream,
11 |                            primary_key: false,
12 |                            options: [
13 |                              billing_mode: :pay_per_request,
14 |                              stream_enabled: true,
15 |                              stream_view_type: :keys_only
16 |                            ]
17 |                          ) do
18 |       add(:id, :string, primary_key: true)
19 | 
20 |       timestamps()
21 |     end
22 |   end
23 | 
24 |   def down do
25 |     drop_if_exists(table(:stream))
26 |   end
27 | end
28 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20220323103900_add_stream_to_cat_table.exs: --------------------------------------------------------------------------------
1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.AddStreamToCatTable do
2 |   @moduledoc """
3 |   Used when testing migrations.
4 | 
5 |   Enable streaming (with the :new_image view type) on the existing cat table.
6 |   """
7 |   use Ecto.Migration
8 | 
9 |   def up do
10 |     alter table(:cat,
11 |             options: [
12 |               stream_enabled: true,
13 |               stream_view_type: :new_image
14 |             ]
15 |           ) do
16 |     end
17 |   end
18 | 
19 |   def down do
20 |     alter table(:cat,
21 |             options: [
22 |               stream_enabled: false
23 |             ]
24 |           ) do
25 |     end
26 |   end
27 | end
28 | -------------------------------------------------------------------------------- /test/priv/test_repo/migrations/20220323135100_remove_stream_from_table.exs: --------------------------------------------------------------------------------
1 | defmodule Ecto.Adapters.DynamoDB.TestRepo.Migrations.RemoveStreamFromTable do
2 |   @moduledoc """
3 |   Used when testing migrations.
4 | 
5 |   Disable streaming on the stream table.
6 |   """
7 |   use Ecto.Migration
8 | 
9 |   def up do
10 |     alter table(:stream,
11 |             options: [
12 |               stream_enabled: false
13 |             ]
14 |           ) do
15 |     end
16 |   end
17 | 
18 |   def down do
19 |     alter table(:stream,
20 |             options: [
21 |               stream_enabled: true,
22 |               stream_view_type: :keys_only
23 |             ]
24 |           ) do
25 |     end
26 |   end
27 | end
28 | -------------------------------------------------------------------------------- /test/support/test_repo.ex: --------------------------------------------------------------------------------
1 | defmodule Ecto.Adapters.DynamoDB.TestRepo do
2 |   use Ecto.Repo,
3 |     otp_app: :ecto_adapters_dynamodb,
4 |     adapter: Ecto.Adapters.DynamoDB
5 | end
6 | -------------------------------------------------------------------------------- /test/support/test_schema.ex: --------------------------------------------------------------------------------
1 | defmodule Ecto.Adapters.DynamoDB.TestSchema.Address do
2 |   use Ecto.Schema
3 |   @timestamps_opts [type: :utc_datetime]
4 | 
5 |   embedded_schema do
6 |     field(:street_number, :integer)
7 |     field(:street_name, :string)
8 |     field(:type, Ecto.Enum, values: [:foo, :bar])
9 | 
10 |     timestamps()
11 |   end
12 | end
13 | 
14 | defmodule Ecto.Adapters.DynamoDB.TestSchema.Person do
15 |   use Ecto.Schema
16 |   @primary_key {:id, :binary_id, autogenerate: true}
17 |   @foreign_key_type :binary_id
18 |   @timestamps_opts [type: :naive_datetime_usec]
19 | 
20 |   alias Ecto.Adapters.DynamoDB.TestSchema.Address
21 |   alias Ecto.Adapters.DynamoDB.DynamoDBSet
22 | 
23 |   schema "test_person" do
24 |     field(:first_name, :string)
25 |     field(:last_name, :string)
26 |     field(:age, :integer)
27 |     field(:email, :string)
28 |     field(:country, :string, source: :data1)
29 |     field(:tags_to_tags, DynamoDBSet)
30 |     field(:nil_to_tags, DynamoDBSet)
31 |     field(:type, Ecto.Enum, values: [foo: 1, bar: 2])
32 |     embeds_many(:addresses, Address)
33 | 
34 |     timestamps()
35 |   end
36 | 
37 |   def
changeset(person, params \\ %{}) do 38 | person 39 | |> Ecto.Changeset.cast(params, [:first_name, :last_name, :age, :email, :tags_to_tags]) 40 | |> Ecto.Changeset.validate_required([:first_name, :last_name]) 41 | |> Ecto.Changeset.unique_constraint(:id) 42 | end 43 | end 44 | 45 | # This is used to test records that have a hash+range primary key. 46 | # Use the `primary_key: true` option on the field for the range key. 47 | defmodule Ecto.Adapters.DynamoDB.TestSchema.BookPage do 48 | use Ecto.Schema 49 | @primary_key {:id, :binary_id, autogenerate: true} 50 | @foreign_key_type :binary_id 51 | @timestamps_opts [type: :utc_datetime_usec] 52 | 53 | schema "test_book_page" do 54 | field(:page_num, :integer, primary_key: true) 55 | field(:text, :string) 56 | 57 | timestamps() 58 | end 59 | 60 | def changeset(page, params \\ %{}) do 61 | page 62 | |> Ecto.Changeset.cast(params, [:page_num, :text]) 63 | |> Ecto.Changeset.validate_required([:page_num]) 64 | |> Ecto.Changeset.unique_constraint(:id) 65 | 66 | # See this page for why we only put a constraint on :id even though 67 | # the real constraint is on the full primary key of hash+range: 68 | # https://hexdocs.pm/ecto/Ecto.Changeset.html#unique_constraint/3-complex-constraints 69 | end 70 | end 71 | 72 | defmodule Ecto.Adapters.DynamoDB.TestSchema.Planet do 73 | use Ecto.Schema 74 | @primary_key {:id, :binary_id, autogenerate: true} 75 | @foreign_key_type :binary_id 76 | 77 | schema "test_planet" do 78 | field(:name, :string) 79 | field(:mass, :integer) 80 | field(:moons, Ecto.Adapters.DynamoDB.DynamoDBSet) 81 | 82 | # default timestamps_opts is :naive_datetime 83 | timestamps() 84 | end 85 | 86 | def changeset(struct, params \\ %{}) do 87 | struct 88 | |> Ecto.Changeset.cast(params, [:name, :moons]) 89 | |> Ecto.Changeset.validate_required([:id, :name]) 90 | |> Ecto.Changeset.unique_constraint(:name) 91 | 92 | # In order to use the test_planet table for testing fragment queries 93 | # on a composite primary key, we'll allow for duplicate ids but enforce unique names. 
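    # As a purely hypothetical illustration (the exact fragment forms this adapter
    # accepts may differ), such a fragment query could look like:
    #
    #   from(p in Planet,
    #     where: p.id == ^id and fragment("begins_with(?, ?)", p.name, ^name_prefix)
    #   )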
94 |   end
95 | end
96 | 
97 | defmodule Ecto.Adapters.DynamoDB.TestSchema.Fruit do
98 |   use Ecto.Schema
99 |   @primary_key {:id, :binary_id, autogenerate: true}
100 | 
101 |   schema "test_fruit" do
102 |     field(:name, :string)
103 | 
104 |     timestamps()
105 |   end
106 | 
107 |   def changeset(struct, params \\ %{}) do
108 |     struct
109 |     |> Ecto.Changeset.cast(params, [:name])
110 |     |> Ecto.Changeset.validate_required([:id, :name])
111 |   end
112 | end
113 | 
--------------------------------------------------------------------------------
/test/test_helper.exs:
--------------------------------------------------------------------------------
1 | # Load all our support files first, since we need some of them to define our helper modules
2 | 
3 | files = File.ls!("./test/support") |> Enum.filter(&String.ends_with?(&1, [".ex", ".exs"]))
4 | 
5 | Enum.each(files, fn file ->
6 |   Code.require_file("support/#{file}", __DIR__)
7 | end)
8 | 
9 | defmodule TestHelper do
10 |   alias ExAws.Dynamo
11 |   alias Ecto.Adapters.DynamoDB
12 |   alias Ecto.Adapters.DynamoDB.TestRepo
13 | 
14 |   def setup_all() do
15 |     IO.puts("========== main test suite ==========")
16 | 
17 |     IO.puts("starting test repo")
18 |     TestRepo.start_link()
19 | 
20 |     IO.puts("deleting any leftover test tables that may exist")
21 |     Dynamo.delete_table("test_person") |> request()
22 |     Dynamo.delete_table("test_book_page") |> request()
23 |     Dynamo.delete_table("test_planet") |> request()
24 |     Dynamo.delete_table("test_fruit") |> request()
25 | 
26 |     IO.puts("creating test_person table")
27 |     # Only need to define types for indexed fields:
28 |     key_definitions = %{id: :string, email: :string, first_name: :string, age: :number}
29 | 
30 |     indexes = [
31 |       %{
32 |         index_name: "email",
33 |         key_schema: [
34 |           %{
35 |             attribute_name: "email",
36 |             key_type: "HASH"
37 |           }
38 |         ],
39 |         provisioned_throughput: %{
40 |           read_capacity_units: 100,
41 |           write_capacity_units: 100
42 |         },
43 |         projection: %{projection_type: "ALL"}
44 |       },
45 |       %{
46 |         index_name: "first_name",
47 |         key_schema: [
48 |           %{
49 |             attribute_name: "first_name",
50 |             key_type: "HASH"
51 |           }
52 |         ],
53 |         provisioned_throughput: %{
54 |           read_capacity_units: 100,
55 |           write_capacity_units: 100
56 |         },
57 |         projection: %{projection_type: "ALL"}
58 |       },
59 |       %{
60 |         index_name: "first_name_email",
61 |         key_schema: [
62 |           %{
63 |             attribute_name: "first_name",
64 |             key_type: "HASH"
65 |           },
66 |           %{
67 |             attribute_name: "email",
68 |             key_type: "RANGE"
69 |           }
70 |         ],
71 |         provisioned_throughput: %{
72 |           read_capacity_units: 100,
73 |           write_capacity_units: 100
74 |         },
75 |         projection: %{projection_type: "ALL"}
76 |       },
77 |       %{
78 |         index_name: "first_name_age",
79 |         key_schema: [
80 |           %{
81 |             attribute_name: "first_name",
82 |             key_type: "HASH"
83 |           },
84 |           %{
85 |             attribute_name: "age",
86 |             key_type: "RANGE"
87 |           }
88 |         ],
89 |         provisioned_throughput: %{
90 |           read_capacity_units: 100,
91 |           write_capacity_units: 100
92 |         },
93 |         projection: %{projection_type: "ALL"}
94 |       },
95 |       %{
96 |         index_name: "age_first_name",
97 |         key_schema: [
98 |           %{
99 |             attribute_name: "age",
100 |             key_type: "HASH"
101 |           },
102 |           %{
103 |             attribute_name: "first_name",
104 |             key_type: "RANGE"
105 |           }
106 |         ],
107 |         provisioned_throughput: %{
108 |           read_capacity_units: 100,
109 |           write_capacity_units: 100
110 |         },
111 |         projection: %{projection_type: "ALL"}
112 |       }
113 |     ]
114 | 
115 |     Dynamo.create_table("test_person", [id: :hash], key_definitions, 100, 100, indexes, [])
116 |     |> request()
117 | 
118 |     IO.puts("creating test_book_page table")
119 |     key_definitions = %{id: :string, page_num: :number}
120 | 
121 |     Dynamo.create_table(
122 |       "test_book_page",
123 |       [id: :hash, page_num: :range],
124 |       key_definitions,
125 |       100,
126 |       100,
127 |       [],
128 |       []
129 |     )
130 |     |> request()
131 | 
132 |     IO.puts("creating test_planet table")
133 |     key_definitions = %{id: :string, name: :string, mass: :number}
134 | 
135 |     indexes = [
136 |       %{
137 |         index_name: "name_mass",
138 |         key_schema: [
139 |           %{
140 |             attribute_name: "name",
141 |             key_type: "HASH"
142 |           },
143 |           %{
144 |             attribute_name: "mass",
145 |             key_type: "RANGE"
146 |           }
147 |         ],
148 |         provisioned_throughput: %{
149 |           read_capacity_units: 100,
150 |           write_capacity_units: 100
151 |         },
152 |         projection: %{projection_type: "ALL"}
153 |       }
154 |     ]
155 | 
156 |     Dynamo.create_table(
157 |       "test_planet",
158 |       [id: :hash, name: :range],
159 |       key_definitions,
160 |       100,
161 |       100,
162 |       indexes,
163 |       []
164 |     )
165 |     |> request()
166 | 
167 |     IO.puts("creating test_fruit table")
168 | 
169 |     Dynamo.create_table("test_fruit", [id: :hash], %{id: :string}, 100, 100, [], [])
170 |     |> request()
171 | 
172 |     :ok
173 |   end
174 | 
175 |   def setup_all(:migration) do
176 |     IO.puts("========== migration test suite ==========")
177 |     Dynamo.delete_table("test_schema_migrations") |> request()
178 | 
179 |     IO.puts("starting test repo")
180 |     TestRepo.start_link()
181 |   end
182 | 
183 |   def on_exit() do
184 |     IO.puts("deleting main test tables")
185 |     Dynamo.delete_table("test_person") |> request()
186 |     Dynamo.delete_table("test_book_page") |> request()
187 |     Dynamo.delete_table("test_planet") |> request()
188 |     Dynamo.delete_table("test_fruit") |> request()
189 |   end
190 | 
191 |   def on_exit(:migration) do
192 |     IO.puts("deleting migration test tables")
193 | 
194 |     # Except for test_schema_migrations, these tables should be deleted during the "down" migration test.
195 |     # Just to make sure, we'll clean up here anyway.
196 | Dynamo.delete_table("dog") |> request() 197 | Dynamo.delete_table("cat") |> request() 198 | Dynamo.delete_table("stream") |> request() 199 | Dynamo.delete_table("rabbit") |> request() 200 | Dynamo.delete_table("billing_mode_test") |> request() 201 | Dynamo.delete_table("test_schema_migrations") |> request() 202 | end 203 | 204 | defp request(operation), do: ExAws.request(operation, DynamoDB.ex_aws_config(TestRepo)) 205 | end 206 | 207 | # Skip EQC testing if we don't have it installed: 208 | if Code.ensure_compiled(:eqc) == {:module, :eqc} do 209 | defmodule TestGenerators do 210 | use EQC 211 | 212 | alias Ecto.Adapters.DynamoDB.TestSchema.Person 213 | 214 | def nonempty_str() do 215 | such_that s <- utf8() do 216 | # Ecto.Changeset.validate_required checks for all-whitespace 217 | # strings in addition to empty ones, hence the trimming: 218 | String.trim_leading(s) != "" 219 | end 220 | end 221 | 222 | def circle_list() do 223 | non_empty(list(nonempty_str())) 224 | end 225 | 226 | def person_generator() do 227 | person_with_id(nonempty_str()) 228 | end 229 | 230 | def person_with_id(key_gen) do 231 | let {id, first, last, age, email, pass, circles} <- 232 | {key_gen, nonempty_str(), nonempty_str(), int(), nonempty_str(), nonempty_str(), 233 | circle_list()} do 234 | %Person{ 235 | id: id, 236 | first_name: first, 237 | last_name: last, 238 | age: age, 239 | email: email, 240 | password: pass, 241 | circles: circles 242 | } 243 | end 244 | end 245 | end 246 | else 247 | IO.puts("Could not find eqc module - skipping property based testing!") 248 | end 249 | 250 | # Set seed: 0 so that tests are run in order - critical for our migration tests. 251 | ExUnit.start(seed: 0) 252 | -------------------------------------------------------------------------------- /upgrade_guides/version_1_upgrade_guide.md: -------------------------------------------------------------------------------- 1 | # Upgrading from version 0.X.X -> 1.X.X 2 | 3 | ## Billing Mode 4 | 5 | In version `1.X.X`, we have added migration support for DynamoDB's *pay-per-request* (AKA *on-demand*) billing. Thus, we no longer provide default provisioned throughput of `[1,1]` when creating tables and indexes via migrations. If any of your table/index creation files do not explicitly specify values for `provisioned_throughput`, you'll want to update those. 6 | 7 | To create a *pay-per-request* table, you would add the `billing_mode: :pay_per_request` option to that table; you should not provide `provisioned_throughput` for *pay-per-request* tables, nor their indexes. 8 | 9 | ## `:dynamodb_local` config option 10 | 11 | In version `1.1.0`, we introduced a new configuration option, the boolean `:dynamodb_local`. Due to slight differences in behaviour between production and local versions of DynamoDB, there are some times when special handling needs to be applied. 12 | 13 | This configuration option defaults to `false`, so it assumes that you are running against production DynamoDB unless you explicitly set it to `true`. Although you would probably be fine if you didn't set this, we *highly* recommend setting it in any environment that you will be running against the local development version of DynamoDB. 14 | 15 | ## Local DynamoDB version 16 | 17 | In order to make sure your local version of DynamoDB is up to date with the current production features, please use the latest release of DynamoDB local. As of spring 2019, the latest version is `1.11.477`, released on February 6, 2019. 
18 | 
--------------------------------------------------------------------------------
/upgrade_guides/version_2_upgrade_guide.md:
--------------------------------------------------------------------------------
1 | # Upgrading from version 1.X.X -> 2.X.X
2 | 
3 | ## `adapter` definition
4 | 
5 | Upgrading to the latest version of this adapter should be relatively painless, since you'd really just be following Ecto's instructions for setting the adapter (albeit with some of our specific configuration details, specified in the README).
6 | 
7 | Probably the most notable change is that you no longer define the adapter in your application's `config/` file(s), but rather in the `Repo` module itself. For example:
8 | 
9 | ```elixir
10 | defmodule MyApp.Repo do
11 |   use Ecto.Repo,
12 |     otp_app: :my_app,
13 |     adapter: Ecto.Adapters.DynamoDB
14 | end
15 | ```
16 | 
17 | ## Local DynamoDB version
18 | 
19 | To make sure your local version of DynamoDB is up to date with the current production features, please use the latest release of DynamoDB local. As of spring 2020, the latest version is `1.11.478`, released on January 16, 2020. This major version release of the adapter should still be compatible with version `1.11.477`, but we strongly recommend upgrading.
--------------------------------------------------------------------------------
/upgrade_guides/version_3_upgrade_guide.md:
--------------------------------------------------------------------------------
1 | # Upgrading from version 2.X.X -> 3.X.X
2 | 
3 | ## Config changes
4 | 
5 | With the exception of logging configuration, all config options are now per-repo. For example:
6 | 
7 | ```elixir
8 | config :ecto_adapters_dynamodb,
9 |   dynamodb: [
10 |     scheme: "http://",
11 |     host: "localhost",
12 |     port: 8000,
13 |     region: "us-east-1"
14 |   ],
15 |   scan_tables: ["test_schema_migrations"]
16 | ```
17 | 
18 | now needs to be:
19 | 
20 | ```elixir
21 | config :ecto_adapters_dynamodb, MyApp.MyRepo,
22 |   dynamodb: [
23 |     scheme: "http://",
24 |     host: "localhost",
25 |     port: 8000,
26 |     region: "us-east-1"
27 |   ],
28 |   scan_tables: ["test_schema_migrations"]
29 | ```
30 | 
31 | (replacing `MyApp.MyRepo` with your repo).
32 | 
33 | ### Global ExAws config
34 | 
35 | Prior to v3, the adapter would overwrite any existing global ExAws config with its own values on startup. From v3, the adapter only uses the config it's given in the calls it makes itself. This may mean that you need to explicitly specify ExAws configuration options outside of the adapter's config if you're making your own ExAws calls elsewhere. (See the sketch at the end of this guide.)
36 | 
37 | ## ExAws.Dynamo version
38 | 
39 | This release of the adapter includes support for the latest major version of [ExAws.Dynamo](https://github.com/ex-aws/ex_aws_dynamo). As that major version release includes potentially breaking changes regarding the way empty string values are handled, we recommend reviewing the [upgrade guide](https://github.com/ex-aws/ex_aws_dynamo/blob/master/upgrade_guides/v4.md) for that repo to make sure you understand how your application may be affected.
40 | 
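To illustrate the *Global ExAws config* note above: one way to keep your own ExAws calls aligned with the adapter's per-repo settings is to pass the adapter's config explicitly, the same pattern this repo's own test helper uses. A sketch, with `MyApp.Repo` and `"my_table"` standing in for your own repo and table:

```elixir
# From v3, the adapter no longer writes its settings into the global ExAws
# config, so pass its per-repo config explicitly in your own calls:
ExAws.Dynamo.describe_table("my_table")
|> ExAws.request(Ecto.Adapters.DynamoDB.ex_aws_config(MyApp.Repo))
```

--------------------------------------------------------------------------------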