├── .formatter.exs ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug.yml │ └── config.yml └── workflows │ └── ci.yml ├── .gitignore ├── CHANGELOG.md ├── Earthfile ├── LICENSE.md ├── README.md ├── bench ├── README.md ├── bench_helper.exs ├── config.yml ├── scripts │ ├── macro │ │ ├── all_bench.exs │ │ └── insert_bench.exs │ └── micro │ │ ├── load_bench.exs │ │ └── to_sql_bench.exs └── support │ ├── migrations.exs │ ├── repo.exs │ ├── schemas.exs │ └── setup.exs ├── integration_test ├── myxql │ ├── all_test.exs │ ├── constraints_test.exs │ ├── explain_test.exs │ ├── migrations_test.exs │ ├── myxql_type_test.exs │ ├── prepare_test.exs │ ├── storage_test.exs │ ├── test_helper.exs │ └── upsert_all_test.exs ├── pg │ ├── all_test.exs │ ├── constraints_test.exs │ ├── copy_test.exs │ ├── exceptions_test.exs │ ├── explain_test.exs │ ├── migrations_test.exs │ ├── prepare_test.exs │ ├── storage_test.exs │ ├── test_helper.exs │ └── transaction_test.exs ├── sql │ ├── alter.exs │ ├── lock.exs │ ├── logging.exs │ ├── migration.exs │ ├── migrator.exs │ ├── query_many.exs │ ├── sandbox.exs │ ├── sql.exs │ ├── stream.exs │ ├── subquery.exs │ └── transaction.exs ├── support │ ├── file_helpers.exs │ ├── migration.exs │ └── repo.exs └── tds │ ├── all_test.exs │ ├── constraints_test.exs │ ├── explain_test.exs │ ├── lock_test.exs │ ├── migrations_test.exs │ ├── storage_test.exs │ ├── tds_type_test.exs │ └── test_helper.exs ├── lib ├── ecto │ ├── adapter │ │ ├── migration.ex │ │ └── structure.ex │ ├── adapters │ │ ├── myxql.ex │ │ ├── myxql │ │ │ └── connection.ex │ │ ├── postgres.ex │ │ ├── postgres │ │ │ └── connection.ex │ │ ├── sql.ex │ │ ├── sql │ │ │ ├── application.ex │ │ │ ├── connection.ex │ │ │ ├── sandbox.ex │ │ │ └── stream.ex │ │ ├── tds.ex │ │ └── tds │ │ │ ├── connection.ex │ │ │ └── types.ex │ ├── migration.ex │ ├── migration │ │ ├── runner.ex │ │ └── schema_migration.ex │ └── migrator.ex └── mix │ ├── ecto_sql.ex │ └── tasks │ ├── ecto.dump.ex │ ├── 
ecto.gen.migration.ex │ ├── ecto.load.ex │ ├── ecto.migrate.ex │ ├── ecto.migrations.ex │ └── ecto.rollback.ex ├── mix.exs ├── mix.lock └── test ├── ecto ├── adapters │ ├── myxql_test.exs │ ├── postgres_test.exs │ └── tds_test.exs ├── migration_test.exs ├── migrator_repo_test.exs ├── migrator_test.exs ├── tenant_migrator_test.exs └── type_test.exs ├── mix ├── ecto_sql_test.exs └── tasks │ ├── ecto.dump_load_test.exs │ ├── ecto.gen.migration_test.exs │ ├── ecto.migrate_test.exs │ ├── ecto.migrations_test.exs │ └── ecto.rollback_test.exs ├── support ├── connection_helpers.exs └── test_repo.exs └── test_helper.exs /.formatter.exs: -------------------------------------------------------------------------------- 1 | locals_without_parens = [ 2 | add: 2, 3 | add: 3, 4 | add_if_not_exists: 2, 5 | add_if_not_exists: 3, 6 | alter: 2, 7 | create: 1, 8 | create: 2, 9 | create_if_not_exists: 1, 10 | create_if_not_exists: 2, 11 | drop: 1, 12 | drop: 2, 13 | drop_if_exists: 1, 14 | drop_if_exists: 2, 15 | execute: 1, 16 | execute: 2, 17 | modify: 2, 18 | modify: 3, 19 | remove: 1, 20 | remove: 2, 21 | remove: 3, 22 | remove_if_exists: 1, 23 | remove_if_exists: 2, 24 | rename: 2, 25 | rename: 3, 26 | timestamps: 1 27 | ] 28 | 29 | [ 30 | import_deps: [:ecto], 31 | locals_without_parens: locals_without_parens, 32 | export: [ 33 | locals_without_parens: locals_without_parens 34 | ], 35 | inputs: ["{lib,test}/**/*.{ex,exs}"] 36 | ] 37 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug.yml: -------------------------------------------------------------------------------- 1 | name: 🐞 Bug Report 2 | description: Tell us about something that's not working the way we (probably) intend. 
3 | labels: ["Kind:Bug", "State:Triage"] 4 | body: 5 | - type: input 6 | id: elixir-version 7 | attributes: 8 | label: Elixir version 9 | description: Use `elixir -v` to find the Elixir version. 10 | validations: 11 | required: true 12 | 13 | - type: input 14 | id: db-version 15 | attributes: 16 | label: Database and Version 17 | description: > 18 | The database and its version (PostgreSQL 9.4, MongoDB 3.2, etc.) 19 | validations: 20 | required: true 21 | 22 | - type: input 23 | id: ecto-version 24 | attributes: 25 | label: Ecto Versions 26 | description: Use `mix deps` to find the dependency versions. 27 | validations: 28 | required: true 29 | 30 | - type: input 31 | id: db-adapter-version 32 | attributes: 33 | label: Database Adapter and Versions (postgrex, myxql, etc) 34 | description: Use `mix deps` to find the dependency versions. 35 | validations: 36 | required: true 37 | 38 | - type: textarea 39 | id: current-behavior 40 | attributes: 41 | label: Current behavior 42 | description: How can we reproduce what you're seeing? Include code samples, errors and stacktraces if appropriate. 43 | placeholder: |- 44 | 1. foo 45 | 2. bar 46 | 3. baz 47 | validations: 48 | required: true 49 | 50 | - type: textarea 51 | id: expected-behavior 52 | attributes: 53 | label: Expected behavior 54 | validations: 55 | required: true 56 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | 3 | contact_links: 4 | - name: Discuss proposals 5 | url: https://groups.google.com/g/elixir-ecto 6 | about: Send proposals for new ideas in the mailing list. 7 | - name: Ask questions 8 | url: https://elixirforum.com/ 9 | about: Ask and answer questions on ElixirForum. 
10 | 11 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: [push, pull_request] 3 | jobs: 4 | test: 5 | name: unittest 6 | runs-on: ubuntu-24.04 7 | env: 8 | MIX_ENV: test 9 | strategy: 10 | fail-fast: false 11 | matrix: 12 | include: 13 | - elixir: 1.17.2 14 | otp: 25.0.4 15 | - elixir: 1.17.2 16 | otp: 27.0.1 17 | lint: lint 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v4 21 | - name: Install Elixir and Erlang 22 | uses: erlef/setup-beam@v1 23 | with: 24 | elixir-version: ${{ matrix.elixir }} 25 | otp-version: ${{ matrix.otp }} 26 | - name: Restore deps and _build cache 27 | uses: actions/cache@v3 28 | with: 29 | path: | 30 | deps 31 | _build 32 | key: ${{ runner.os }}-${{ matrix.elixir }}-${{ matrix.otp }}-${{ hashFiles('**/mix.lock') }} 33 | restore-keys: | 34 | ${{ runner.os }}-${{ matrix.elixir }}-${{ matrix.otp }}- 35 | - name: Install dependencies 36 | run: mix deps.get 37 | - name: Check unused dependencies 38 | run: mix deps.unlock --check-unused 39 | if: ${{ matrix.lint }} 40 | - name: Check formatting 41 | run: mix format --check-formatted 42 | if: ${{ matrix.lint }} 43 | - name: Compile 44 | run: mix compile 45 | - name: Run tests 46 | run: mix test 47 | 48 | test-postgres: 49 | name: postgres integration test 50 | runs-on: ubuntu-24.04 51 | strategy: 52 | fail-fast: false 53 | matrix: 54 | elixirbase: 55 | - "1.14.5-erlang-23.3.4.9-alpine-3.16.9" 56 | postgres: 57 | - "16.2-alpine" 58 | - "11.11-alpine" 59 | - "9.6-alpine" 60 | - "9.5-alpine" 61 | pool_count: 62 | - "1" 63 | include: 64 | - elixirbase: "1.14.5-erlang-23.3.4.9-alpine-3.16.9" 65 | postgres: "16.2-alpine" 66 | pool_count: "4" 67 | steps: 68 | - uses: earthly/actions-setup@v1 69 | - uses: actions/checkout@v3 70 | - name: test ecto_sql 71 | env: 72 | POOL_COUNT: ${{ matrix.pool_count || '1' }} 73 | run: earthly -P 
--ci --build-arg ELIXIR_BASE=${{matrix.elixirbase}} --build-arg POSTGRES=${{matrix.postgres}} +integration-test-postgres 74 | 75 | test-mysql: 76 | name: mysql integration test 77 | runs-on: ubuntu-24.04 78 | strategy: 79 | fail-fast: false 80 | matrix: 81 | elixirbase: 82 | - "1.14.5-erlang-23.3.4.9-alpine-3.16.9" 83 | mysql: 84 | - "5.7" 85 | - "8.0" 86 | steps: 87 | - uses: earthly/actions-setup@v1 88 | - uses: actions/checkout@v3 89 | - name: test ecto_sql 90 | run: earthly -P --ci --build-arg ELIXIR_BASE=${{matrix.elixirbase}} --build-arg MYSQL=${{matrix.mysql}} +integration-test-mysql 91 | 92 | test-mssql: 93 | name: mssql integration test 94 | runs-on: ubuntu-24.04 95 | strategy: 96 | fail-fast: false 97 | matrix: 98 | elixirbase: 99 | - "1.14.5-erlang-23.3.4.9-alpine-3.16.9" 100 | mssql: 101 | - "2019" 102 | - "2022" 103 | steps: 104 | - uses: earthly/actions-setup@v1 105 | - uses: actions/checkout@v3 106 | - name: test ecto_sql 107 | run: earthly -P --ci --build-arg ELIXIR_BASE=${{matrix.elixirbase}} --build-arg MSSQL=${{matrix.mssql}} +integration-test-mssql 108 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /_build 2 | /bench/results 3 | /deps 4 | /doc 5 | /tmp 6 | erl_crash.dump 7 | -------------------------------------------------------------------------------- /Earthfile: -------------------------------------------------------------------------------- 1 | VERSION 0.6 2 | 3 | all: 4 | ARG ELIXIR_BASE=1.15.6-erlang-25.3.2.6-alpine-3.18.4 5 | BUILD \ 6 | --build-arg POSTGRES=16.2-alpine \ 7 | --build-arg POSTGRES=11.11-alpine \ 8 | --build-arg POSTGRES=9.6-alpine \ 9 | --build-arg POSTGRES=9.5-alpine \ 10 | +integration-test-postgres 11 | 12 | BUILD \ 13 | --build-arg MYSQL=5.7 \ 14 | --build-arg MYSQL=8.0 \ 15 | +integration-test-mysql 16 | 17 | BUILD \ 18 | --build-arg MSSQL=2017 \ 19 | --build-arg MSSQL=2019 \ 20 | 
+integration-test-mssql 21 | 22 | setup-base: 23 | ARG ELIXIR_BASE=1.15.6-erlang-25.3.2.6-alpine-3.18.4 24 | FROM hexpm/elixir:$ELIXIR_BASE 25 | RUN apk add --no-progress --update git build-base 26 | ENV ELIXIR_ASSERT_TIMEOUT=10000 27 | WORKDIR /src/ecto_sql 28 | RUN apk add --no-progress --update docker docker-compose 29 | RUN mix local.rebar --force 30 | RUN mix local.hex --force 31 | 32 | COMMON_SETUP_AND_MIX: 33 | COMMAND 34 | COPY mix.exs mix.lock .formatter.exs . 35 | COPY --dir bench integration_test lib test ./ 36 | RUN mix deps.get 37 | RUN mix deps.compile 38 | RUN mix compile #--warnings-as-errors 39 | 40 | integration-test-postgres: 41 | FROM +setup-base 42 | ARG POSTGRES="11.11" 43 | 44 | IF [ "$POSTGRES" = "9.5-alpine" ] 45 | # for 9.5 we require a downgraded version of pg_dump; 46 | # and in the 3.4 version, it is not included in postgresql-client but rather in postgresql 47 | RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.4/main' >> /etc/apk/repositories 48 | RUN apk add postgresql=9.5.13-r0 49 | ELSE IF [ "$POSTGRES" = "16.2-alpine" ] 50 | # for 16 we need an upgraded version of pg_dump; 51 | # alpine 3.16 does not come with the postgres 16 client by default; 52 | # we must first update the public keys for the packages because they 53 | # might have been rotated since our image was built 54 | RUN apk add -X https://dl-cdn.alpinelinux.org/alpine/v3.19/main -u alpine-keys 55 | RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.19/main' >> /etc/apk/repositories 56 | RUN apk add postgresql16-client 57 | ELSE 58 | RUN apk add postgresql-client 59 | END 60 | 61 | DO +COMMON_SETUP_AND_MIX 62 | 63 | # then run the tests 64 | WITH DOCKER \ 65 | --pull "postgres:$POSTGRES" --platform linux/amd64 66 | RUN set -e; \ 67 | timeout=$(expr $(date +%s) + 30); \ 68 | docker run --name pg --network=host -d -e POSTGRES_USER=postgres -e POSTGRES_PASSWORD=postgres -e POSTGRES_DB=postgres "postgres:$POSTGRES"; \ 69 | # wait for postgres to start 70 | while ! 
pg_isready --host=127.0.0.1 --port=5432 --quiet; do \ 71 | test "$(date +%s)" -le "$timeout" || (echo "timed out waiting for postgres"; exit 1); \ 72 | echo "waiting for postgres"; \ 73 | sleep 1; \ 74 | done; \ 75 | # run tests 76 | PG_URL=postgres:postgres@127.0.0.1 ECTO_ADAPTER=pg mix test; 77 | END 78 | 79 | integration-test-mysql: 80 | FROM +setup-base 81 | RUN apk add mysql-client 82 | 83 | DO +COMMON_SETUP_AND_MIX 84 | 85 | ARG MYSQL="5.7" 86 | WITH DOCKER \ 87 | --pull "mysql:$MYSQL" --platform linux/amd64 88 | RUN set -e; \ 89 | timeout=$(expr $(date +%s) + 30); \ 90 | docker run --name mysql --network=host -d -e MYSQL_ROOT_PASSWORD=root "mysql:$MYSQL" \ 91 | --sql_mode="ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION,ANSI_QUOTES" \ 92 | # the default authentication plugin for MySQL 8 is sha 256 but it doesn't come with the docker image. falling back to the 5.7 way 93 | --default-authentication-plugin=mysql_native_password; \ 94 | # wait for mysql to start 95 | while ! 
mysqladmin ping --host=127.0.0.1 --port=3306 --protocol=TCP --silent; do \ 96 | test "$(date +%s)" -le "$timeout" || (echo "timed out waiting for mysql"; exit 1); \ 97 | echo "waiting for mysql"; \ 98 | sleep 1; \ 99 | done; \ 100 | # run tests 101 | MYSQL_URL=root:root@127.0.0.1 ECTO_ADAPTER=myxql mix test; 102 | END 103 | 104 | 105 | integration-test-mssql: 106 | ARG TARGETARCH 107 | FROM +setup-base 108 | 109 | RUN apk add --no-cache curl gnupg --virtual .build-dependencies -- && \ 110 | curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_18.3.2.1-1_${TARGETARCH}.apk && \ 111 | curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_18.3.1.1-1_${TARGETARCH}.apk && \ 112 | echo y | apk add --allow-untrusted msodbcsql18_18.3.2.1-1_${TARGETARCH}.apk mssql-tools18_18.3.1.1-1_${TARGETARCH}.apk && \ 113 | apk del .build-dependencies && rm -f msodbcsql*.sig mssql-tools*.apk 114 | ENV PATH="/opt/mssql-tools18/bin:${PATH}" 115 | 116 | DO +COMMON_SETUP_AND_MIX 117 | 118 | ARG MSSQL="2017" 119 | WITH DOCKER \ 120 | --pull "mcr.microsoft.com/mssql/server:$MSSQL-latest" --platform linux/amd64 121 | RUN set -e; \ 122 | timeout=$(expr $(date +%s) + 30); \ 123 | docker run -d -p 1433:1433 --name mssql -e 'ACCEPT_EULA=Y' -e 'MSSQL_SA_PASSWORD=some!Password' "mcr.microsoft.com/mssql/server:$MSSQL-latest"; \ 124 | # wait for mssql to start 125 | while ! 
sqlcmd -C -S tcp:127.0.0.1,1433 -U sa -P 'some!Password' -Q "SELECT 1" >/dev/null 2>&1; do \ 126 | test "$(date +%s)" -le "$timeout" || (echo "timed out waiting for mssql"; exit 1); \ 127 | echo "waiting for mssql"; \ 128 | sleep 1; \ 129 | done; \ 130 | # run tests 131 | ECTO_ADAPTER=tds mix test; 132 | END 133 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Ecto SQL 2 | ========= 3 | 4 | [![Build Status](https://github.com/elixir-ecto/ecto_sql/workflows/CI/badge.svg)](https://github.com/elixir-ecto/ecto_sql/actions) 5 | 6 | Ecto SQL ([documentation](https://hexdocs.pm/ecto_sql)) provides building blocks for writing SQL adapters for Ecto. It features: 7 | 8 | * The Ecto.Adapters.SQL module as an entry point for all SQL-based adapters 9 | * Default implementations for Postgres (Ecto.Adapters.Postgres), MySQL (Ecto.Adapters.MyXQL), and MSSQL (Ecto.Adapters.Tds) 10 | * A test sandbox (Ecto.Adapters.SQL.Sandbox) that concurrently runs database tests inside transactions 11 | * Support for database migrations via Mix tasks 12 | 13 | To learn more about getting started, [see the Ecto repository](https://github.com/elixir-ecto/ecto). 14 | 15 | ## Running tests 16 | 17 | Clone the repo and fetch its dependencies: 18 | 19 | $ git clone https://github.com/elixir-ecto/ecto_sql.git 20 | $ cd ecto_sql 21 | $ mix deps.get 22 | $ mix test 23 | 24 | In case you are modifying Ecto and EctoSQL at the same time, you can configure EctoSQL to use an Ecto version from your machine by running: 25 | 26 | $ ECTO_PATH=../ecto mix test.all 27 | 28 | ### Running integration tests 29 | 30 | The command above will run unit tests. EctoSQL also has a suite of integration tests for its built-in adapters: `pg`, `myxql` and `tds`. If you are changing logic specific to a database, we recommend running its respective integration test suite as well. 
Doing so requires you to have the database available locally. MySQL and PostgreSQL can be installed directly on most systems. For MSSQL, you may need to run it as a Docker image: 31 | 32 | docker run -d -p 1433:1433 --name mssql -e 'ACCEPT_EULA=Y' -e 'MSSQL_SA_PASSWORD=some!Password' mcr.microsoft.com/mssql/server:2017-latest 33 | 34 | Once the database is running, you can run tests against a specific Ecto adapter by using the `ECTO_ADAPTER` environment variable: 35 | 36 | $ ECTO_ADAPTER=pg mix test 37 | 38 | You may also run `mix test.all` to run the unit tests and all integration tests. You can also use a local Ecto checkout if desired: 39 | 40 | $ ECTO_PATH=../ecto mix test.all 41 | 42 | ### Running containerized tests 43 | 44 | It is also possible to run the integration tests under a containerized environment using [earthly](https://earthly.dev/get-earthly). You will also need Docker installed on your system. Then you can run: 45 | 46 | $ earthly -P +all 47 | 48 | You can also use this to interactively debug any failing integration tests using the corresponding commands: 49 | 50 | $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-20.3.8.26-alpine-3.11.6 --build-arg MYSQL=5.7 +integration-test-mysql 51 | $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-20.3.8.26-alpine-3.11.6 --build-arg MSSQL=2019 +integration-test-mssql 52 | $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-20.3.8.26-alpine-3.11.6 --build-arg POSTGRES=11.11 +integration-test-postgres 53 | 54 | Then once you enter the containerized shell, you can inspect the underlying databases with the respective commands: 55 | 56 | PGPASSWORD=postgres psql -h 127.0.0.1 -U postgres -d postgres ecto_test 57 | MYSQL_PASSWORD=root mysql -h 127.0.0.1 -uroot -proot ecto_test 58 | sqlcmd -U sa -P 'some!Password' 59 | 60 | ## License 61 | 62 | Copyright (c) 2012 Plataformatec \ 63 | Copyright (c) 2020 Dashbit 64 | 65 | Licensed under the Apache License, Version 2.0 (the "License"); 66 | you may not use 
this file except in compliance with the License. 67 | You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) 68 | 69 | Unless required by applicable law or agreed to in writing, software 70 | distributed under the License is distributed on an "AS IS" BASIS, 71 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 72 | See the License for the specific language governing permissions and 73 | limitations under the License. 74 | -------------------------------------------------------------------------------- /bench/README.md: -------------------------------------------------------------------------------- 1 | # Ecto Benchmarks 2 | 3 | Ecto has a benchmark suite to track performance of sensitive operations. Benchmarks 4 | are run using the [Benchee](https://github.com/PragTob/benchee) library and 5 | need PostgreSQL and MySQL up and running. 6 | 7 | To run the benchmarks tests just type in the console: 8 | 9 | ``` 10 | # POSIX-compatible shells 11 | $ MIX_ENV=bench mix run bench/bench_helper.exs 12 | ``` 13 | 14 | ``` 15 | # other shells 16 | $ env MIX_ENV=bench mix run bench/bench_helper.exs 17 | ``` 18 | 19 | Benchmarks are inside the `scripts/` directory and are divided into two 20 | categories: 21 | 22 | * `micro benchmarks`: Operations that don't actually interface with the database, 23 | but might need it up and running to start the Ecto agents and processes. 24 | 25 | * `macro benchmarks`: Operations that are actually run in the database. This are 26 | more likely to integration tests. 27 | 28 | You can also run a benchmark individually by giving the path to the benchmark 29 | script instead of `bench/bench_helper.exs`. 
30 | -------------------------------------------------------------------------------- /bench/bench_helper.exs: -------------------------------------------------------------------------------- 1 | # Micro benchmarks 2 | Code.require_file("scripts/micro/load_bench.exs", __DIR__) 3 | Code.require_file("scripts/micro/to_sql_bench.exs", __DIR__) 4 | 5 | ## Macro benchmarks needs postgresql and mysql up and running 6 | Code.require_file("scripts/macro/insert_bench.exs", __DIR__) 7 | Code.require_file("scripts/macro/all_bench.exs", __DIR__) 8 | -------------------------------------------------------------------------------- /bench/config.yml: -------------------------------------------------------------------------------- 1 | elixir: 1.5.2 2 | erlang: 20.1.2 3 | environment: 4 | PG_URL: postgres:postgres@localhost 5 | MYXQL_URL: root@localhost 6 | deps: 7 | docker: 8 | - container_name: postgres 9 | image: postgres:9.6.6-alpine 10 | - container_name: mysql 11 | image: mysql:5.7.20 12 | environment: 13 | MYSQL_ALLOW_EMPTY_PASSWORD: "true" 14 | -------------------------------------------------------------------------------- /bench/scripts/macro/all_bench.exs: -------------------------------------------------------------------------------- 1 | # -----------------------------------Goal-------------------------------------- 2 | # Compare the performance of querying all objects of the different supported 3 | # databases 4 | 5 | # -------------------------------Description----------------------------------- 6 | # This benchmark tracks performance of querying a set of objects registered in 7 | # the database with Repo.all/2 function. The query pass through 8 | # the steps of translating the SQL statements, sending them to the database and 9 | # load the results into Ecto structures. Both, Ecto Adapters and Database itself 10 | # play a role and can affect the results of this benchmark. 
11 | 12 | # ----------------------------Factors(don't change)--------------------------- 13 | # Different adapters supported by Ecto with the proper database up and running 14 | 15 | # ----------------------------Parameters(change)------------------------------- 16 | # There is only a unique parameter in this benchmark, the User objects to be 17 | # fetched. 18 | 19 | Code.require_file("../../support/setup.exs", __DIR__) 20 | 21 | alias Ecto.Bench.User 22 | 23 | limit = 5_000 24 | 25 | users = 26 | 1..limit 27 | |> Enum.map(fn _ -> User.sample_data() end) 28 | 29 | # We need to insert data to fetch 30 | Ecto.Bench.PgRepo.insert_all(User, users) 31 | Ecto.Bench.MyXQLRepo.insert_all(User, users) 32 | 33 | jobs = %{ 34 | "Pg Repo.all/2" => fn -> Ecto.Bench.PgRepo.all(User, limit: limit) end, 35 | "MyXQL Repo.all/2" => fn -> Ecto.Bench.MyXQLRepo.all(User, limit: limit) end 36 | } 37 | 38 | path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results" 39 | file = Path.join(path, "all.json") 40 | 41 | Benchee.run( 42 | jobs, 43 | formatters: [Benchee.Formatters.Console], 44 | formatter_options: [json: [file: file]], 45 | time: 10, 46 | after_each: fn results -> 47 | ^limit = length(results) 48 | end 49 | ) 50 | 51 | # Clean inserted data 52 | Ecto.Bench.PgRepo.delete_all(User) 53 | Ecto.Bench.MyXQLRepo.delete_all(User) 54 | -------------------------------------------------------------------------------- /bench/scripts/macro/insert_bench.exs: -------------------------------------------------------------------------------- 1 | # -----------------------------------Goal-------------------------------------- 2 | # Compare the performance of inserting changesets and structs in the different 3 | # supported databases 4 | 5 | # -------------------------------Description----------------------------------- 6 | # This benchmark tracks performance of inserting changesets and structs in the 7 | # database with Repo.insert!/1 function. 
The query pass through 8 | # the steps of translating the SQL statements, sending them to the database and 9 | # returning the result of the transaction. Both, Ecto Adapters and Database itself 10 | # play a role and can affect the results of this benchmark. 11 | 12 | # ----------------------------Factors(don't change)--------------------------- 13 | # Different adapters supported by Ecto with the proper database up and running 14 | 15 | # ----------------------------Parameters(change)------------------------------- 16 | # Different inputs to be inserted, aka Changesets and Structs 17 | 18 | Code.require_file("../../support/setup.exs", __DIR__) 19 | 20 | alias Ecto.Bench.User 21 | 22 | inputs = %{ 23 | "Struct" => struct(User, User.sample_data()), 24 | "Changeset" => User.changeset(User.sample_data()) 25 | } 26 | 27 | jobs = %{ 28 | "Pg Insert" => fn entry -> Ecto.Bench.PgRepo.insert!(entry) end, 29 | "MyXQL Insert" => fn entry -> Ecto.Bench.MyXQLRepo.insert!(entry) end 30 | } 31 | 32 | path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results" 33 | file = Path.join(path, "insert.json") 34 | 35 | Benchee.run( 36 | jobs, 37 | inputs: inputs, 38 | formatters: [Benchee.Formatters.Console], 39 | formatter_options: [json: [file: file]] 40 | ) 41 | 42 | # Clean inserted data 43 | Ecto.Bench.PgRepo.delete_all(User) 44 | Ecto.Bench.MyXQLRepo.delete_all(User) 45 | -------------------------------------------------------------------------------- /bench/scripts/micro/load_bench.exs: -------------------------------------------------------------------------------- 1 | # -----------------------------------Goal-------------------------------------- 2 | # Compare the implementation of loading raw database data into Ecto structures by 3 | # the different database adapters 4 | 5 | # -------------------------------Description----------------------------------- 6 | # Repo.load/2 is an important step of a database query. 
7 | # This benchmark tracks performance of loading "raw" data into ecto structures 8 | # Raw data can be in different types (e.g. keyword lists, maps), in this tests 9 | # we benchmark against map inputs 10 | 11 | # ----------------------------Factors(don't change)--------------------------- 12 | # Different adapters supported by Ecto, each one has its own implementation that 13 | # is tested against different inputs 14 | 15 | # ----------------------------Parameters(change)------------------------------- 16 | # Different sizes of raw data(small, medium, big) and different attribute types 17 | # such as UUID, Date and Time fetched from the database and needs to be 18 | # loaded into Ecto structures. 19 | 20 | Code.require_file("../../support/setup.exs", __DIR__) 21 | 22 | alias Ecto.Bench.User 23 | 24 | inputs = %{ 25 | "Small 1 Thousand" => 26 | 1..1_000 |> Enum.map(fn _ -> %{name: "Alice", email: "email@email.com"} end), 27 | "Medium 100 Thousand" => 28 | 1..100_000 |> Enum.map(fn _ -> %{name: "Alice", email: "email@email.com"} end), 29 | "Big 1 Million" => 30 | 1..1_000_000 |> Enum.map(fn _ -> %{name: "Alice", email: "email@email.com"} end), 31 | "Time attr" => 32 | 1..100_000 |> Enum.map(fn _ -> %{name: "Alice", time_attr: ~T[21:25:04.361140]} end), 33 | "Date attr" => 1..100_000 |> Enum.map(fn _ -> %{name: "Alice", date_attr: ~D[2018-06-20]} end), 34 | "NaiveDateTime attr" => 35 | 1..100_000 36 | |> Enum.map(fn _ -> %{name: "Alice", naive_datetime_attr: ~N[2019-06-20 21:32:07.424178]} end), 37 | "UUID attr" => 38 | 1..100_000 39 | |> Enum.map(fn _ -> %{name: "Alice", uuid: Ecto.UUID.bingenerate()} end) 40 | } 41 | 42 | jobs = %{ 43 | "Pg Loader" => fn data -> Enum.map(data, &Ecto.Bench.PgRepo.load(User, &1)) end, 44 | "MyXQL Loader" => fn data -> Enum.map(data, &Ecto.Bench.MyXQLRepo.load(User, &1)) end 45 | } 46 | 47 | path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results" 48 | file = Path.join(path, "load.json") 49 | 50 | Benchee.run( 51 | jobs, 
52 | inputs: inputs, 53 | formatters: [Benchee.Formatters.Console], 54 | formatter_options: [json: [file: file]] 55 | ) 56 | -------------------------------------------------------------------------------- /bench/scripts/micro/to_sql_bench.exs: -------------------------------------------------------------------------------- 1 | # -----------------------------------Goal-------------------------------------- 2 | # Compare the implementation of parsing Ecto.Query objects into SQL queries by 3 | # the different database adapters 4 | 5 | # -------------------------------Description----------------------------------- 6 | # Repo.to_sql/2 is an important step of a database query. 7 | # This benchmark tracks performance of parsing Ecto.Query structures into 8 | # "raw" SQL query strings. 9 | # Different Ecto.Query objects has multiple combinations and some different attributes 10 | # depending on the query type. In this tests we benchmark against different 11 | # query types and complexity. 12 | 13 | # ----------------------------Factors(don't change)--------------------------- 14 | # Different adapters supported by Ecto, each one has its own implementation that 15 | # is tested against different query inputs 16 | 17 | # ----------------------------Parameters(change)------------------------------- 18 | # Different query objects (select, delete, update) to be translated into pure SQL 19 | # strings. 
20 | 21 | Code.require_file("../../support/setup.exs", __DIR__) 22 | 23 | import Ecto.Query 24 | 25 | alias Ecto.Bench.{User, Game} 26 | 27 | inputs = %{ 28 | "Ordinary Select All" => {:all, from(User)}, 29 | "Ordinary Delete All" => {:delete_all, from(User)}, 30 | "Ordinary Update All" => {:update_all, from(User, update: [set: [name: "Thor"]])}, 31 | "Ordinary Where" => {:all, from(User, where: [name: "Thanos", email: "blah@blah"])}, 32 | "Fetch First Registry" => {:all, first(User)}, 33 | "Fetch Last Registry" => {:all, last(User)}, 34 | "Ordinary Order By" => {:all, order_by(User, desc: :name)}, 35 | "Complex Query 2 Joins" => 36 | {:all, 37 | from(User, where: [name: "Thanos"]) 38 | |> join(:left, [u], ux in User, on: u.id == ux.id) 39 | |> join(:right, [j], uj in User, on: j.id == 1 and j.email == "email@email") 40 | |> select([u, ux], {u.name, ux.email})}, 41 | "Complex Query 4 Joins" => 42 | {:all, 43 | from(User) 44 | |> join(:left, [u], g in Game, on: g.name == u.name) 45 | |> join(:right, [g], u in User, on: g.id == 1 and u.email == "email@email") 46 | |> join(:inner, [u], g in fragment("SELECT * from games where game.id = ?", u.id)) 47 | |> join(:left, [g], u in fragment("SELECT * from users = ?", g.id)) 48 | |> select([u, g], {u.name, g.price})} 49 | } 50 | 51 | jobs = %{ 52 | "Pg Query Builder" => fn {type, query} -> Ecto.Bench.PgRepo.to_sql(type, query) end, 53 | "MyXQL Query Builder" => fn {type, query} -> Ecto.Bench.MyXQLRepo.to_sql(type, query) end 54 | } 55 | 56 | path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results" 57 | file = Path.join(path, "to_sql.json") 58 | 59 | Benchee.run( 60 | jobs, 61 | inputs: inputs, 62 | formatters: [Benchee.Formatters.Console], 63 | formatter_options: [json: [file: file]] 64 | ) 65 | -------------------------------------------------------------------------------- /bench/support/migrations.exs: -------------------------------------------------------------------------------- 1 | defmodule 
Ecto.Bench.CreateUser do 2 | use Ecto.Migration 3 | 4 | def change do 5 | create table(:users) do 6 | add(:name, :string) 7 | add(:email, :string) 8 | add(:password, :string) 9 | add(:time_attr, :time) 10 | add(:date_attr, :date) 11 | add(:naive_datetime_attr, :naive_datetime) 12 | add(:uuid, :binary_id) 13 | end 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /bench/support/repo.exs: -------------------------------------------------------------------------------- 1 | pg_bench_url = System.get_env("PG_URL") || "postgres:postgres@localhost" 2 | myxql_bench_url = System.get_env("MYXQL_URL") || "root@localhost" 3 | 4 | Application.put_env( 5 | :ecto_sql, 6 | Ecto.Bench.PgRepo, 7 | url: "ecto://" <> pg_bench_url <> "/ecto_test", 8 | adapter: Ecto.Adapters.Postgres, 9 | show_sensitive_data_on_connection_error: true 10 | ) 11 | 12 | Application.put_env( 13 | :ecto_sql, 14 | Ecto.Bench.MyXQLRepo, 15 | url: "ecto://" <> myxql_bench_url <> "/ecto_test_myxql", 16 | adapter: Ecto.Adapters.MyXQL, 17 | protocol: :tcp, 18 | show_sensitive_data_on_connection_error: true 19 | ) 20 | 21 | defmodule Ecto.Bench.PgRepo do 22 | use Ecto.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.Postgres, log: false 23 | end 24 | 25 | defmodule Ecto.Bench.MyXQLRepo do 26 | use Ecto.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.MyXQL, log: false 27 | end 28 | -------------------------------------------------------------------------------- /bench/support/schemas.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Bench.User do 2 | use Ecto.Schema 3 | 4 | schema "users" do 5 | field(:name, :string) 6 | field(:email, :string) 7 | field(:password, :string) 8 | field(:time_attr, :time) 9 | field(:date_attr, :date) 10 | field(:naive_datetime_attr, :naive_datetime) 11 | field(:uuid, :binary_id) 12 | end 13 | 14 | @required_attrs [ 15 | :name, 16 | :email, 17 | :password, 18 | :time_attr, 19 | 
:date_attr, 20 | :naive_datetime_attr, 21 | :uuid 22 | ] 23 | 24 | def changeset() do 25 | changeset(sample_data()) 26 | end 27 | 28 | def changeset(data) do 29 | Ecto.Changeset.cast(%__MODULE__{}, data, @required_attrs) 30 | end 31 | 32 | def sample_data do 33 | %{ 34 | name: "Lorem ipsum dolor sit amet, consectetur adipiscing elit.", 35 | email: "foobar@email.com", 36 | password: "mypass", 37 | time_attr: Time.utc_now() |> Time.truncate(:second), 38 | date_attr: Date.utc_today(), 39 | naive_datetime_attr: NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second), 40 | uuid: Ecto.UUID.generate() 41 | } 42 | end 43 | end 44 | 45 | defmodule Ecto.Bench.Game do 46 | use Ecto.Schema 47 | 48 | schema "games" do 49 | field(:name, :string) 50 | field(:price, :float) 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /bench/support/setup.exs: -------------------------------------------------------------------------------- 1 | Code.require_file("repo.exs", __DIR__) 2 | Code.require_file("migrations.exs", __DIR__) 3 | Code.require_file("schemas.exs", __DIR__) 4 | 5 | alias Ecto.Bench.{PgRepo, MyXQLRepo, CreateUser} 6 | 7 | {:ok, _} = Ecto.Adapters.Postgres.ensure_all_started(PgRepo.config(), :temporary) 8 | {:ok, _} = Ecto.Adapters.MyXQL.ensure_all_started(MyXQLRepo.config(), :temporary) 9 | 10 | _ = Ecto.Adapters.Postgres.storage_down(PgRepo.config()) 11 | :ok = Ecto.Adapters.Postgres.storage_up(PgRepo.config()) 12 | 13 | _ = Ecto.Adapters.MyXQL.storage_down(MyXQLRepo.config()) 14 | :ok = Ecto.Adapters.MyXQL.storage_up(MyXQLRepo.config()) 15 | 16 | {:ok, _pid} = PgRepo.start_link(log: false) 17 | {:ok, _pid} = MyXQLRepo.start_link(log: false) 18 | 19 | :ok = Ecto.Migrator.up(PgRepo, 0, CreateUser, log: false) 20 | :ok = Ecto.Migrator.up(MyXQLRepo, 0, CreateUser, log: false) 21 | -------------------------------------------------------------------------------- /integration_test/myxql/all_test.exs: 
-------------------------------------------------------------------------------- 1 | ecto = Mix.Project.deps_paths()[:ecto] 2 | Code.require_file "#{ecto}/integration_test/cases/assoc.exs", __DIR__ 3 | Code.require_file "#{ecto}/integration_test/cases/interval.exs", __DIR__ 4 | Code.require_file "#{ecto}/integration_test/cases/joins.exs", __DIR__ 5 | Code.require_file "#{ecto}/integration_test/cases/preload.exs", __DIR__ 6 | Code.require_file "#{ecto}/integration_test/cases/repo.exs", __DIR__ 7 | Code.require_file "#{ecto}/integration_test/cases/type.exs", __DIR__ 8 | 9 | Code.require_file "../sql/alter.exs", __DIR__ 10 | Code.require_file "../sql/lock.exs", __DIR__ 11 | Code.require_file "../sql/logging.exs", __DIR__ 12 | Code.require_file "../sql/migration.exs", __DIR__ 13 | Code.require_file "../sql/migrator.exs", __DIR__ 14 | Code.require_file "../sql/query_many.exs", __DIR__ 15 | Code.require_file "../sql/sandbox.exs", __DIR__ 16 | Code.require_file "../sql/sql.exs", __DIR__ 17 | Code.require_file "../sql/stream.exs", __DIR__ 18 | Code.require_file "../sql/subquery.exs", __DIR__ 19 | Code.require_file "../sql/transaction.exs", __DIR__ 20 | -------------------------------------------------------------------------------- /integration_test/myxql/constraints_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ConstraintsTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Ecto.Migrator, only: [up: 4] 5 | alias Ecto.Integration.PoolRepo 6 | 7 | defmodule ConstraintMigration do 8 | use Ecto.Migration 9 | 10 | @table table(:constraints_test) 11 | 12 | def change do 13 | create @table do 14 | add :price, :integer 15 | add :from, :integer 16 | add :to, :integer 17 | end 18 | 19 | # Only valid after MySQL 8.0.19 20 | create constraint(@table.name, :positive_price, check: "price > 0") 21 | end 22 | end 23 | 24 | defmodule Constraint do 25 | use Ecto.Integration.Schema 26 | 27 | schema 
"constraints_test" do 28 | field :price, :integer 29 | field :from, :integer 30 | field :to, :integer 31 | end 32 | end 33 | 34 | @base_migration 2_000_000 35 | 36 | setup_all do 37 | ExUnit.CaptureLog.capture_log(fn -> 38 | num = @base_migration + System.unique_integer([:positive]) 39 | up(PoolRepo, num, ConstraintMigration, log: false) 40 | end) 41 | 42 | :ok 43 | end 44 | 45 | @tag :create_constraint 46 | test "check constraint" do 47 | # When the changeset doesn't expect the db error 48 | changeset = Ecto.Changeset.change(%Constraint{}, price: -10) 49 | exception = 50 | assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to insert struct/, fn -> 51 | PoolRepo.insert(changeset) 52 | end 53 | 54 | assert exception.message =~ "\"positive_price\" (check_constraint)" 55 | assert exception.message =~ "The changeset has not defined any constraint." 56 | assert exception.message =~ "call `check_constraint/3`" 57 | 58 | # When the changeset does expect the db error, but doesn't give a custom message 59 | {:error, changeset} = 60 | changeset 61 | |> Ecto.Changeset.check_constraint(:price, name: :positive_price) 62 | |> PoolRepo.insert() 63 | assert changeset.errors == [price: {"is invalid", [constraint: :check, constraint_name: "positive_price"]}] 64 | assert changeset.data.__meta__.state == :built 65 | 66 | # When the changeset does expect the db error and gives a custom message 67 | changeset = Ecto.Changeset.change(%Constraint{}, price: -10) 68 | {:error, changeset} = 69 | changeset 70 | |> Ecto.Changeset.check_constraint(:price, name: :positive_price, message: "price must be greater than 0") 71 | |> PoolRepo.insert() 72 | assert changeset.errors == [price: {"price must be greater than 0", [constraint: :check, constraint_name: "positive_price"]}] 73 | assert changeset.data.__meta__.state == :built 74 | 75 | # When the change does not violate the check constraint 76 | changeset = Ecto.Changeset.change(%Constraint{}, price: 10, from: 100, to: 200) 77 
| {:ok, changeset} = 78 | changeset 79 | |> Ecto.Changeset.check_constraint(:price, name: :positive_price, message: "price must be greater than 0") 80 | |> PoolRepo.insert() 81 | assert is_integer(changeset.id) 82 | end 83 | end 84 | -------------------------------------------------------------------------------- /integration_test/myxql/explain_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ExplainTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Post 6 | import Ecto.Query, only: [from: 2] 7 | 8 | describe "explain" do 9 | test "select" do 10 | explain = TestRepo.explain(:all, from(p in Post, where: p.title == "title"), timeout: 20000) 11 | 12 | assert explain =~ 13 | "| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra |" 14 | 15 | assert explain =~ "p0" 16 | assert explain =~ "SIMPLE" 17 | assert explain =~ "Using where" 18 | end 19 | 20 | test "delete" do 21 | explain = TestRepo.explain(:delete_all, Post) 22 | assert explain =~ "DELETE" 23 | assert explain =~ "p0" 24 | end 25 | 26 | test "update" do 27 | explain = TestRepo.explain(:update_all, from(p in Post, update: [set: [title: "new title"]])) 28 | assert explain =~ "UPDATE" 29 | assert explain =~ "p0" 30 | end 31 | 32 | test "invalid" do 33 | assert_raise(MyXQL.Error, fn -> 34 | TestRepo.explain(:all, from(p in "posts", select: p.invalid, where: p.invalid == "title")) 35 | end) 36 | end 37 | 38 | test "map format" do 39 | [explain] = TestRepo.explain(:all, Post, format: :map) 40 | keys = explain["query_block"] |> Map.keys 41 | assert Enum.member?(keys, "cost_info") 42 | assert Enum.member?(keys, "select_id") 43 | assert Enum.member?(keys, "table") 44 | end 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /integration_test/myxql/migrations_test.exs: 
-------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.MigrationsTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias Ecto.Integration.PoolRepo 5 | import ExUnit.CaptureLog 6 | 7 | @moduletag :capture_log 8 | @base_migration 3_000_000 9 | 10 | defmodule NormalMigration do 11 | use Ecto.Migration 12 | 13 | def change do 14 | create_if_not_exists table(:log_mode_table) 15 | end 16 | end 17 | 18 | defmodule AlterMigration do 19 | use Ecto.Migration 20 | 21 | def change do 22 | create table(:alter_table) do 23 | add(:column1, :string) 24 | end 25 | 26 | alter table(:alter_table) do 27 | add(:column2, :string, after: :column1, comment: "second column") 28 | end 29 | end 30 | end 31 | 32 | text_variants = ~w/tinytext text mediumtext longtext/a 33 | @text_variants text_variants 34 | 35 | collation = "utf8mb4_bin" 36 | @collation collation 37 | 38 | defmodule CollateMigration do 39 | use Ecto.Migration 40 | 41 | @text_variants text_variants 42 | @collation collation 43 | 44 | def change do 45 | create table(:collate_reference) do 46 | add :name, :string, collation: @collation 47 | end 48 | 49 | create unique_index(:collate_reference, :name) 50 | 51 | create table(:collate) do 52 | add :string, :string, collation: @collation 53 | add :varchar, :varchar, size: 255, collation: @collation 54 | add :name_string, references(:collate_reference, type: :string, column: :name), collation: @collation 55 | 56 | for type <- @text_variants do 57 | add type, type, collation: @collation 58 | end 59 | end 60 | 61 | alter table(:collate) do 62 | modify :string, :string, collation: "utf8mb4_general_ci" 63 | end 64 | end 65 | end 66 | 67 | describe "Migrator" do 68 | @get_lock_command ~s[SELECT GET_LOCK('ecto_Ecto.Integration.PoolRepo', -1)] 69 | @release_lock_command ~s[SELECT RELEASE_LOCK('ecto_Ecto.Integration.PoolRepo')] 70 | @create_table_sql ~s[CREATE TABLE IF NOT EXISTS `log_mode_table`] 71 | @create_table_log "create table if 
not exists log_mode_table" 72 | @drop_table_sql ~s[DROP TABLE IF EXISTS `log_mode_table`] 73 | @drop_table_log "drop table if exists log_mode_table" 74 | @version_insert ~s[INSERT INTO `schema_migrations`] 75 | @version_delete ~s[DELETE s0.* FROM `schema_migrations`] 76 | 77 | test "logs locking and transaction commands" do 78 | num = @base_migration + System.unique_integer([:positive]) 79 | up_log = 80 | capture_log(fn -> 81 | Ecto.Migrator.up(PoolRepo, num, NormalMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 82 | end) 83 | 84 | assert up_log =~ "begin []" 85 | assert up_log =~ @get_lock_command 86 | assert up_log =~ @create_table_sql 87 | assert up_log =~ @create_table_log 88 | assert up_log =~ @release_lock_command 89 | assert up_log =~ @version_insert 90 | assert up_log =~ "commit []" 91 | 92 | down_log = 93 | capture_log(fn -> 94 | Ecto.Migrator.down(PoolRepo, num, NormalMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 95 | end) 96 | 97 | assert down_log =~ "begin []" 98 | assert down_log =~ @get_lock_command 99 | assert down_log =~ @drop_table_sql 100 | assert down_log =~ @drop_table_log 101 | assert down_log =~ @release_lock_command 102 | assert down_log =~ @version_delete 103 | assert down_log =~ "commit []" 104 | end 105 | 106 | test "does not log sql when log is default" do 107 | num = @base_migration + System.unique_integer([:positive]) 108 | up_log = 109 | capture_log(fn -> 110 | Ecto.Migrator.up(PoolRepo, num, NormalMigration, log: :info) 111 | end) 112 | 113 | refute up_log =~ "begin []" 114 | refute up_log =~ @get_lock_command 115 | refute up_log =~ @create_table_sql 116 | assert up_log =~ @create_table_log 117 | refute up_log =~ @release_lock_command 118 | refute up_log =~ @version_insert 119 | refute up_log =~ "commit []" 120 | 121 | down_log = 122 | capture_log(fn -> 123 | Ecto.Migrator.down(PoolRepo, num, NormalMigration, log: :info) 124 | end) 125 | 126 | refute down_log =~ "begin []" 127 | 
refute down_log =~ @get_lock_command 128 | refute down_log =~ @drop_table_sql 129 | assert down_log =~ @drop_table_log 130 | refute down_log =~ @release_lock_command 131 | refute down_log =~ @version_delete 132 | refute down_log =~ "commit []" 133 | end 134 | 135 | test "add column with after and comment options" do 136 | num = @base_migration + System.unique_integer([:positive]) 137 | 138 | log = 139 | capture_log(fn -> 140 | Ecto.Migrator.up(PoolRepo, num, AlterMigration, log_migrations_sql: :info) 141 | end) 142 | 143 | assert log =~ "ALTER TABLE `alter_table` ADD `column2` varchar(255) COMMENT 'second column' AFTER `column1`" 144 | end 145 | 146 | test "collation can be set on a column" do 147 | num = @base_migration + System.unique_integer([:positive]) 148 | assert :ok = Ecto.Migrator.up(PoolRepo, num, CollateMigration, log: false) 149 | query = fn column -> """ 150 | SELECT collation_name 151 | FROM information_schema.columns 152 | WHERE table_name = 'collate' AND column_name = '#{column}'; 153 | """ 154 | end 155 | 156 | assert %{ 157 | rows: [["utf8mb4_general_ci"]] 158 | } = Ecto.Adapters.SQL.query!(PoolRepo, query.("string"), []) 159 | 160 | for type <- ~w/text name_string/ ++ @text_variants do 161 | assert %{ 162 | rows: [[@collation]] 163 | } = Ecto.Adapters.SQL.query!(PoolRepo, query.(type), []) 164 | end 165 | end 166 | end 167 | end 168 | -------------------------------------------------------------------------------- /integration_test/myxql/myxql_type_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.MyXQLTypeTest do 2 | use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) 3 | alias Ecto.Integration.TestRepo 4 | import Ecto.Query 5 | 6 | defmodule Bool do 7 | use Ecto.Schema 8 | 9 | schema "bits" do 10 | field :bit, :boolean 11 | end 12 | end 13 | 14 | test "bit" do 15 | TestRepo.insert_all("bits", [[bit: <<1::1>>], [bit: <<0::1>>]]) 
16 | 17 | assert TestRepo.all(from(b in "bits", select: b.bit, order_by: [desc: :bit])) == [ 18 | <<1::1>>, 19 | <<0::1>> 20 | ] 21 | end 22 | 23 | test "bit as boolean" do 24 | TestRepo.insert_all("bits", [[bit: <<1::1>>], [bit: <<0::1>>]]) 25 | 26 | assert TestRepo.all(from(b in Bool, select: b.bit, order_by: [desc: :bit])) == [ 27 | true, 28 | false 29 | ] 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /integration_test/myxql/prepare_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.PrepareTest do 2 | use Ecto.Integration.Case, async: false 3 | 4 | import Ecto.Query, only: [from: 2] 5 | 6 | alias Ecto.Integration.TestRepo 7 | alias Ecto.Integration.Post 8 | 9 | test "prepare option" do 10 | TestRepo.insert!(%Post{title: "one"}) 11 | 12 | query = from p in Post, select: fragment("'mxql test prepare option'") 13 | stmt_count_query = "SHOW GLOBAL STATUS LIKE '%prepared_stmt_count%'" 14 | 15 | %{rows: [[_, orig_count]]} = TestRepo.query!(stmt_count_query, []) 16 | orig_count = String.to_integer(orig_count) 17 | 18 | # Uncached 19 | assert TestRepo.all(query, prepare: :unnamed) == ["mxql test prepare option"] 20 | %{rows: [[_, new_count]]} = TestRepo.query!(stmt_count_query, []) 21 | assert String.to_integer(new_count) == orig_count 22 | 23 | assert TestRepo.all(query, prepare: :named) == ["mxql test prepare option"] 24 | assert %{rows: [[_, new_count]]} = TestRepo.query!(stmt_count_query, []) 25 | assert String.to_integer(new_count) == orig_count + 1 26 | 27 | # Cached 28 | assert TestRepo.all(query, prepare: :unnamed) == ["mxql test prepare option"] 29 | assert %{rows: [[_, new_count]]} = TestRepo.query!(stmt_count_query, []) 30 | assert String.to_integer(new_count) == orig_count + 1 31 | 32 | assert TestRepo.all(query, prepare: :named) == ["mxql test prepare option"] 33 | assert %{rows: [[_, new_count]]} = 
TestRepo.query!(stmt_count_query, []) 34 | assert String.to_integer(new_count) == orig_count + 1 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /integration_test/myxql/storage_test.exs: -------------------------------------------------------------------------------- 1 | Code.require_file("../support/file_helpers.exs", __DIR__) 2 | 3 | defmodule Ecto.Integration.StorageTest do 4 | use ExUnit.Case 5 | 6 | @moduletag :capture_log 7 | @base_migration 5_000_000 8 | 9 | import Support.FileHelpers 10 | alias Ecto.Integration.{PoolRepo, TestRepo} 11 | 12 | def params do 13 | # Pass log false to ensure we can still create/drop. 14 | url = Application.get_env(:ecto_sql, :mysql_test_url) <> "/storage_mgt" 15 | [log: false] ++ Ecto.Repo.Supervisor.parse_url(url) 16 | end 17 | 18 | def wrong_params do 19 | Keyword.merge(params(), 20 | username: "randomuser", 21 | password: "password1234" 22 | ) 23 | end 24 | 25 | def drop_database do 26 | run_mysql("DROP DATABASE #{params()[:database]};") 27 | end 28 | 29 | def create_database(grant_privileges_to \\ nil) do 30 | run_mysql("CREATE DATABASE #{params()[:database]};") 31 | if grant_privileges_to do 32 | run_mysql("GRANT ALL PRIVILEGES ON #{params()[:database]}.* to #{grant_privileges_to}") 33 | end 34 | end 35 | 36 | def create_posts do 37 | run_mysql("CREATE TABLE posts (title varchar(20));", ["-D", params()[:database]]) 38 | end 39 | 40 | def run_mysql(sql, args \\ []) do 41 | params = params() 42 | env = if password = params[:password], do: [{"MYSQL_PWD", password}], else: [] 43 | 44 | args = [ 45 | "-u", 46 | params[:username], 47 | "--host", 48 | params[:hostname], 49 | "--port", 50 | to_string(params[:port] || 3306), 51 | "-e", 52 | sql | args 53 | ] 54 | 55 | System.cmd("mysql", args, env: env) 56 | end 57 | 58 | test "storage up (twice in a row)" do 59 | assert Ecto.Adapters.MyXQL.storage_up(params()) == :ok 60 | assert Ecto.Adapters.MyXQL.storage_up(params()) == 
{:error, :already_up} 61 | after 62 | drop_database() 63 | end 64 | 65 | test "storage down (twice in a row)" do 66 | create_database() 67 | assert Ecto.Adapters.MyXQL.storage_down(params()) == :ok 68 | assert Ecto.Adapters.MyXQL.storage_down(params()) == {:error, :already_down} 69 | end 70 | 71 | test "storage up and down (wrong credentials)" do 72 | refute Ecto.Adapters.MyXQL.storage_up(wrong_params()) == :ok 73 | create_database() 74 | refute Ecto.Adapters.MyXQL.storage_down(wrong_params()) == :ok 75 | after 76 | drop_database() 77 | end 78 | 79 | test "storage up with unprivileged user with access to the database" do 80 | unprivileged_params = Keyword.merge(params(), 81 | username: "unprivileged", 82 | password: "pass" 83 | ) 84 | run_mysql("CREATE USER unprivileged IDENTIFIED BY 'pass'") 85 | refute Ecto.Adapters.MyXQL.storage_up(unprivileged_params) == :ok 86 | create_database("unprivileged") 87 | assert Ecto.Adapters.MyXQL.storage_up(unprivileged_params) == {:error, :already_up} 88 | after 89 | run_mysql("DROP USER unprivileged") 90 | drop_database() 91 | end 92 | 93 | test "structure dump and load" do 94 | create_database() 95 | create_posts() 96 | 97 | # Default path 98 | {:ok, _} = Ecto.Adapters.MyXQL.structure_dump(tmp_path(), params()) 99 | dump = File.read!(Path.join(tmp_path(), "structure.sql")) 100 | 101 | drop_database() 102 | create_database() 103 | 104 | # Load custom 105 | dump_path = Path.join(tmp_path(), "custom.sql") 106 | File.rm(dump_path) 107 | 108 | {:error, _} = 109 | Ecto.Adapters.MyXQL.structure_load(tmp_path(), [dump_path: dump_path] ++ params()) 110 | 111 | # Dump custom 112 | {:ok, _} = Ecto.Adapters.MyXQL.structure_dump(tmp_path(), [dump_path: dump_path] ++ params()) 113 | assert strip_timestamp(dump) != strip_timestamp(File.read!(dump_path)) 114 | 115 | # Load original 116 | {:ok, _} = Ecto.Adapters.MyXQL.structure_load(tmp_path(), params()) 117 | 118 | {:ok, _} = Ecto.Adapters.MyXQL.structure_dump(tmp_path(), [dump_path: 
dump_path] ++ params()) 119 | assert strip_timestamp(dump) == strip_timestamp(File.read!(dump_path)) 120 | after 121 | drop_database() 122 | end 123 | 124 | test "storage status is up when database is created" do 125 | create_database() 126 | assert :up == Ecto.Adapters.MyXQL.storage_status(params()) 127 | after 128 | drop_database() 129 | end 130 | 131 | test "storage status is down when database is not created" do 132 | create_database() 133 | drop_database() 134 | assert :down == Ecto.Adapters.MyXQL.storage_status(params()) 135 | end 136 | 137 | test "storage status is an error when wrong credentials are passed" do 138 | assert ExUnit.CaptureLog.capture_log(fn -> 139 | assert {:error, _} = Ecto.Adapters.MyXQL.storage_status(wrong_params()) 140 | end) =~ "(1045) (ER_ACCESS_DENIED_ERROR)" 141 | end 142 | 143 | defmodule Migration do 144 | use Ecto.Migration 145 | def change, do: :ok 146 | end 147 | 148 | test "structure dump and load with migrations table" do 149 | default_db = "ecto_test" 150 | num = @base_migration + System.unique_integer([:positive]) 151 | :ok = Ecto.Migrator.up(PoolRepo, num, Migration, log: false) 152 | {:ok, path} = Ecto.Adapters.MyXQL.structure_dump(tmp_path(), TestRepo.config()) 153 | contents = File.read!(path) 154 | assert contents =~ "Database: #{default_db}" 155 | assert contents =~ "INSERT INTO `schema_migrations` (version) VALUES (#{num})" 156 | end 157 | 158 | test "raises when attempting to dump multiple prefixes" do 159 | config = Keyword.put(TestRepo.config(), :dump_prefixes, ["ecto_test", "another_db"]) 160 | msg = "cannot dump multiple prefixes with MySQL. Please run the command separately for each prefix." 
161 | 162 | assert_raise ArgumentError, msg, fn -> 163 | Ecto.Adapters.MyXQL.structure_dump(tmp_path(), config) 164 | end 165 | end 166 | 167 | test "dumps structure and schema_migration records only from queried prefix" do 168 | # Create the storage_mgt database 169 | create_database() 170 | prefix = params()[:database] 171 | 172 | # Run migrations 173 | version = @base_migration + System.unique_integer([:positive]) 174 | :ok = Ecto.Migrator.up(PoolRepo, version, Migration, log: false, prefix: prefix) 175 | 176 | config = Keyword.put(TestRepo.config(), :dump_prefixes, [prefix]) 177 | {:ok, path} = Ecto.Adapters.MyXQL.structure_dump(tmp_path(), config) 178 | contents = File.read!(path) 179 | 180 | refute contents =~ "USE `#{prefix}`" 181 | assert contents =~ "Database: #{prefix}" 182 | assert contents =~ "INSERT INTO `schema_migrations` (version) VALUES (#{version})" 183 | after 184 | drop_database() 185 | end 186 | 187 | defp strip_timestamp(dump) do 188 | dump 189 | |> String.split("\n") 190 | |> Enum.reject(&String.contains?(&1, "completed on")) 191 | |> Enum.join("\n") 192 | end 193 | 194 | test "structure dump_cmd" do 195 | num = @base_migration + System.unique_integer([:positive]) 196 | :ok = Ecto.Migrator.up(PoolRepo, num, Migration, log: false) 197 | 198 | assert {output, 0} = 199 | Ecto.Adapters.MyXQL.dump_cmd( 200 | [], 201 | [], 202 | PoolRepo.config() 203 | ) 204 | 205 | assert output =~ "INSERT INTO `schema_migrations` VALUES (" 206 | end 207 | end 208 | -------------------------------------------------------------------------------- /integration_test/myxql/test_helper.exs: -------------------------------------------------------------------------------- 1 | Logger.configure(level: :info) 2 | 3 | # Configure Ecto for support and tests 4 | Application.put_env(:ecto, :primary_key_type, :id) 5 | Application.put_env(:ecto, :async_integration_tests, false) 6 | Application.put_env(:ecto_sql, :lock_for_update, "FOR UPDATE") 7 | 8 | 
Code.require_file("../support/repo.exs", __DIR__) 9 | 10 | # Configure MySQL connection 11 | Application.put_env( 12 | :ecto_sql, 13 | :mysql_test_url, 14 | "ecto://" <> (System.get_env("MYSQL_URL") || "root@127.0.0.1") 15 | ) 16 | 17 | # Pool repo for async, safe tests 18 | alias Ecto.Integration.TestRepo 19 | 20 | Application.put_env(:ecto_sql, TestRepo, 21 | url: Application.get_env(:ecto_sql, :mysql_test_url) <> "/ecto_test", 22 | pool: Ecto.Adapters.SQL.Sandbox, 23 | show_sensitive_data_on_connection_error: true, 24 | after_connect: {Ecto.Integration.TestRepo, :set_connection_charset, []}, 25 | log: false 26 | ) 27 | 28 | defmodule Ecto.Integration.TestRepo do 29 | use Ecto.Integration.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.MyXQL 30 | 31 | def set_connection_charset(conn) do 32 | %{rows: [[version]]} = MyXQL.query!(conn, "SELECT @@version", []) 33 | 34 | if version >= "8.0.0" do 35 | _ = MyXQL.query!(conn, "SET NAMES utf8mb4 COLLATE utf8mb4_0900_ai_ci;", []) 36 | end 37 | end 38 | 39 | def create_prefix(prefix) do 40 | "create database #{prefix}" 41 | end 42 | 43 | def drop_prefix(prefix) do 44 | "drop database #{prefix}" 45 | end 46 | 47 | def uuid do 48 | Ecto.UUID 49 | end 50 | end 51 | 52 | # Pool repo for non-async tests 53 | alias Ecto.Integration.PoolRepo 54 | 55 | Application.put_env(:ecto_sql, PoolRepo, 56 | adapter: Ecto.Adapters.MyXQL, 57 | url: Application.get_env(:ecto_sql, :mysql_test_url) <> "/ecto_test", 58 | pool_size: 5, 59 | pool_count: String.to_integer(System.get_env("POOL_COUNT", "1")), 60 | show_sensitive_data_on_connection_error: true 61 | ) 62 | 63 | defmodule Ecto.Integration.PoolRepo do 64 | use Ecto.Integration.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.MyXQL 65 | end 66 | 67 | # Load support files 68 | ecto = Mix.Project.deps_paths()[:ecto] 69 | Code.require_file("#{ecto}/integration_test/support/schemas.exs", __DIR__) 70 | Code.require_file("../support/migration.exs", __DIR__) 71 | 72 | defmodule 
Ecto.Integration.Case do 73 | use ExUnit.CaseTemplate 74 | 75 | setup do 76 | :ok = Ecto.Adapters.SQL.Sandbox.checkout(TestRepo) 77 | end 78 | end 79 | 80 | {:ok, _} = Ecto.Adapters.MyXQL.ensure_all_started(TestRepo.config(), :temporary) 81 | 82 | # Load up the repository, start it, and run migrations 83 | _ = Ecto.Adapters.MyXQL.storage_down(TestRepo.config()) 84 | :ok = Ecto.Adapters.MyXQL.storage_up(TestRepo.config()) 85 | 86 | {:ok, _pid} = TestRepo.start_link() 87 | {:ok, _pid} = PoolRepo.start_link() 88 | 89 | %{rows: [[version]]} = TestRepo.query!("SELECT @@version", []) 90 | 91 | version = 92 | case Regex.named_captures(~r/(?[0-9]*)(\.(?[0-9]*))?.*/, version) do 93 | %{"major" => major, "minor" => minor} -> "#{major}.#{minor}.0" 94 | %{"major" => major} -> "#{major}.0.0" 95 | _other -> version 96 | end 97 | 98 | excludes = [ 99 | # not sure how to support this yet 100 | :bitstring_type, 101 | :duration_type, 102 | # MySQL does not have an array type 103 | :array_type, 104 | # The next two features rely on RETURNING, which MySQL does not support 105 | :read_after_writes, 106 | :returning, 107 | # Unsupported query features 108 | :aggregate_filters, 109 | :transaction_isolation, 110 | :with_conflict_target, 111 | # Unsupported migration features 112 | :create_index_if_not_exists, 113 | :add_column_if_not_exists, 114 | :remove_column_if_exists, 115 | # MySQL doesn't have a boolean type, so this ends up returning 0/1 116 | :map_boolean_in_expression, 117 | # MySQL doesn't support indexed parameters 118 | :placeholders, 119 | # MySQL doesn't support specifying columns for ON DELETE SET NULL 120 | :on_delete_nilify_column_list, 121 | # MySQL doesnt' support anything except a single column in DISTINCT 122 | :multicolumn_distinct, 123 | # uncertain whether we can support this. 
needs more exploring 124 | :json_extract_path_with_field 125 | ] 126 | 127 | if Version.match?(version, ">= 8.0.0") do 128 | ExUnit.configure(exclude: excludes) 129 | else 130 | ExUnit.configure(exclude: [:create_constraint, :values_list, :rename_column | excludes]) 131 | end 132 | 133 | :ok = Ecto.Migrator.up(TestRepo, 0, Ecto.Integration.Migration, log: false) 134 | Ecto.Adapters.SQL.Sandbox.mode(TestRepo, :manual) 135 | Process.flag(:trap_exit, true) 136 | 137 | ExUnit.start() 138 | -------------------------------------------------------------------------------- /integration_test/myxql/upsert_all_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.UpsertAllTest do 2 | use Ecto.Integration.Case 3 | 4 | alias Ecto.Integration.TestRepo 5 | import Ecto.Query 6 | alias Ecto.Integration.Post 7 | 8 | test "on conflict raise" do 9 | post = [title: "first", uuid: "6fa459ea-ee8a-3ca4-894e-db77e160355e"] 10 | {1, nil} = TestRepo.insert_all(Post, [post], on_conflict: :raise) 11 | assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: :raise)) 12 | end 13 | 14 | test "on conflict ignore" do 15 | post = [title: "first", uuid: "6fa459ea-ee8a-3ca4-894e-db77e160355e"] 16 | assert TestRepo.insert_all(Post, [post], on_conflict: :nothing) == 17 | {1, nil} 18 | assert TestRepo.insert_all(Post, [post], on_conflict: :nothing) == 19 | {1, nil} 20 | end 21 | 22 | test "on conflict keyword list" do 23 | on_conflict = [set: [title: "second"]] 24 | post = [title: "first", uuid: "6fa459ea-ee8a-3ca4-894e-db77e160355e"] 25 | {1, nil} = TestRepo.insert_all(Post, [post], on_conflict: on_conflict) 26 | 27 | assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict) == 28 | {2, nil} 29 | assert TestRepo.all(from p in Post, select: p.title) == ["second"] 30 | end 31 | 32 | test "on conflict query and conflict target" do 33 | on_conflict = from Post, update: [set: [title: "second"]] 34 | post = [title: "first", 
uuid: "6fa459ea-ee8a-3ca4-894e-db77e160355e"] 35 | assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict) == 36 | {1, nil} 37 | 38 | assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict) == 39 | {2, nil} 40 | assert TestRepo.all(from p in Post, select: p.title) == ["second"] 41 | end 42 | end 43 | -------------------------------------------------------------------------------- /integration_test/pg/all_test.exs: -------------------------------------------------------------------------------- 1 | ecto = Mix.Project.deps_paths()[:ecto] 2 | Code.require_file "#{ecto}/integration_test/cases/assoc.exs", __DIR__ 3 | Code.require_file "#{ecto}/integration_test/cases/interval.exs", __DIR__ 4 | Code.require_file "#{ecto}/integration_test/cases/joins.exs", __DIR__ 5 | Code.require_file "#{ecto}/integration_test/cases/preload.exs", __DIR__ 6 | Code.require_file "#{ecto}/integration_test/cases/repo.exs", __DIR__ 7 | Code.require_file "#{ecto}/integration_test/cases/type.exs", __DIR__ 8 | Code.require_file "#{ecto}/integration_test/cases/windows.exs", __DIR__ 9 | 10 | Code.require_file "../sql/alter.exs", __DIR__ 11 | Code.require_file "../sql/lock.exs", __DIR__ 12 | Code.require_file "../sql/logging.exs", __DIR__ 13 | Code.require_file "../sql/migration.exs", __DIR__ 14 | Code.require_file "../sql/migrator.exs", __DIR__ 15 | Code.require_file "../sql/sandbox.exs", __DIR__ 16 | Code.require_file "../sql/sql.exs", __DIR__ 17 | Code.require_file "../sql/stream.exs", __DIR__ 18 | Code.require_file "../sql/subquery.exs", __DIR__ 19 | Code.require_file "../sql/transaction.exs", __DIR__ 20 | -------------------------------------------------------------------------------- /integration_test/pg/constraints_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ConstraintsTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Ecto.Migrator, only: [up: 4] 5 | alias Ecto.Integration.PoolRepo 6 | 7 | 
defmodule ConstraintMigration do 8 | use Ecto.Migration 9 | 10 | @table table(:constraints_test) 11 | 12 | def change do 13 | create @table do 14 | add :price, :integer 15 | add :from, :integer 16 | add :to, :integer 17 | end 18 | create constraint(@table.name, :cannot_overlap, exclude: ~s|gist (int4range("from", "to", '[]') WITH &&)|) 19 | create constraint(@table.name, "positive_price", check: "price > 0") 20 | end 21 | end 22 | 23 | defmodule Constraint do 24 | use Ecto.Integration.Schema 25 | 26 | schema "constraints_test" do 27 | field :price, :integer 28 | field :from, :integer 29 | field :to, :integer 30 | end 31 | end 32 | 33 | @base_migration 2_000_000 34 | 35 | setup_all do 36 | ExUnit.CaptureLog.capture_log(fn -> 37 | num = @base_migration + System.unique_integer([:positive]) 38 | up(PoolRepo, num, ConstraintMigration, log: false) 39 | end) 40 | 41 | :ok 42 | end 43 | 44 | test "exclusion constraint" do 45 | changeset = Ecto.Changeset.change(%Constraint{}, from: 0, to: 10) 46 | {:ok, _} = PoolRepo.insert(changeset) 47 | 48 | non_overlapping_changeset = Ecto.Changeset.change(%Constraint{}, from: 11, to: 12) 49 | {:ok, _} = PoolRepo.insert(non_overlapping_changeset) 50 | 51 | overlapping_changeset = Ecto.Changeset.change(%Constraint{}, from: 9, to: 12) 52 | 53 | exception = 54 | assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to insert struct/, fn -> 55 | PoolRepo.insert(overlapping_changeset) 56 | end 57 | assert exception.message =~ "\"cannot_overlap\" (exclusion_constraint)" 58 | assert exception.message =~ "The changeset has not defined any constraint." 
59 | assert exception.message =~ "call `exclusion_constraint/3`" 60 | 61 | message = ~r/constraint error when attempting to insert struct/ 62 | exception = 63 | assert_raise Ecto.ConstraintError, message, fn -> 64 | overlapping_changeset 65 | |> Ecto.Changeset.exclusion_constraint(:from) 66 | |> PoolRepo.insert() 67 | end 68 | assert exception.message =~ "\"cannot_overlap\" (exclusion_constraint)" 69 | 70 | {:error, changeset} = 71 | overlapping_changeset 72 | |> Ecto.Changeset.exclusion_constraint(:from, name: :cannot_overlap) 73 | |> PoolRepo.insert() 74 | assert changeset.errors == [from: {"violates an exclusion constraint", [constraint: :exclusion, constraint_name: "cannot_overlap"]}] 75 | assert changeset.data.__meta__.state == :built 76 | end 77 | 78 | test "check constraint" do 79 | # When the changeset doesn't expect the db error 80 | changeset = Ecto.Changeset.change(%Constraint{}, price: -10) 81 | exception = 82 | assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to insert struct/, fn -> 83 | PoolRepo.insert(changeset) 84 | end 85 | 86 | assert exception.message =~ "\"positive_price\" (check_constraint)" 87 | assert exception.message =~ "The changeset has not defined any constraint." 
88 | assert exception.message =~ "call `check_constraint/3`" 89 | 90 | # When the changeset does expect the db error, but doesn't give a custom message 91 | {:error, changeset} = 92 | changeset 93 | |> Ecto.Changeset.check_constraint(:price, name: :positive_price) 94 | |> PoolRepo.insert() 95 | assert changeset.errors == [price: {"is invalid", [constraint: :check, constraint_name: "positive_price"]}] 96 | assert changeset.data.__meta__.state == :built 97 | 98 | # When the changeset does expect the db error and gives a custom message 99 | changeset = Ecto.Changeset.change(%Constraint{}, price: -10) 100 | {:error, changeset} = 101 | changeset 102 | |> Ecto.Changeset.check_constraint(:price, name: :positive_price, message: "price must be greater than 0") 103 | |> PoolRepo.insert() 104 | assert changeset.errors == [price: {"price must be greater than 0", [constraint: :check, constraint_name: "positive_price"]}] 105 | assert changeset.data.__meta__.state == :built 106 | 107 | # When the change does not violate the check constraint 108 | changeset = Ecto.Changeset.change(%Constraint{}, price: 10, from: 100, to: 200) 109 | {:ok, changeset} = 110 | changeset 111 | |> Ecto.Changeset.check_constraint(:price, name: :positive_price, message: "price must be greater than 0") 112 | |> PoolRepo.insert() 113 | assert is_integer(changeset.id) 114 | end 115 | end 116 | -------------------------------------------------------------------------------- /integration_test/pg/copy_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.CopyTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Post 6 | 7 | test "copy to and from table" do 8 | read = Ecto.Adapters.SQL.stream(TestRepo, "COPY posts TO STDOUT") 9 | write = Ecto.Adapters.SQL.stream(TestRepo, "COPY posts FROM STDIN") 10 | 11 | TestRepo.transaction fn -> 12 | one = TestRepo.insert!(%Post{title: "one"}) 
13 | two = TestRepo.insert!(%Post{title: "two"}) 14 | 15 | data = Enum.map(read, &(&1.rows)) 16 | assert TestRepo.delete_all(Post) == {2, nil} 17 | 18 | assert ^write = Enum.into(data, write) 19 | assert TestRepo.all(Post) == [one, two] 20 | end 21 | end 22 | end 23 | -------------------------------------------------------------------------------- /integration_test/pg/exceptions_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ExceptionsTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Post 6 | import Ecto.Query, only: [from: 2] 7 | 8 | test "on bad JSON interpolation" do 9 | assert_raise Postgrex.Error, 10 | ~r/If you are trying to query a JSON field, the parameter may need to be interpolated/, 11 | fn -> TestRepo.all(from p in Post, where: p.meta["field"] != "example") end 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /integration_test/pg/explain_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ExplainTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Post 6 | import Ecto.Query, only: [from: 2] 7 | 8 | test "explain" do 9 | explain = TestRepo.explain(:all, Post, analyze: true, verbose: true, timeout: 20000) 10 | assert explain =~ "cost=" 11 | assert explain =~ "actual time=" 12 | assert explain =~ "loops=" 13 | assert explain =~ "Output:" 14 | assert explain =~ ~r/Planning [T|t]ime:/ 15 | assert explain =~ ~r/Execution [T|t]ime:/ 16 | 17 | explain = TestRepo.explain(:delete_all, Post) 18 | assert explain =~ "Delete on posts p0" 19 | assert explain =~ "cost=" 20 | 21 | explain = TestRepo.explain(:update_all, from(p in Post, update: [set: [title: "new title"]])) 22 | assert explain =~ "Update on posts p0" 23 | assert explain =~ 
"cost=" 24 | 25 | assert_raise(ArgumentError, "bad boolean value 1", fn -> 26 | TestRepo.explain(:all, Post, analyze: "1") 27 | end) 28 | end 29 | 30 | @tag :plan_cache_mode 31 | test "explain with fallback generic plan" do 32 | # when using fallback generic plan , placeholders are used instead of values. i.e. $1 instead of 1 33 | query = from p in Post, where: p.visits == ^1 and p.title == ^"title" 34 | 35 | explain = 36 | TestRepo.explain(:all, query, plan: :fallback_generic, verbose: true, timeout: 20000) 37 | 38 | assert explain =~ "p0.visits = $1" 39 | assert explain =~ "(p0.title)::text = $2" 40 | 41 | # Works when no parameters are given 42 | TestRepo.explain(:all, Post, plan: :fallback_generic, verbose: true, timeout: 20000) 43 | end 44 | 45 | test "explain with fallback generic plan cannot use analyze" do 46 | msg = ~r/analyze cannot be used with a `:fallback_generic` explain plan/ 47 | 48 | assert_raise ArgumentError, msg, fn -> 49 | TestRepo.explain(:all, Post, plan: :fallback_generic, analyze: true) 50 | end 51 | end 52 | 53 | test "explain with custom plan" do 54 | # when using custom plan, values are used instead of placeholders. i.e. 
1 instead of $1 55 | query = from p in Post, where: p.visits == ^1 and p.title == ^"title" 56 | 57 | explain = 58 | TestRepo.explain(:all, query, plan: :custom, analyze: true, verbose: true, timeout: 20000) 59 | 60 | refute explain =~ "$1" 61 | refute explain =~ "$2" 62 | assert explain =~ "p0.visits = 1" 63 | assert explain =~ "(p0.title)::text = 'title'" 64 | end 65 | 66 | test "explain MAP format" do 67 | [explain] = 68 | TestRepo.explain(:all, Post, analyze: true, verbose: true, timeout: 20000, format: :map) 69 | 70 | keys = explain["Plan"] |> Map.keys() 71 | assert Enum.member?(keys, "Actual Loops") 72 | assert Enum.member?(keys, "Actual Rows") 73 | assert Enum.member?(keys, "Actual Startup Time") 74 | end 75 | 76 | test "explain YAML format" do 77 | explain = 78 | TestRepo.explain(:all, Post, analyze: true, verbose: true, timeout: 20000, format: :yaml) 79 | 80 | assert explain =~ ~r/Plan:/ 81 | assert explain =~ ~r/Node Type:/ 82 | assert explain =~ ~r/Relation Name:/ 83 | end 84 | end 85 | -------------------------------------------------------------------------------- /integration_test/pg/migrations_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.MigrationsTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias Ecto.Integration.PoolRepo 5 | alias Ecto.Integration.AdvisoryLockPoolRepo 6 | import ExUnit.CaptureLog 7 | 8 | @moduletag :capture_log 9 | @base_migration 3_000_000 10 | 11 | defmodule DuplicateTableMigration do 12 | use Ecto.Migration 13 | 14 | def change do 15 | create_if_not_exists table(:duplicate_table) 16 | create_if_not_exists table(:duplicate_table) 17 | end 18 | end 19 | 20 | defmodule NormalMigration do 21 | use Ecto.Migration 22 | 23 | def change do 24 | create_if_not_exists table(:log_mode_table) 25 | end 26 | end 27 | 28 | defmodule IndexMigration do 29 | use Ecto.Migration 30 | @disable_ddl_transaction true 31 | 32 | def change do 33 | create_if_not_exists 
table(:index_table) do 34 | add :name, :string 35 | add :custom_id, :uuid 36 | timestamps() 37 | end 38 | 39 | create_if_not_exists index(:index_table, [:name], concurrently: true) 40 | end 41 | end 42 | 43 | collation = "POSIX" 44 | @collation collation 45 | 46 | text_types = ~w/char varchar text/a 47 | @text_types text_types 48 | 49 | defmodule CollateMigration do 50 | use Ecto.Migration 51 | 52 | @collation collation 53 | @text_types text_types 54 | 55 | def change do 56 | create table(:collate_reference) do 57 | add :name, :string, primary_key: true, collation: @collation 58 | end 59 | 60 | create unique_index(:collate_reference, :name) 61 | 62 | create table(:collate) do 63 | add :string, :string, collation: @collation 64 | for type <- @text_types do 65 | add type, type, collation: @collation 66 | end 67 | 68 | add :name_string, references(:collate_reference, type: :string, column: :name), collation: @collation 69 | end 70 | 71 | alter table(:collate) do 72 | modify :string, :string, collation: "C" 73 | end 74 | end 75 | end 76 | 77 | test "logs Postgres notice messages" do 78 | log = 79 | capture_log(fn -> 80 | num = @base_migration + System.unique_integer([:positive]) 81 | Ecto.Migrator.up(PoolRepo, num, DuplicateTableMigration, log: :info) 82 | end) 83 | 84 | assert log =~ ~s(relation "duplicate_table" already exists, skipping) 85 | end 86 | 87 | describe "Migrator" do 88 | @get_lock_command ~s(LOCK TABLE "schema_migrations" IN SHARE UPDATE EXCLUSIVE MODE) 89 | @get_advisory_lock_command ~s[SELECT pg_try_advisory_lock(129653361)] 90 | @release_advisory_lock_command ~s[SELECT pg_advisory_unlock(129653361)] 91 | @create_table_sql ~s(CREATE TABLE IF NOT EXISTS "log_mode_table") 92 | @create_table_log "create table if not exists log_mode_table" 93 | @drop_table_sql ~s(DROP TABLE IF EXISTS "log_mode_table") 94 | @drop_table_log "drop table if exists log_mode_table" 95 | @version_insert ~s(INSERT INTO "schema_migrations") 96 | @advisory_version_insert ~s(INSERT 
INTO "advisory_lock_schema_migrations") 97 | @version_delete ~s(DELETE FROM "schema_migrations") 98 | @advisory_version_delete ~s(DELETE FROM "advisory_lock_schema_migrations") 99 | 100 | test "logs locking and transaction commands" do 101 | num = @base_migration + System.unique_integer([:positive]) 102 | up_log = 103 | capture_log(fn -> 104 | Ecto.Migrator.up(PoolRepo, num, NormalMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 105 | end) 106 | 107 | assert Regex.scan(~r/(begin \[\])/, up_log) |> length() == 2 108 | assert up_log =~ @get_lock_command 109 | assert up_log =~ @create_table_sql 110 | assert up_log =~ @create_table_log 111 | assert up_log =~ @version_insert 112 | assert Regex.scan(~r/(commit \[\])/, up_log) |> length() == 2 113 | 114 | down_log = 115 | capture_log(fn -> 116 | Ecto.Migrator.down(PoolRepo, num, NormalMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 117 | end) 118 | 119 | assert down_log =~ "begin []" 120 | assert down_log =~ @get_lock_command 121 | assert down_log =~ @drop_table_sql 122 | assert down_log =~ @drop_table_log 123 | assert down_log =~ @version_delete 124 | assert down_log =~ "commit []" 125 | end 126 | 127 | test "logs advisory lock and transaction commands" do 128 | num = @base_migration + System.unique_integer([:positive]) 129 | up_log = 130 | capture_log(fn -> 131 | Ecto.Migrator.up(AdvisoryLockPoolRepo, num, IndexMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 132 | end) 133 | 134 | refute up_log =~ @get_lock_command 135 | refute up_log =~ "begin []" 136 | assert up_log =~ @get_advisory_lock_command 137 | refute up_log =~ @version_insert 138 | assert up_log =~ @advisory_version_insert 139 | refute up_log =~ "commit []" 140 | assert up_log =~ @release_advisory_lock_command 141 | 142 | down_log = 143 | capture_log(fn -> 144 | Ecto.Migrator.down(AdvisoryLockPoolRepo, num, IndexMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: 
:info) 145 | end) 146 | 147 | refute down_log =~ "begin []" 148 | refute down_log =~ @get_lock_command 149 | assert down_log =~ @get_advisory_lock_command 150 | refute down_log =~ @version_delete 151 | assert down_log =~ @advisory_version_delete 152 | refute down_log =~ "commit []" 153 | assert down_log =~ @release_advisory_lock_command 154 | end 155 | 156 | test "does not log sql when log is default" do 157 | num = @base_migration + System.unique_integer([:positive]) 158 | up_log = 159 | capture_log(fn -> 160 | Ecto.Migrator.up(PoolRepo, num, NormalMigration, log: :info) 161 | end) 162 | 163 | refute up_log =~ "begin []" 164 | refute up_log =~ @get_lock_command 165 | refute up_log =~ @create_table_sql 166 | assert up_log =~ @create_table_log 167 | refute up_log =~ @version_insert 168 | refute up_log =~ "commit []" 169 | 170 | down_log = 171 | capture_log(fn -> 172 | Ecto.Migrator.down(PoolRepo, num, NormalMigration, log: :info) 173 | end) 174 | 175 | refute down_log =~ "begin []" 176 | refute down_log =~ @get_lock_command 177 | refute down_log =~ @drop_table_sql 178 | assert down_log =~ @drop_table_log 179 | refute down_log =~ @version_delete 180 | refute down_log =~ "commit []" 181 | end 182 | 183 | test "collation can be set on a column" do 184 | num = @base_migration + System.unique_integer([:positive]) 185 | 186 | assert :ok = Ecto.Migrator.up(PoolRepo, num, CollateMigration, log: :info) 187 | 188 | query = fn column -> """ 189 | SELECT collation_name 190 | FROM information_schema.columns 191 | WHERE table_name = 'collate' AND column_name = '#{column}'; 192 | """ 193 | end 194 | 195 | assert %{ 196 | rows: [["C"]] 197 | } = Ecto.Adapters.SQL.query!(PoolRepo, query.("string"), []) 198 | 199 | for type <- @text_types do 200 | assert %{ 201 | rows: [[@collation]] 202 | } = Ecto.Adapters.SQL.query!(PoolRepo, query.(type), []) 203 | end 204 | end 205 | end 206 | end 207 | -------------------------------------------------------------------------------- 
/integration_test/pg/prepare_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.PrepareTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Post 6 | 7 | test "prepare option" do 8 | one = TestRepo.insert!(%Post{title: "one"}) 9 | two = TestRepo.insert!(%Post{title: "two"}) 10 | 11 | # Uncached 12 | assert TestRepo.all(Post, prepare: :unnamed) == [one, two] 13 | assert TestRepo.all(Post, prepare: :named) == [one, two] 14 | 15 | # Cached 16 | assert TestRepo.all(Post, prepare: :unnamed) == [one, two] 17 | assert TestRepo.all(Post, prepare: :named) == [one, two] 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /integration_test/pg/test_helper.exs: -------------------------------------------------------------------------------- 1 | Logger.configure(level: :info) 2 | 3 | # Configure Ecto for support and tests 4 | Application.put_env(:ecto, :primary_key_type, :id) 5 | Application.put_env(:ecto, :async_integration_tests, true) 6 | Application.put_env(:ecto_sql, :lock_for_update, "FOR UPDATE") 7 | 8 | # Configure PG connection 9 | Application.put_env( 10 | :ecto_sql, 11 | :pg_test_url, 12 | "ecto://" <> (System.get_env("PG_URL") || "postgres:postgres@127.0.0.1") 13 | ) 14 | 15 | Code.require_file("../support/repo.exs", __DIR__) 16 | 17 | # Define type module 18 | opts = if Code.ensure_loaded?(Duration), do: [interval_decode_type: Duration], else: [] 19 | Postgrex.Types.define(Postgrex.EctoTypes, [], opts) 20 | 21 | # Pool repo for async, safe tests 22 | alias Ecto.Integration.TestRepo 23 | 24 | Application.put_env(:ecto_sql, TestRepo, 25 | url: Application.get_env(:ecto_sql, :pg_test_url) <> "/ecto_test", 26 | pool: Ecto.Adapters.SQL.Sandbox, 27 | show_sensitive_data_on_connection_error: true, 28 | log: false, 29 | types: Postgrex.EctoTypes 30 | ) 31 | 32 | defmodule 
Ecto.Integration.TestRepo do 33 | use Ecto.Integration.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.Postgres 34 | 35 | def create_prefix(prefix) do 36 | "create schema #{prefix}" 37 | end 38 | 39 | def drop_prefix(prefix) do 40 | "drop schema #{prefix}" 41 | end 42 | 43 | def uuid do 44 | Ecto.UUID 45 | end 46 | end 47 | 48 | # Pool repo for non-async tests 49 | alias Ecto.Integration.PoolRepo 50 | 51 | defmodule Ecto.Integration.PoolRepo do 52 | use Ecto.Integration.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.Postgres 53 | end 54 | 55 | # Pool repo for non-async and advisory lock tests 56 | alias Ecto.Integration.AdvisoryLockPoolRepo 57 | 58 | defmodule Ecto.Integration.AdvisoryLockPoolRepo do 59 | use Ecto.Integration.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.Postgres 60 | end 61 | 62 | pool_repo_config = [ 63 | url: Application.get_env(:ecto_sql, :pg_test_url) <> "/ecto_test", 64 | pool_size: 5, 65 | pool_count: String.to_integer(System.get_env("POOL_COUNT", "1")), 66 | max_restarts: 20, 67 | max_seconds: 10 68 | ] 69 | 70 | Application.put_env(:ecto_sql, PoolRepo, pool_repo_config) 71 | 72 | Application.put_env( 73 | :ecto_sql, 74 | AdvisoryLockPoolRepo, 75 | pool_repo_config ++ 76 | [ 77 | migration_source: "advisory_lock_schema_migrations", 78 | migration_lock: :pg_advisory_lock 79 | ] 80 | ) 81 | 82 | # Load support files 83 | ecto = Mix.Project.deps_paths()[:ecto] 84 | Code.require_file("#{ecto}/integration_test/support/schemas.exs", __DIR__) 85 | Code.require_file("../support/migration.exs", __DIR__) 86 | 87 | defmodule Ecto.Integration.Case do 88 | use ExUnit.CaseTemplate 89 | 90 | setup do 91 | :ok = Ecto.Adapters.SQL.Sandbox.checkout(TestRepo) 92 | end 93 | end 94 | 95 | {:ok, _} = Ecto.Adapters.Postgres.ensure_all_started(TestRepo.config(), :temporary) 96 | 97 | # Load up the repository, start it, and run migrations 98 | _ = Ecto.Adapters.Postgres.storage_down(TestRepo.config()) 99 | :ok = 
Ecto.Adapters.Postgres.storage_up(TestRepo.config()) 100 | 101 | {:ok, _pid} = TestRepo.start_link() 102 | {:ok, _pid} = PoolRepo.start_link() 103 | {:ok, _pid} = AdvisoryLockPoolRepo.start_link() 104 | 105 | %{rows: [[version]]} = TestRepo.query!("SHOW server_version", []) 106 | 107 | version = 108 | case Regex.named_captures(~r/(?[0-9]*)(\.(?[0-9]*))?.*/, version) do 109 | %{"major" => major, "minor" => minor} -> "#{major}.#{minor}.0" 110 | %{"major" => major} -> "#{major}.0.0" 111 | _other -> version 112 | end 113 | 114 | excludes = [:selected_as_with_having, :selected_as_with_order_by_expression] 115 | excludes_above_9_5 = [:without_conflict_target] 116 | excludes_below_9_6 = [:add_column_if_not_exists, :no_error_on_conditional_column_migration] 117 | excludes_below_12_0 = [:plan_cache_mode] 118 | excludes_below_15_0 = [:on_delete_nilify_column_list] 119 | 120 | exclude_list = excludes ++ excludes_above_9_5 121 | 122 | cond do 123 | Version.match?(version, "< 9.6.0") -> 124 | ExUnit.configure( 125 | exclude: exclude_list ++ excludes_below_9_6 ++ excludes_below_12_0 ++ excludes_below_15_0 126 | ) 127 | 128 | Version.match?(version, "< 12.0.0") -> 129 | ExUnit.configure(exclude: exclude_list ++ excludes_below_12_0 ++ excludes_below_15_0) 130 | 131 | Version.match?(version, "< 15.0.0") -> 132 | ExUnit.configure(exclude: exclude_list ++ excludes_below_15_0) 133 | 134 | true -> 135 | ExUnit.configure(exclude: exclude_list) 136 | end 137 | 138 | :ok = Ecto.Migrator.up(TestRepo, 0, Ecto.Integration.Migration, log: false) 139 | Ecto.Adapters.SQL.Sandbox.mode(TestRepo, :manual) 140 | Process.flag(:trap_exit, true) 141 | 142 | ExUnit.start() 143 | -------------------------------------------------------------------------------- /integration_test/pg/transaction_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.PGTransactionTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias 
Ecto.Integration.PoolRepo 5 | alias Ecto.Integration.TestRepo 6 | alias Ecto.Integration.Post 7 | 8 | require Logger 9 | @timeout 500 10 | 11 | describe "aborts on corrupted transactions" do 12 | test "outside sandbox" do 13 | PoolRepo.transaction fn -> 14 | {:error, _} = PoolRepo.query("INVALID") 15 | end 16 | 17 | PoolRepo.transaction fn -> 18 | # This will taint the whole inner transaction 19 | {:error, _} = PoolRepo.query("INVALID") 20 | 21 | assert_raise Postgrex.Error, ~r/current transaction is aborted/, fn -> 22 | PoolRepo.insert(%Post{}, skip_transaction: true) 23 | end 24 | end 25 | end 26 | 27 | test "inside sandbox" do 28 | TestRepo.transaction fn -> 29 | {:error, _} = TestRepo.query("INVALID") 30 | end 31 | 32 | TestRepo.transaction fn -> 33 | # This will taint the whole inner transaction 34 | {:error, _} = TestRepo.query("INVALID") 35 | 36 | assert_raise Postgrex.Error, ~r/current transaction is aborted/, fn -> 37 | TestRepo.insert(%Post{}, skip_transaction: true) 38 | end 39 | end 40 | end 41 | end 42 | 43 | describe "deadlocks" do 44 | test "reset worker" do 45 | tx1 = self() 46 | 47 | %Task{pid: tx2} = tx2_task = Task.async fn -> 48 | PoolRepo.transaction fn -> 49 | acquire_deadlock(tx1, [2, 1]) 50 | end 51 | end 52 | 53 | tx1_result = PoolRepo.transaction fn -> 54 | acquire_deadlock(tx2, [1, 2]) 55 | end 56 | 57 | tx2_result = Task.await(tx2_task) 58 | assert Enum.sort([tx1_result, tx2_result]) == [{:error, :deadlocked}, {:ok, :acquired}] 59 | end 60 | end 61 | 62 | defp acquire_deadlock(other_tx, [key1, key2] = _locks) do 63 | pg_advisory_xact_lock(key1) # acquire first lock 64 | Logger.debug "#{inspect self()} acquired #{key1}" 65 | send other_tx, :acquired1 # signal other_tx that we acquired lock on key1 66 | assert_receive :acquired1, @timeout # wait for other_tx to signal us that it acquired lock on its key1 67 | Logger.debug "#{inspect self()} continuing" 68 | 69 | try do 70 | Logger.debug "#{inspect self()} acquiring #{key2}" 71 | 
pg_advisory_xact_lock(key2) # try to acquire lock on key2 (might deadlock) 72 | rescue 73 | err in [Postgrex.Error] -> 74 | Logger.debug "#{inspect self()} got killed by deadlock detection" 75 | assert %Postgrex.Error{postgres: %{code: :deadlock_detected}} = err 76 | 77 | assert_tx_aborted() 78 | 79 | # Trapping a transaction should still be fine. 80 | try do 81 | Process.flag(:trap_exit, true) 82 | PoolRepo.transaction fn -> :ok end 83 | catch 84 | class, msg -> 85 | Logger.debug inspect([class, msg]) 86 | after 87 | Process.flag(:trap_exit, false) 88 | end 89 | 90 | # Even aborted transactions can be rolled back. 91 | PoolRepo.rollback(:deadlocked) 92 | else 93 | _ -> 94 | Logger.debug "#{inspect self()} acquired #{key2}" 95 | :acquired 96 | end 97 | end 98 | 99 | defp assert_tx_aborted do 100 | try do 101 | PoolRepo.query!("SELECT 1"); 102 | rescue 103 | err in [Postgrex.Error] -> 104 | # current transaction is aborted, commands ignored until end of transaction block 105 | assert %Postgrex.Error{postgres: %{code: :in_failed_sql_transaction}} = err 106 | else 107 | _ -> flunk "transaction should be aborted" 108 | end 109 | end 110 | 111 | defp pg_advisory_xact_lock(key) do 112 | %{rows: [[:void]]} = 113 | PoolRepo.query!("SELECT pg_advisory_xact_lock($1);", [key]) 114 | end 115 | end 116 | -------------------------------------------------------------------------------- /integration_test/sql/alter.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.AlterTest do 2 | use Ecto.Integration.Case, async: false 3 | 4 | alias Ecto.Integration.PoolRepo 5 | 6 | defmodule AlterMigrationOne do 7 | use Ecto.Migration 8 | 9 | def up do 10 | create table(:alter_col_type) do 11 | add :value, :integer 12 | end 13 | 14 | execute "INSERT INTO alter_col_type (value) VALUES (1)" 15 | end 16 | 17 | def down do 18 | drop table(:alter_col_type) 19 | end 20 | end 21 | 22 | defmodule AlterMigrationTwo do 23 | use 
Ecto.Migration 24 | 25 | def up do 26 | alter table(:alter_col_type) do 27 | modify :value, :numeric 28 | end 29 | end 30 | 31 | def down do 32 | alter table(:alter_col_type) do 33 | modify :value, :integer 34 | end 35 | end 36 | end 37 | 38 | import Ecto.Query, only: [from: 1, from: 2] 39 | 40 | defp run(direction, repo, module) do 41 | Ecto.Migration.Runner.run(repo, repo.config(), 1, module, :forward, direction, direction, log: false) 42 | end 43 | 44 | test "reset cache on returning query after alter column type" do 45 | values = from v in "alter_col_type", select: v.value 46 | 47 | assert :ok == run(:up, PoolRepo, AlterMigrationOne) 48 | assert PoolRepo.all(values) == [1] 49 | 50 | assert :ok == run(:up, PoolRepo, AlterMigrationTwo) 51 | [%Decimal{}] = PoolRepo.all(values) 52 | 53 | PoolRepo.transaction(fn() -> 54 | assert [%Decimal{}] = PoolRepo.all(values) 55 | assert :ok == run(:down, PoolRepo, AlterMigrationTwo) 56 | 57 | # Optionally fail once with database error when 58 | # already prepared on connection (and clear cache) 59 | try do 60 | PoolRepo.all(values, [mode: :savepoint]) 61 | rescue 62 | _ -> 63 | assert PoolRepo.all(values) == [1] 64 | else 65 | result -> 66 | assert result == [1] 67 | end 68 | end) 69 | after 70 | assert :ok == run(:down, PoolRepo, AlterMigrationOne) 71 | end 72 | 73 | test "reset cache on parameterized query after alter column type" do 74 | values = from v in "alter_col_type" 75 | 76 | assert :ok == run(:up, PoolRepo, AlterMigrationOne) 77 | assert PoolRepo.update_all(values, [set: [value: 2]]) == {1, nil} 78 | 79 | assert :ok == run(:up, PoolRepo, AlterMigrationTwo) 80 | assert PoolRepo.update_all(values, [set: [value: 3]]) == {1, nil} 81 | 82 | PoolRepo.transaction(fn() -> 83 | assert PoolRepo.update_all(values, [set: [value: Decimal.new(5)]]) == {1, nil} 84 | assert :ok == run(:down, PoolRepo, AlterMigrationTwo) 85 | assert PoolRepo.update_all(values, [set: [value: 6]]) == {1, nil} 86 | end) 87 | after 88 | assert :ok == 
run(:down, PoolRepo, AlterMigrationOne) 89 | end 90 | end 91 | -------------------------------------------------------------------------------- /integration_test/sql/lock.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.LockTest do 2 | # We can keep this test async as long as it 3 | # is the only one accessing the lock_test table. 4 | use ExUnit.Case, async: true 5 | 6 | import Ecto.Query 7 | alias Ecto.Integration.PoolRepo 8 | 9 | defmodule LockCounter do 10 | use Ecto.Schema 11 | 12 | schema "lock_counters" do 13 | field :count, :integer 14 | end 15 | end 16 | 17 | setup do 18 | PoolRepo.delete_all(LockCounter) 19 | :ok 20 | end 21 | 22 | test "lock for update" do 23 | %{id: id} = PoolRepo.insert!(%LockCounter{count: 1}) 24 | pid = self() 25 | 26 | lock_for_update = 27 | Application.get_env(:ecto_sql, :lock_for_update) || 28 | raise ":lock_for_update not set in :ecto application" 29 | 30 | # Here we are manually inserting the lock in the query 31 | # to test multiple adapters. Never do this in actual 32 | # application code: it is not safe and not public. 33 | query = from(lc in LockCounter, where: lc.id == ^id) 34 | query = %{query | lock: lock_for_update} 35 | 36 | {:ok, new_pid} = 37 | Task.start_link fn -> 38 | assert_receive :select_for_update, 5000 39 | 40 | PoolRepo.transaction(fn -> 41 | [post] = PoolRepo.all(query) # this should block until the other trans. commit 42 | post |> Ecto.Changeset.change(count: post.count + 1) |> PoolRepo.update! 43 | end) 44 | 45 | send pid, :updated 46 | end 47 | 48 | PoolRepo.transaction(fn -> 49 | [post] = PoolRepo.all(query) # select and lock the row 50 | send new_pid, :select_for_update # signal second process to begin a transaction 51 | post |> Ecto.Changeset.change(count: post.count + 1) |> PoolRepo.update! 52 | end) 53 | 54 | assert_receive :updated, 5000 55 | 56 | # Final count will be 3 if SELECT ... 
FOR UPDATE worked and 2 otherwise 57 | assert [%LockCounter{count: 3}] = PoolRepo.all(LockCounter) 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /integration_test/sql/query_many.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.QueryManyTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | 6 | test "query_many!/4" do 7 | results = TestRepo.query_many!("SELECT 1; SELECT 2;") 8 | assert [%{rows: [[1]], num_rows: 1}, %{rows: [[2]], num_rows: 1}] = results 9 | end 10 | 11 | test "query_many!/4 with iodata" do 12 | results = TestRepo.query_many!(["SELECT", ?\s, ?1, ";", ?\s, "SELECT", ?\s, ?2, ";"]) 13 | assert [%{rows: [[1]], num_rows: 1}, %{rows: [[2]], num_rows: 1}] = results 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /integration_test/sql/sql.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.SQLTest do 2 | use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) 3 | 4 | alias Ecto.Integration.PoolRepo 5 | alias Ecto.Integration.TestRepo 6 | alias Ecto.Integration.Barebone 7 | alias Ecto.Integration.Post 8 | alias Ecto.Integration.CorruptedPk 9 | alias Ecto.Integration.Tag 10 | import Ecto.Query, only: [from: 2] 11 | 12 | test "fragmented types" do 13 | datetime = ~N[2014-01-16 20:26:51] 14 | TestRepo.insert!(%Post{inserted_at: datetime}) 15 | query = from p in Post, where: fragment("? 
>= ?", p.inserted_at, ^datetime), select: p.inserted_at 16 | assert [^datetime] = TestRepo.all(query) 17 | end 18 | 19 | test "fragmented schemaless types" do 20 | TestRepo.insert!(%Post{visits: 123}) 21 | assert [123] = TestRepo.all(from p in "posts", select: type(fragment("visits"), :integer)) 22 | end 23 | 24 | test "type casting negative integers" do 25 | TestRepo.insert!(%Post{visits: -42}) 26 | assert [-42] = TestRepo.all(from(p in Post, select: type(p.visits, :integer))) 27 | end 28 | 29 | @tag :array_type 30 | test "fragment array types" do 31 | text1 = "foo" 32 | text2 = "bar" 33 | result = TestRepo.query!("SELECT $1::text[]", [[text1, text2]]) 34 | assert result.rows == [[[text1, text2]]] 35 | end 36 | 37 | @tag :array_type 38 | test "Converts empty array correctly" do 39 | result = TestRepo.query!("SELECT array[1,2,3] = $1", [[]]) 40 | assert result.rows == [[false]] 41 | 42 | result = TestRepo.query!("SELECT array[]::integer[] = $1", [[]]) 43 | assert result.rows == [[true]] 44 | 45 | %{id: tag_id} = TestRepo.insert!(%Tag{uuids: []}) 46 | query = from t in Tag, where: t.uuids == [] 47 | assert [%{id: ^tag_id}] = TestRepo.all(query) 48 | end 49 | 50 | test "query!/4 with dynamic repo" do 51 | TestRepo.put_dynamic_repo(:unknown) 52 | assert_raise RuntimeError, ~r/:unknown/, fn -> TestRepo.query!("SELECT 1") end 53 | end 54 | 55 | test "query!/4" do 56 | result = TestRepo.query!("SELECT 1") 57 | assert result.rows == [[1]] 58 | end 59 | 60 | test "query!/4 with iodata" do 61 | result = TestRepo.query!(["SELECT", ?\s, ?1]) 62 | assert result.rows == [[1]] 63 | end 64 | 65 | test "disconnect_all/2" do 66 | assert :ok = PoolRepo.disconnect_all(0) 67 | end 68 | 69 | test "to_sql/3" do 70 | {sql, []} = TestRepo.to_sql(:all, Barebone) 71 | assert sql =~ "SELECT" 72 | assert sql =~ "barebones" 73 | 74 | {sql, [0]} = TestRepo.to_sql(:update_all, from(b in Barebone, update: [set: [num: ^0]])) 75 | assert sql =~ "UPDATE" 76 | assert sql =~ "barebones" 77 | assert 
sql =~ "SET" 78 | 79 | {sql, []} = TestRepo.to_sql(:delete_all, Barebone) 80 | assert sql =~ "DELETE" 81 | assert sql =~ "barebones" 82 | end 83 | 84 | test "raises when primary key is not unique on struct operation" do 85 | schema = %CorruptedPk{a: "abc"} 86 | TestRepo.insert!(schema) 87 | TestRepo.insert!(schema) 88 | TestRepo.insert!(schema) 89 | 90 | assert_raise Ecto.MultiplePrimaryKeyError, 91 | ~r|expected delete on corrupted_pk to return at most one entry but got 3 entries|, 92 | fn -> TestRepo.delete!(schema) end 93 | end 94 | 95 | test "Repo.insert! escape" do 96 | TestRepo.insert!(%Post{title: "'"}) 97 | 98 | query = from(p in Post, select: p.title) 99 | assert ["'"] == TestRepo.all(query) 100 | end 101 | 102 | test "Repo.update! escape" do 103 | p = TestRepo.insert!(%Post{title: "hello"}) 104 | TestRepo.update!(Ecto.Changeset.change(p, title: "'")) 105 | 106 | query = from(p in Post, select: p.title) 107 | assert ["'"] == TestRepo.all(query) 108 | end 109 | 110 | @tag :insert_cell_wise_defaults 111 | test "Repo.insert_all escape" do 112 | TestRepo.insert_all(Post, [%{title: "'"}]) 113 | 114 | query = from(p in Post, select: p.title) 115 | assert ["'"] == TestRepo.all(query) 116 | end 117 | 118 | test "Repo.update_all escape" do 119 | TestRepo.insert!(%Post{title: "hello"}) 120 | 121 | TestRepo.update_all(Post, set: [title: "'"]) 122 | reader = from(p in Post, select: p.title) 123 | assert ["'"] == TestRepo.all(reader) 124 | 125 | query = from(Post, where: "'" != "") 126 | TestRepo.update_all(query, set: [title: "''"]) 127 | assert ["''"] == TestRepo.all(reader) 128 | end 129 | 130 | test "Repo.delete_all escape" do 131 | TestRepo.insert!(%Post{title: "hello"}) 132 | assert [_] = TestRepo.all(Post) 133 | 134 | TestRepo.delete_all(from(Post, where: "'" == "'")) 135 | assert [] == TestRepo.all(Post) 136 | end 137 | 138 | test "load" do 139 | inserted_at = ~N[2016-01-01 09:00:00] 140 | TestRepo.insert!(%Post{title: "title1", inserted_at: inserted_at, 
public: false}) 141 | 142 | result = Ecto.Adapters.SQL.query!(TestRepo, "SELECT * FROM posts", []) 143 | posts = Enum.map(result.rows, &TestRepo.load(Post, {result.columns, &1})) 144 | assert [%Post{title: "title1", inserted_at: ^inserted_at, public: false}] = posts 145 | end 146 | 147 | test "returns true when table exists" do 148 | assert Ecto.Adapters.SQL.table_exists?(TestRepo, "posts") 149 | end 150 | 151 | test "returns false table doesn't exists" do 152 | refute Ecto.Adapters.SQL.table_exists?(TestRepo, "unknown") 153 | end 154 | 155 | test "returns result as a formatted table" do 156 | TestRepo.insert_all(Post, [%{title: "my post title", counter: 1, public: nil}]) 157 | 158 | # resolve correct query for each adapter 159 | query = from(p in Post, select: [p.title, p.counter, p.public]) 160 | {query, _} = Ecto.Adapters.SQL.to_sql(:all, TestRepo, query) 161 | 162 | table = 163 | query 164 | |> TestRepo.query!() 165 | |> Ecto.Adapters.SQL.format_table() 166 | 167 | assert table == "+---------------+---------+--------+\n| title | counter | public |\n+---------------+---------+--------+\n| my post title | 1 | NULL |\n+---------------+---------+--------+" 168 | end 169 | 170 | test "format_table edge cases" do 171 | assert Ecto.Adapters.SQL.format_table(nil) == "" 172 | assert Ecto.Adapters.SQL.format_table(%{columns: nil, rows: nil}) == "" 173 | assert Ecto.Adapters.SQL.format_table(%{columns: [], rows: []}) == "" 174 | assert Ecto.Adapters.SQL.format_table(%{columns: [], rows: [["test"]]}) == "" 175 | assert Ecto.Adapters.SQL.format_table(%{columns: ["test"], rows: []}) == "+------+\n| test |\n+------+\n+------+" 176 | assert Ecto.Adapters.SQL.format_table(%{columns: ["test"], rows: nil}) == "+------+\n| test |\n+------+\n+------+" 177 | end 178 | end 179 | -------------------------------------------------------------------------------- /integration_test/sql/stream.exs: -------------------------------------------------------------------------------- 1 | 
defmodule Ecto.Integration.StreamTest do 2 | use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Post 6 | alias Ecto.Integration.Comment 7 | import Ecto.Query 8 | 9 | test "stream empty" do 10 | assert {:ok, []} = TestRepo.transaction(fn() -> 11 | TestRepo.stream(Post) 12 | |> Enum.to_list() 13 | end) 14 | 15 | assert {:ok, []} = TestRepo.transaction(fn() -> 16 | TestRepo.stream(from p in Post) 17 | |> Enum.to_list() 18 | end) 19 | end 20 | 21 | test "stream without schema" do 22 | %Post{} = TestRepo.insert!(%Post{title: "title1"}) 23 | %Post{} = TestRepo.insert!(%Post{title: "title2"}) 24 | 25 | assert {:ok, ["title1", "title2"]} = TestRepo.transaction(fn() -> 26 | TestRepo.stream(from(p in "posts", order_by: p.title, select: p.title)) 27 | |> Enum.to_list() 28 | end) 29 | end 30 | 31 | test "stream with assoc" do 32 | p1 = TestRepo.insert!(%Post{title: "1"}) 33 | 34 | %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) 35 | %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) 36 | 37 | stream = TestRepo.stream(Ecto.assoc(p1, :comments)) 38 | assert {:ok, [c1, c2]} = TestRepo.transaction(fn() -> 39 | Enum.to_list(stream) 40 | end) 41 | assert c1.id == cid1 42 | assert c2.id == cid2 43 | end 44 | end 45 | -------------------------------------------------------------------------------- /integration_test/sql/subquery.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.SubQueryTest do 2 | use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) 3 | 4 | alias Ecto.Integration.TestRepo 5 | import Ecto.Query 6 | alias Ecto.Integration.Post 7 | alias Ecto.Integration.Comment 8 | 9 | test "from: subqueries with select source" do 10 | TestRepo.insert!(%Post{title: "hello", public: true}) 11 | 12 | query = from p in 
Post, select: p 13 | assert ["hello"] = 14 | TestRepo.all(from p in subquery(query), select: p.title) 15 | assert [post] = 16 | TestRepo.all(from p in subquery(query), select: p) 17 | 18 | assert %NaiveDateTime{} = post.inserted_at 19 | assert post.__meta__.state == :loaded 20 | end 21 | 22 | @tag :map_boolean_in_expression 23 | test "from: subqueries with map and select expression" do 24 | TestRepo.insert!(%Post{title: "hello", public: true}) 25 | 26 | query = from p in Post, select: %{title: p.title, pub: not p.public} 27 | assert ["hello"] = 28 | TestRepo.all(from p in subquery(query), select: p.title) 29 | assert [%{title: "hello", pub: false}] = 30 | TestRepo.all(from p in subquery(query), select: p) 31 | assert [{"hello", %{title: "hello", pub: false}}] = 32 | TestRepo.all(from p in subquery(query), select: {p.title, p}) 33 | assert [{%{title: "hello", pub: false}, false}] = 34 | TestRepo.all(from p in subquery(query), select: {p, p.pub}) 35 | end 36 | 37 | @tag :map_boolean_in_expression 38 | test "from: subqueries with map update and select expression" do 39 | TestRepo.insert!(%Post{title: "hello", public: true}) 40 | 41 | query = from p in Post, select: %{p | public: not p.public} 42 | assert ["hello"] = 43 | TestRepo.all(from p in subquery(query), select: p.title) 44 | assert [%Post{title: "hello", public: false}] = 45 | TestRepo.all(from p in subquery(query), select: p) 46 | assert [{"hello", %Post{title: "hello", public: false}}] = 47 | TestRepo.all(from p in subquery(query), select: {p.title, p}) 48 | assert [{%Post{title: "hello", public: false}, false}] = 49 | TestRepo.all(from p in subquery(query), select: {p, p.public}) 50 | end 51 | 52 | test "from: subqueries with map update on virtual field and select expression" do 53 | TestRepo.insert!(%Post{title: "hello"}) 54 | 55 | query = from p in Post, select: %{p | temp: p.title} 56 | assert ["hello"] = 57 | TestRepo.all(from p in subquery(query), select: p.temp) 58 | assert [%Post{title: "hello", temp: 
"hello"}] = 59 | TestRepo.all(from p in subquery(query), select: p) 60 | end 61 | 62 | @tag :subquery_aggregates 63 | test "from: subqueries with aggregates" do 64 | TestRepo.insert!(%Post{visits: 10}) 65 | TestRepo.insert!(%Post{visits: 11}) 66 | TestRepo.insert!(%Post{visits: 13}) 67 | 68 | query = from p in Post, select: [:visits], order_by: [asc: :visits] 69 | assert [13] = TestRepo.all(from p in subquery(query), select: max(p.visits)) 70 | query = from p in Post, select: [:visits], order_by: [asc: :visits], limit: 2 71 | assert [11] = TestRepo.all(from p in subquery(query), select: max(p.visits)) 72 | 73 | query = from p in Post, order_by: [asc: :visits] 74 | assert [13] = TestRepo.all(from p in subquery(query), select: max(p.visits)) 75 | query = from p in Post, order_by: [asc: :visits], limit: 2 76 | assert [11] = TestRepo.all(from p in subquery(query), select: max(p.visits)) 77 | end 78 | 79 | test "from: subqueries with parameters" do 80 | TestRepo.insert!(%Post{visits: 10, title: "hello"}) 81 | TestRepo.insert!(%Post{visits: 11, title: "hello"}) 82 | TestRepo.insert!(%Post{visits: 13, title: "world"}) 83 | 84 | query = from p in Post, where: p.visits >= ^11 and p.visits <= ^13 85 | query = from p in subquery(query), where: p.title == ^"hello", select: fragment("? 
+ ?", p.visits, ^1) 86 | assert [12] = TestRepo.all(query) 87 | end 88 | 89 | test "join: subqueries with select source" do 90 | %{id: id} = TestRepo.insert!(%Post{title: "hello", public: true}) 91 | TestRepo.insert!(%Comment{post_id: id}) 92 | 93 | query = from p in Post, select: p 94 | assert ["hello"] = 95 | TestRepo.all(from c in Comment, join: p in subquery(query), on: c.post_id == p.id, select: p.title) 96 | assert [%Post{inserted_at: %NaiveDateTime{}}] = 97 | TestRepo.all(from c in Comment, join: p in subquery(query), on: c.post_id == p.id, select: p) 98 | end 99 | 100 | test "join: subqueries with parameters" do 101 | TestRepo.insert!(%Post{visits: 10, title: "hello"}) 102 | TestRepo.insert!(%Post{visits: 11, title: "hello"}) 103 | TestRepo.insert!(%Post{visits: 13, title: "world"}) 104 | TestRepo.insert!(%Comment{}) 105 | TestRepo.insert!(%Comment{}) 106 | 107 | query = from p in Post, where: p.visits >= ^11 and p.visits <= ^13 108 | query = from c in Comment, 109 | join: p in subquery(query), 110 | on: true, 111 | where: p.title == ^"hello", 112 | select: fragment("? 
+ ?", p.visits, ^1) 113 | assert [12, 12] = TestRepo.all(query) 114 | end 115 | 116 | @tag :subquery_in_order_by 117 | test "subqueries in order by" do 118 | TestRepo.insert!(%Post{visits: 10, title: "hello"}) 119 | TestRepo.insert!(%Post{visits: 11, title: "hello"}) 120 | 121 | query = from p in Post, as: :p, order_by: [asc: exists(from p in Post, where: p.visits > parent_as(:p).visits)] 122 | 123 | assert [%{visits: 11}, %{visits: 10}] = TestRepo.all(query) 124 | end 125 | 126 | @tag :multicolumn_distinct 127 | @tag :subquery_in_distinct 128 | test "subqueries in distinct" do 129 | TestRepo.insert!(%Post{visits: 10, title: "hello1"}) 130 | TestRepo.insert!(%Post{visits: 10, title: "hello2"}) 131 | TestRepo.insert!(%Post{visits: 11, title: "hello"}) 132 | 133 | query = from p in Post, as: :p, distinct: exists(from p in Post, where: p.visits > parent_as(:p).visits), order_by: [asc: :title] 134 | 135 | assert [%{title: "hello"}, %{title: "hello1"}] = TestRepo.all(query) 136 | end 137 | 138 | @tag :subquery_in_group_by 139 | test "subqueries in group by" do 140 | TestRepo.insert!(%Post{visits: 10, title: "hello1"}) 141 | TestRepo.insert!(%Post{visits: 10, title: "hello2"}) 142 | TestRepo.insert!(%Post{visits: 11, title: "hello"}) 143 | 144 | query = from p in Post, as: :p, select: sum(p.visits), group_by: exists(from p in Post, where: p.visits > parent_as(:p).visits), order_by: [sum(p.visits)] 145 | 146 | query 147 | |> TestRepo.all() 148 | |> Enum.map(&Decimal.new/1) 149 | |> Enum.zip([Decimal.new(11), Decimal.new(20)]) 150 | |> Enum.all?(fn {a, b} -> Decimal.eq?(a, b) end) 151 | |> assert() 152 | end 153 | end 154 | -------------------------------------------------------------------------------- /integration_test/support/file_helpers.exs: -------------------------------------------------------------------------------- 1 | defmodule Support.FileHelpers do 2 | import ExUnit.Assertions 3 | 4 | @doc """ 5 | Returns the `tmp_path` for tests. 
6 | """ 7 | def tmp_path do 8 | Path.expand("../../tmp", __DIR__) 9 | end 10 | 11 | @doc """ 12 | Executes the given function in a temp directory 13 | tailored for this test case and test. 14 | """ 15 | defmacro in_tmp(fun) do 16 | {name, _arity} = __CALLER__.function || raise "in_tmp must be called inside a function" 17 | path = Path.join([tmp_path(), "#{__CALLER__.module}", "#{name}"]) 18 | 19 | quote do 20 | path = unquote(path) 21 | File.rm_rf!(path) 22 | File.mkdir_p!(path) 23 | File.cd!(path, fn -> unquote(fun).(path) end) 24 | end 25 | end 26 | 27 | @doc """ 28 | Asserts a file was generated. 29 | """ 30 | def assert_file(file) do 31 | assert File.regular?(file), "Expected #{file} to exist, but does not" 32 | end 33 | 34 | @doc """ 35 | Asserts a file was generated and that it matches a given pattern. 36 | """ 37 | def assert_file(file, callback) when is_function(callback, 1) do 38 | assert_file(file) 39 | callback.(File.read!(file)) 40 | end 41 | 42 | def assert_file(file, match) do 43 | assert_file(file, &assert(&1 =~ match)) 44 | end 45 | end 46 | -------------------------------------------------------------------------------- /integration_test/support/migration.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.Migration do 2 | use Ecto.Migration 3 | 4 | def change do 5 | # IO.puts "TESTING MIGRATION LOCK" 6 | # Process.sleep(10000) 7 | 8 | create table(:users, comment: "users table") do 9 | add :name, :string, comment: "name column" 10 | add :custom_id, :uuid 11 | timestamps() 12 | end 13 | 14 | create table(:posts) do 15 | add :title, :string, size: 100 16 | add :counter, :integer 17 | add :blob, :binary 18 | add :bid, :binary_id 19 | add :uuid, :uuid 20 | add :meta, :map 21 | add :links, {:map, :string} 22 | add :intensities, {:map, :float} 23 | add :public, :boolean 24 | add :cost, :decimal, precision: 2, scale: 1 25 | add :visits, :integer 26 | add :wrapped_visits, :integer 27 | add 
:intensity, :float 28 | add :author_id, :integer 29 | add :posted, :date 30 | add :read_only, :string 31 | timestamps(null: true) 32 | end 33 | 34 | create table(:posts_users, primary_key: false) do 35 | add :post_id, references(:posts) 36 | add :user_id, references(:users) 37 | end 38 | 39 | create table(:posts_users_pk) do 40 | add :post_id, references(:posts) 41 | add :user_id, references(:users) 42 | timestamps() 43 | end 44 | 45 | # Add a unique index on uuid. We use this 46 | # to verify the behaviour that the index 47 | # only matters if the UUID column is not NULL. 48 | create unique_index(:posts, [:uuid], comment: "posts index") 49 | 50 | create table(:permalinks) do 51 | add :uniform_resource_locator, :string 52 | add :title, :string 53 | add :post_id, references(:posts) 54 | add :user_id, references(:users) 55 | end 56 | 57 | create unique_index(:permalinks, [:post_id]) 58 | create unique_index(:permalinks, [:uniform_resource_locator]) 59 | 60 | create table(:comments) do 61 | add :text, :string, size: 100 62 | add :lock_version, :integer, default: 1 63 | add :post_id, references(:posts) 64 | add :author_id, references(:users) 65 | end 66 | 67 | create table(:customs, primary_key: false) do 68 | add :bid, :binary_id, primary_key: true 69 | add :uuid, :uuid 70 | end 71 | 72 | create unique_index(:customs, [:uuid]) 73 | 74 | create table(:customs_customs, primary_key: false) do 75 | add :custom_id1, references(:customs, column: :bid, type: :binary_id) 76 | add :custom_id2, references(:customs, column: :bid, type: :binary_id) 77 | end 78 | 79 | create table(:barebones) do 80 | add :num, :integer 81 | end 82 | 83 | create table(:transactions) do 84 | add :num, :integer 85 | end 86 | 87 | create table(:lock_counters) do 88 | add :count, :integer 89 | end 90 | 91 | create table(:orders) do 92 | add :label, :string 93 | add :item, :map 94 | add :items, :map 95 | add :meta, :map 96 | add :permalink_id, references(:permalinks) 97 | end 98 | 99 | unless 
:array_type in ExUnit.configuration()[:exclude] do 100 | create table(:tags) do 101 | add :ints, {:array, :integer} 102 | add :uuids, {:array, :uuid}, default: [] 103 | add :items, {:array, :map} 104 | end 105 | 106 | create table(:array_loggings) do 107 | add :uuids, {:array, :uuid}, default: [] 108 | timestamps() 109 | end 110 | end 111 | 112 | unless :bitstring_type in ExUnit.configuration()[:exclude] do 113 | create table(:bitstrings) do 114 | add :bs, :bitstring 115 | add :bs_with_default, :bitstring, default: <<42::6>> 116 | add :bs_with_size, :bitstring, size: 10 117 | end 118 | end 119 | 120 | if Code.ensure_loaded?(Duration) do 121 | unless :duration_type in ExUnit.configuration()[:exclude] do 122 | create table(:durations) do 123 | add :dur, :duration 124 | add :dur_with_fields, :duration, fields: "MONTH" 125 | add :dur_with_precision, :duration, precision: 4 126 | add :dur_with_fields_and_precision, :duration, fields: "HOUR TO SECOND", precision: 1 127 | add :dur_with_default, :duration, default: "10 MONTH" 128 | end 129 | end 130 | end 131 | 132 | create table(:composite_pk, primary_key: false) do 133 | add :a, :integer, primary_key: true 134 | add :b, :integer, primary_key: true 135 | add :name, :string 136 | end 137 | 138 | create table(:corrupted_pk, primary_key: false) do 139 | add :a, :string 140 | end 141 | 142 | create table(:posts_users_composite_pk) do 143 | add :post_id, references(:posts), primary_key: true 144 | add :user_id, references(:users), primary_key: true 145 | timestamps() 146 | end 147 | 148 | create unique_index(:posts_users_composite_pk, [:post_id, :user_id]) 149 | 150 | create table(:usecs) do 151 | add :naive_datetime_usec, :naive_datetime_usec 152 | add :utc_datetime_usec, :utc_datetime_usec 153 | end 154 | 155 | create table(:bits) do 156 | add :bit, :bit 157 | end 158 | 159 | create table(:loggings, primary_key: false) do 160 | add :bid, :binary_id, primary_key: true 161 | add :int, :integer 162 | add :uuid, :uuid 163 | 
timestamps() 164 | end 165 | end 166 | end 167 | -------------------------------------------------------------------------------- /integration_test/support/repo.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.Repo do 2 | defmacro __using__(opts) do 3 | quote do 4 | use Ecto.Repo, unquote(opts) 5 | 6 | @query_event __MODULE__ 7 | |> Module.split() 8 | |> Enum.map(& &1 |> Macro.underscore() |> String.to_atom()) 9 | |> Kernel.++([:query]) 10 | 11 | def init(_, opts) do 12 | fun = &Ecto.Integration.Repo.handle_event/4 13 | :telemetry.attach_many(__MODULE__, [[:custom], @query_event], fun, :ok) 14 | {:ok, opts} 15 | end 16 | end 17 | end 18 | 19 | def handle_event(event, latency, metadata, _config) do 20 | handler = Process.delete(:telemetry) || fn _, _, _ -> :ok end 21 | handler.(event, latency, metadata) 22 | end 23 | end 24 | -------------------------------------------------------------------------------- /integration_test/tds/all_test.exs: -------------------------------------------------------------------------------- 1 | ecto = Mix.Project.deps_paths()[:ecto] 2 | Code.require_file "#{ecto}/integration_test/cases/assoc.exs", __DIR__ 3 | Code.require_file "#{ecto}/integration_test/cases/interval.exs", __DIR__ 4 | Code.require_file "#{ecto}/integration_test/cases/joins.exs", __DIR__ 5 | Code.require_file "#{ecto}/integration_test/cases/preload.exs", __DIR__ 6 | Code.require_file "#{ecto}/integration_test/cases/repo.exs", __DIR__ 7 | Code.require_file "#{ecto}/integration_test/cases/type.exs", __DIR__ 8 | 9 | Code.require_file "../sql/alter.exs", __DIR__ 10 | Code.require_file "../sql/logging.exs", __DIR__ 11 | Code.require_file "../sql/migration.exs", __DIR__ 12 | Code.require_file "../sql/migrator.exs", __DIR__ 13 | Code.require_file "../sql/sandbox.exs", __DIR__ 14 | Code.require_file "../sql/sql.exs", __DIR__ 15 | # Code.require_file "../sql/stream.exs", __DIR__ 16 | Code.require_file 
"../sql/subquery.exs", __DIR__ 17 | Code.require_file "../sql/transaction.exs", __DIR__ 18 | -------------------------------------------------------------------------------- /integration_test/tds/constraints_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ConstraintsTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Ecto.Migrator, only: [up: 4] 5 | alias Ecto.Integration.PoolRepo 6 | 7 | defmodule ConstraintMigration do 8 | use Ecto.Migration 9 | 10 | @table table(:constraints_test) 11 | 12 | def change do 13 | create @table do 14 | add :price, :integer 15 | add :from, :integer 16 | add :to, :integer 17 | end 18 | create constraint(@table.name, :cannot_overlap, check: "[from] < [to]") 19 | end 20 | end 21 | 22 | defmodule Constraint do 23 | use Ecto.Integration.Schema 24 | 25 | schema "constraints_test" do 26 | field :price, :integer 27 | field :from, :integer 28 | field :to, :integer 29 | end 30 | end 31 | 32 | @base_migration 2_000_000 33 | 34 | setup_all do 35 | ExUnit.CaptureLog.capture_log(fn -> 36 | num = @base_migration + System.unique_integer([:positive]) 37 | up(PoolRepo, num, ConstraintMigration, log: false) 38 | end) 39 | 40 | :ok 41 | end 42 | 43 | test "check constraint" do 44 | changeset = Ecto.Changeset.change(%Constraint{}, from: 0, to: 10) 45 | {:ok, _} = PoolRepo.insert(changeset) 46 | 47 | non_overlapping_changeset = Ecto.Changeset.change(%Constraint{}, from: 11, to: 12) 48 | {:ok, _} = PoolRepo.insert(non_overlapping_changeset) 49 | 50 | overlapping_changeset = Ecto.Changeset.change(%Constraint{}, from: 1900, to: 12) 51 | 52 | exception = 53 | assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to insert struct/, fn -> 54 | PoolRepo.insert(overlapping_changeset) 55 | end 56 | assert exception.message =~ "\"cannot_overlap\" (check_constraint)" 57 | assert exception.message =~ "The changeset has not defined any constraint." 
58 | assert exception.message =~ "call `check_constraint/3`" 59 | 60 | {:error, changeset} = 61 | overlapping_changeset 62 | |> Ecto.Changeset.check_constraint(:from, name: :cannot_overlap) 63 | |> PoolRepo.insert() 64 | assert changeset.errors == [from: {"is invalid", [constraint: :check, constraint_name: "cannot_overlap"]}] 65 | assert changeset.data.__meta__.state == :built 66 | end 67 | end 68 | -------------------------------------------------------------------------------- /integration_test/tds/explain_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ExplainTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Post 6 | import Ecto.Query, only: [from: 2] 7 | 8 | describe "explain" do 9 | test "select" do 10 | explain = TestRepo.explain(:all, from(p in Post, where: p.title == "explain_test", limit: 1)) 11 | assert explain =~ "| Rows | Executes |" 12 | assert explain =~ "| Parallel | EstimateExecutions |" 13 | assert explain =~ "SELECT TOP(1)" 14 | assert explain =~ "explain_test" 15 | end 16 | 17 | test "delete" do 18 | explain = TestRepo.explain(:delete_all, Post) 19 | assert explain =~ "DELETE" 20 | assert explain =~ "p0" 21 | end 22 | 23 | test "update" do 24 | explain = TestRepo.explain(:update_all, from(p in Post, update: [set: [title: "new title"]])) 25 | assert explain =~ "UPDATE" 26 | assert explain =~ "p0" 27 | assert explain =~ "new title" 28 | end 29 | 30 | test "invalid" do 31 | assert_raise(Tds.Error, fn -> 32 | TestRepo.explain(:all, from(p in "posts", select: p.invalid, where: p.invalid == "title")) 33 | end) 34 | end 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /integration_test/tds/lock_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.LockTest do 2 | # We can keep this test async 
as long as it 3 | # is the only one accessing the lock_test table. 4 | use ExUnit.Case, async: true 5 | 6 | import Ecto.Query 7 | alias Ecto.Integration.PoolRepo 8 | 9 | defmodule LockCounter do 10 | use Ecto.Schema 11 | 12 | schema "lock_counters" do 13 | field :count, :integer 14 | end 15 | end 16 | 17 | setup do 18 | PoolRepo.delete_all(LockCounter) 19 | :ok 20 | end 21 | 22 | test "hints for update" do 23 | %{id: id} = PoolRepo.insert!(%LockCounter{count: 1}) 24 | pid = self() 25 | query = from(lc in LockCounter, hints: ["UPDLOCK"], where: lc.id == ^id) 26 | 27 | {:ok, new_pid} = 28 | Task.start_link fn -> 29 | assert_receive :select_for_update, 5000 30 | 31 | PoolRepo.transaction(fn -> 32 | [post] = PoolRepo.all(query) # this should block until the other trans. commit 33 | post |> Ecto.Changeset.change(count: post.count + 1) |> PoolRepo.update! 34 | end) 35 | 36 | send pid, :updated 37 | end 38 | 39 | PoolRepo.transaction(fn -> 40 | [post] = PoolRepo.all(query) # select and lock the row 41 | send new_pid, :select_for_update # signal second process to begin a transaction 42 | post |> Ecto.Changeset.change(count: post.count + 1) |> PoolRepo.update! 43 | end) 44 | 45 | assert_receive :updated, 5000 46 | 47 | # Final count will be 3 if SELECT ... 
FOR UPDATE worked and 2 otherwise 48 | assert [%LockCounter{count: 3}] = PoolRepo.all(LockCounter) 49 | end 50 | end 51 | -------------------------------------------------------------------------------- /integration_test/tds/migrations_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.MigrationsTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias Ecto.Integration.PoolRepo 5 | import ExUnit.CaptureLog 6 | 7 | @moduletag :capture_log 8 | @base_migration 3_000_000 9 | 10 | defmodule NormalMigration do 11 | use Ecto.Migration 12 | 13 | def change do 14 | create_if_not_exists table(:log_mode_table) 15 | end 16 | end 17 | 18 | collation = "Latin1_General_CS_AS" 19 | @collation collation 20 | 21 | defmodule CollateMigration do 22 | use Ecto.Migration 23 | @collation collation 24 | 25 | def change do 26 | create table(:collate_reference) do 27 | add :name, :string, collation: @collation 28 | end 29 | 30 | create unique_index(:collate_reference, :name) 31 | 32 | create table(:collate) do 33 | add :string, :string, collation: @collation 34 | add :char, :char, size: 255, collation: @collation 35 | add :nchar, :nchar, size: 255, collation: @collation 36 | add :varchar, :varchar, size: 255, collation: @collation 37 | add :nvarchar, :nvarchar, size: 255, collation: @collation 38 | add :text, :text, collation: @collation 39 | add :ntext, :ntext, collation: @collation 40 | add :name_string, references(:collate_reference, type: :string, column: :name), collation: @collation 41 | end 42 | 43 | alter table(:collate) do 44 | modify :string, :string, collation: "Japanese_Bushu_Kakusu_100_CS_AS_KS_WS" 45 | end 46 | end 47 | end 48 | 49 | describe "Migrator" do 50 | @get_lock_command ~s(sp_getapplock @Resource = 'ecto_Ecto.Integration.PoolRepo', @LockMode = 'Exclusive', @LockOwner = 'Transaction', @LockTimeout = -1) 51 | @create_table_sql ~s(CREATE TABLE [log_mode_table]) 52 | @create_table_log "create table if not 
exists log_mode_table" 53 | @drop_table_sql ~s(DROP TABLE [log_mode_table]) 54 | @drop_table_log "drop table if exists log_mode_table" 55 | @version_insert ~s(INSERT INTO [schema_migrations]) 56 | @version_delete ~s(DELETE s0 FROM [schema_migrations]) 57 | 58 | test "logs locking and transaction commands" do 59 | num = @base_migration + System.unique_integer([:positive]) 60 | up_log = 61 | capture_log(fn -> 62 | Ecto.Migrator.up(PoolRepo, num, NormalMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 63 | end) 64 | 65 | assert Regex.scan(~r/(begin \[\])/, up_log) |> length() == 2 66 | assert up_log =~ @get_lock_command 67 | assert up_log =~ @create_table_sql 68 | assert up_log =~ @create_table_log 69 | assert up_log =~ @version_insert 70 | assert Regex.scan(~r/(commit \[\])/, up_log) |> length() == 2 71 | 72 | down_log = 73 | capture_log(fn -> 74 | Ecto.Migrator.down(PoolRepo, num, NormalMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 75 | end) 76 | 77 | # Fix: assert on down_log (was up_log — copy-paste bug that left the down migration's begin count unverified) 78 | assert Regex.scan(~r/(begin \[\])/, down_log) |> length() == 2 79 | assert down_log =~ @get_lock_command 80 | assert down_log =~ @drop_table_sql 81 | assert down_log =~ @drop_table_log 82 | assert down_log =~ @version_delete 83 | # Fix: assert on down_log (was up_log) so the down migration's commit count is actually checked 84 | assert Regex.scan(~r/(commit \[\])/, down_log) |> length() == 2 85 | end 86 | 87 | test "does not log sql when log is default" do 88 | num = @base_migration + System.unique_integer([:positive]) 89 | up_log = 90 | capture_log(fn -> 91 | Ecto.Migrator.up(PoolRepo, num, NormalMigration, log: :info) 92 | end) 93 | 94 | refute up_log =~ "begin []" 95 | refute up_log =~ @get_lock_command 96 | refute up_log =~ @create_table_sql 97 | assert up_log =~ @create_table_log 98 | refute up_log =~ @version_insert 99 | refute up_log =~ "commit []" 100 | 101 | down_log = 102 | capture_log(fn -> 103 | Ecto.Migrator.down(PoolRepo, num, NormalMigration, log: :info) 104 | end) 105 | 106 | refute down_log =~ "begin []" 107 | refute down_log =~ @get_lock_command 108 | 
refute down_log =~ @drop_table_sql 107 | assert down_log =~ @drop_table_log 108 | refute down_log =~ @version_delete 109 | refute down_log =~ "commit []" 110 | end 111 | 112 | test "collation can be set on a column" do 113 | num = @base_migration + System.unique_integer([:positive]) 114 | assert :ok = Ecto.Migrator.up(PoolRepo, num, CollateMigration, log: :info) 115 | 116 | query = fn column -> """ 117 | SELECT collation_name 118 | FROM information_schema.columns 119 | WHERE table_name = 'collate' AND column_name = '#{column}'; 120 | """ 121 | end 122 | 123 | assert %{ 124 | rows: [["Japanese_Bushu_Kakusu_100_CS_AS_KS_WS"]] 125 | } = Ecto.Adapters.SQL.query!(PoolRepo, query.("string"), []) 126 | 127 | for type <- ~w/char varchar nchar nvarchar text ntext/ do 128 | assert %{ 129 | rows: [[@collation]] 130 | } = Ecto.Adapters.SQL.query!(PoolRepo, query.(type), []) 131 | end 132 | end 133 | end 134 | end 135 | -------------------------------------------------------------------------------- /integration_test/tds/storage_test.exs: -------------------------------------------------------------------------------- 1 | Code.require_file "../support/file_helpers.exs", __DIR__ 2 | 3 | defmodule Ecto.Integration.StorageTest do 4 | use ExUnit.Case 5 | 6 | @moduletag :capture_log 7 | 8 | alias Ecto.Adapters.Tds 9 | 10 | def params do 11 | url = Application.get_env(:ecto_sql, :tds_test_url) <> "/storage_mgt" 12 | [log: false] ++ Ecto.Repo.Supervisor.parse_url(url) 13 | end 14 | 15 | def wrong_params() do 16 | Keyword.merge params(), 17 | [username: "randomuser", 18 | password: "password1234"] 19 | end 20 | 21 | test "storage up (twice in a row)" do 22 | assert :ok == Tds.storage_up(params()) 23 | assert {:error, :already_up} == Tds.storage_up(params()) 24 | after 25 | Tds.storage_down(params()) 26 | end 27 | 28 | test "storage down (twice in a row)" do 29 | assert :ok == Tds.storage_up(params()) 30 | assert :ok == Tds.storage_down(params()) 31 | assert {:error, :already_down} == 
Tds.storage_down(params()) 32 | end 33 | 34 | test "storage up and down (wrong credentials)" do 35 | refute :ok == Tds.storage_up(wrong_params()) 36 | assert :ok == Tds.storage_up(params()) 37 | refute :ok == Tds.storage_down(wrong_params()) 38 | after 39 | Tds.storage_down(params()) 40 | end 41 | 42 | defmodule Migration do 43 | use Ecto.Migration 44 | def change, do: :ok 45 | end 46 | 47 | test "storage status is up when database is created" do 48 | Tds.storage_up(params()) 49 | assert :up == Tds.storage_status(params()) 50 | after 51 | Tds.storage_down(params()) 52 | end 53 | 54 | test "storage status is down when database is not created" do 55 | Tds.storage_up(params()) 56 | Tds.storage_down(params()) 57 | assert :down == Tds.storage_status(params()) 58 | end 59 | 60 | test "storage status is an error when wrong credentials are passed" do 61 | assert ExUnit.CaptureLog.capture_log(fn -> 62 | assert {:error, _} = Tds.storage_status(wrong_params()) 63 | end) =~ ~r"Login failed for user 'randomuser'" 64 | end 65 | end 66 | -------------------------------------------------------------------------------- /integration_test/tds/tds_type_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.TdsTypeTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Ecto.Type 5 | alias Tds.Ecto.VarChar 6 | alias Ecto.Adapters.Tds 7 | 8 | @varchar_string "some string" 9 | 10 | test "dumps through the adapter" do 11 | assert adapter_dump(Tds, {:map, VarChar}, %{"a" => @varchar_string}) == 12 | {:ok, %{"a" => @varchar_string}} 13 | end 14 | 15 | test "loads through the adapter" do 16 | assert adapter_load(Tds, {:map, VarChar}, %{"a" => {@varchar_string, :varchar}}) == 17 | {:ok, %{"a" => @varchar_string}} 18 | 19 | assert adapter_load(Tds, {:map, VarChar}, %{"a" => @varchar_string}) == 20 | {:ok, %{"a" => @varchar_string}} 21 | end 22 | end 23 | 
# Bootstrap script for the TDS (MSSQL) integration suite: configures ExUnit,
# defines the test repositories, (re)creates the database and runs migrations.
Logger.configure(level: :info)

# Tags excluded below mark shared integration tests that MSSQL cannot run;
# each exclusion carries the reason inline.
ExUnit.start(
  exclude: [
    # not sure how to support this yet
    :aggregate_filters,
    # types MSSQL has no direct equivalent for
    :bitstring_type,
    :duration_type,
    # subquery contains ORDER BY and that is not supported
    :subquery_aggregates,
    # sql don't have array type
    :array_type,
    # upserts can only be supported with MERGE statement and it is tricky to make it fast
    :upsert,
    :upsert_all,
    # mssql rounds differently than ecto/integration_test/cases/interval.exs
    :uses_msec,
    # unique index compares even NULL values for post_id, so below fails inserting permalinks without setting valid post_id
    :insert_cell_wise_defaults,
    # MSSQL does not support strings on text fields
    :text_type_as_string,
    # IDENTITY_INSERT ON/OFF must be manually executed
    :assigns_id_type,
    # without schema we don't know anything about :map and :embeds, where value is kept in nvarchar(max) column
    :map_type_schemaless,
    # SELECT NOT(t.bool_fields) is not supported by sql server
    :map_boolean_in_expression,
    # Decimal casting can not be precise in MSSQL adapter since precision is kept in migration file :(
    # or in case of schema-less queries we don't know at all about precision
    :decimal_precision,
    # this fails because schema-less queries in select uses Decimal casting,
    # see below comment about :decimal_type_cast exclusion or :decimal_type_cast
    :union_with_literals,
    # inline queries can't use order by
    :inline_order_by,
    # running destruction of PK columns requires that PK constraint is dropped first
    :alter_primary_key,
    # below 2 exclusions (in theory) requires filtered unique index on permalinks table post_id column e.g.
    # CREATE UNIQUE NONCLUSTERED INDEX idx_tbl_TestUnique_ID
    # ON [permalinks] ([post_id])
    # WHERE [post_id] IS NOT NULL
    # But I couldn't make it work :(
    :on_replace_nilify,
    :on_replace_update,
    # Tds allows nested transactions so this will never raise and SQL query should be "BEGIN TRAN"
    :transaction_checkout_raises,
    # JSON_VALUE always returns strings (even for e.g. integers) and returns null for
    # arrays/objects (JSON_QUERY must be used for these)
    :json_extract_path,
    # MSSQL does not support streaming
    :stream,
    # MSSQL fails the regex matching because it uses square brackets outside of the parameter list
    :parameter_logging,
    # MSSQL can't reference aliased columns in GROUP BY
    :selected_as_with_group_by,
    # MSSQL can't reference aliased columns in HAVING
    :selected_as_with_having,
    # MSSQL can't reference aliased columns in ORDER BY expressions
    :selected_as_with_order_by_expression,
    # MSSQL doesn't support specifying columns for ON DELETE SET NULL
    :on_delete_nilify_column_list,
    # MSSQL doesnt' support anything except a single column in DISTINCT
    :multicolumn_distinct,
    # MSSQL doesnt' support subqueries in group by or in distinct
    :subquery_in_group_by,
    :subquery_in_distinct,
    :subquery_in_order_by
  ]
)

# Global application configuration for the suite.
Application.put_env(:tds, :json_library, Jason)
Application.put_env(:ecto, :primary_key_type, :id)
Application.put_env(:ecto, :async_integration_tests, false)
Application.put_env(:ecto_sql, :lock_for_update, "(UPDLOCK)")

# Connection URL; MSSQL_URL overrides the local default credentials.
Application.put_env(
  :ecto_sql,
  :tds_test_url,
  "ecto://" <> (System.get_env("MSSQL_URL") || "sa:some!Password@localhost")
)

alias Ecto.Integration.TestRepo

# Load support files
ecto = Mix.Project.deps_paths()[:ecto]
Code.require_file("../support/repo.exs", __DIR__)

# Sandbox-pooled repo used by most tests; config must be set before the
# module is defined/started.
Application.put_env(
  :ecto_sql,
  TestRepo,
  url: Application.get_env(:ecto_sql, :tds_test_url) <> "/ecto_test",
  pool: Ecto.Adapters.SQL.Sandbox,
  set_allow_snapshot_isolation: :on,
  show_sensitive_data_on_connection_error: true,
  log: false
)

defmodule Ecto.Integration.TestRepo do
  use Ecto.Integration.Repo,
    otp_app: :ecto_sql,
    adapter: Ecto.Adapters.Tds

  def uuid, do: Tds.Ecto.UUID

  # Prefixes map to MSSQL schemas.
  def create_prefix(prefix) do
    """
    CREATE SCHEMA #{prefix};
    """
  end

  def drop_prefix(prefix) do
    """
    DROP SCHEMA #{prefix};
    """
  end
end

Code.require_file("#{ecto}/integration_test/support/schemas.exs", __DIR__)
Code.require_file("../support/migration.exs", __DIR__)

alias Ecto.Integration.PoolRepo

# Non-sandboxed repo with a real pool, for tests that need actual concurrency.
Application.put_env(
  :ecto_sql,
  PoolRepo,
  url: "#{Application.get_env(:ecto_sql, :tds_test_url)}/ecto_test",
  pool_size: 10,
  set_allow_snapshot_isolation: :on
)

defmodule Ecto.Integration.PoolRepo do
  use Ecto.Integration.Repo,
    otp_app: :ecto_sql,
    adapter: Ecto.Adapters.Tds

  def create_prefix(prefix) do
    "create schema #{prefix}"
  end

  def drop_prefix(prefix) do
    "drop schema #{prefix}"
  end
end

# Case template checking out a sandbox connection per test; the isolation
# level can be overridden via an `:isolation_level` tag.
defmodule Ecto.Integration.Case do
  use ExUnit.CaseTemplate

  setup context do
    level = Map.get(context, :isolation_level, :read_committed)
    :ok = Ecto.Adapters.SQL.Sandbox.checkout(TestRepo, isolation_level: level)
  end
end

# Handy tracing snippets kept around for debugging the adapter:
# :dbg.start()
# :dbg.tracer()
# :dbg.p(:all,:c)
# :dbg.tpl(Ecto.Adapters.Tds.Connection, :column_change, :x)
# :dbg.tpl(Ecto.Adapters.Tds.Connection, :execute_ddl, :x)
# :dbg.tpl(Ecto.Adapters.Tds.Connection, :all, :x)
# :dbg.tpl(Tds.Parameter, :prepare_params, :x)
# :dbg.tpl(Tds.Parameter, :prepared_params, :x)

{:ok, _} =
  Ecto.Adapters.Tds.ensure_all_started(TestRepo.config(), :temporary)

# Load up the repository, start it, and run migrations
_ = Ecto.Adapters.Tds.storage_down(TestRepo.config())
:ok = Ecto.Adapters.Tds.storage_up(TestRepo.config())

{:ok, _pid} = TestRepo.start_link()
{:ok, _pid} = PoolRepo.start_link()
:ok = Ecto.Migrator.up(TestRepo, 0, Ecto.Integration.Migration, log: :debug)
Ecto.Adapters.SQL.Sandbox.mode(TestRepo, :manual)
Process.flag(:trap_exit, true)
defmodule Ecto.Adapter.Structure do
  @moduledoc """
  Specifies the adapter structure (dump/load) API.
  """

  @doc """
  Dumps the given structure.

  The path will be looked in the `config` under `:dump_path` or
  default to the structure path inside `default`.

  Returns an `:ok` tuple if it was dumped successfully, an error tuple otherwise.

  ## Examples

      structure_dump("priv/repo", username: "postgres",
                                  database: "ecto_test",
                                  hostname: "localhost")

  """
  @callback structure_dump(default :: String.t(), config :: Keyword.t()) ::
              {:ok, String.t()} | {:error, term}

  @doc """
  Loads the given structure.

  The path will be looked in the `config` under `:dump_path` or
  default to the structure path inside `default`.

  Returns an `:ok` tuple if it was loaded successfully, an error tuple otherwise.

  ## Examples

      structure_load("priv/repo", username: "postgres",
                                  database: "ecto_test",
                                  hostname: "localhost")

  """
  @callback structure_load(default :: String.t(), config :: Keyword.t()) ::
              {:ok, String.t()} | {:error, term}

  @doc """
  Runs the dump command for the given repo / config.

  Calling this function will setup authentication and run the dump cli
  command with your provided `args`.

  The options in `opts` are passed directly to `System.cmd/3`.

  Returns `{output, exit_status}` where `output` is a string of the stdout
  (as long as no option `into` is provided, see `System.cmd/3`) and `exit_status`
  is the exit status of the invocation. (`0` for success)

  ## Examples

      iex> dump_cmd(["--data-only", "--table", "table_name"], [stdout_to_stderr: true], Acme.Repo.config())
      {"--\\n-- PostgreSQL database dump\\n--\\n" <> _rest, 0}

  """
  @callback dump_cmd(args :: [String.t()], opts :: Keyword.t(), config :: Keyword.t()) ::
              {output :: Collectable.t(), exit_status :: non_neg_integer()}
end
26 | """ 27 | @callback prepare_execute(connection, name, statement, params, options :: Keyword.t()) :: 28 | {:ok, cached, term} | {:error, Exception.t()} 29 | 30 | @doc """ 31 | Executes a cached query. 32 | """ 33 | @callback execute(connection, cached, params, options :: Keyword.t()) :: 34 | {:ok, cached, term} | {:ok, term} | {:error | :reset, Exception.t()} 35 | 36 | @doc """ 37 | Runs the given statement as a query. 38 | """ 39 | @callback query(connection, statement, params, options :: Keyword.t()) :: 40 | {:ok, term} | {:error, Exception.t()} 41 | 42 | @doc """ 43 | Runs the given statement as a multi-result query. 44 | """ 45 | @callback query_many(connection, statement, params, options :: Keyword.t()) :: 46 | {:ok, term} | {:error, Exception.t()} 47 | 48 | @doc """ 49 | Returns a stream that prepares and executes the given query with 50 | `DBConnection`. 51 | """ 52 | @callback stream(connection, statement, params, options :: Keyword.t()) :: 53 | Enum.t() 54 | 55 | @doc """ 56 | Receives the exception returned by `c:query/4`. 57 | 58 | The constraints are in the keyword list and must return the 59 | constraint type, like `:unique`, and the constraint name as 60 | a string, for example: 61 | 62 | [unique: "posts_title_index"] 63 | 64 | Must return an empty list if the error does not come 65 | from any constraint. 66 | """ 67 | @callback to_constraints(exception :: Exception.t(), options :: Keyword.t()) :: Keyword.t() 68 | 69 | ## Queries 70 | 71 | @doc """ 72 | Receives a query and must return a SELECT query. 73 | """ 74 | @callback all(query :: Ecto.Query.t()) :: iodata 75 | 76 | @doc """ 77 | Receives a query and values to update and must return an UPDATE query. 78 | """ 79 | @callback update_all(query :: Ecto.Query.t()) :: iodata 80 | 81 | @doc """ 82 | Receives a query and must return a DELETE query. 
83 | """ 84 | @callback delete_all(query :: Ecto.Query.t()) :: iodata 85 | 86 | @doc """ 87 | Returns an INSERT for the given `rows` in `table` returning 88 | the given `returning`. 89 | """ 90 | @callback insert( 91 | prefix :: String.t(), 92 | table :: String.t(), 93 | header :: [atom], 94 | rows :: [[atom | nil]], 95 | on_conflict :: Ecto.Adapter.Schema.on_conflict(), 96 | returning :: [atom], 97 | placeholders :: [term] 98 | ) :: iodata 99 | 100 | @doc """ 101 | Returns an UPDATE for the given `fields` in `table` filtered by 102 | `filters` returning the given `returning`. 103 | """ 104 | @callback update( 105 | prefix :: String.t(), 106 | table :: String.t(), 107 | fields :: [atom], 108 | filters :: [atom], 109 | returning :: [atom] 110 | ) :: iodata 111 | 112 | @doc """ 113 | Returns a DELETE for the `filters` returning the given `returning`. 114 | """ 115 | @callback delete( 116 | prefix :: String.t(), 117 | table :: String.t(), 118 | filters :: [atom], 119 | returning :: [atom] 120 | ) :: iodata 121 | 122 | @doc """ 123 | Executes an EXPLAIN query or similar depending on the adapter to obtains statistics of the given query. 124 | 125 | Receives the `connection`, `query`, `params` for the query, 126 | and all `opts` including those related to the EXPLAIN statement and shared opts. 127 | 128 | Must execute the explain query and return the result. 129 | """ 130 | @callback explain_query( 131 | connection, 132 | query :: String.t(), 133 | params :: Keyword.t(), 134 | opts :: Keyword.t() 135 | ) :: 136 | {:ok, term} | {:error, Exception.t()} 137 | 138 | ## DDL 139 | 140 | @doc """ 141 | Receives a DDL command and returns a query that executes it. 142 | """ 143 | @callback execute_ddl(command :: Ecto.Adapter.Migration.command()) :: String.t() | [iodata] 144 | 145 | @doc """ 146 | Receives a query result and returns a list of logs. 
defmodule Ecto.Adapters.SQL.Stream do
  @moduledoc false
  # Lazy handle around a SQL statement: holds everything needed to run the
  # query, while actual execution is deferred to `Ecto.Adapters.SQL` at the
  # moment the stream is enumerated or collected into.

  defstruct [:meta, :statement, :params, :opts]

  @doc false
  # Wraps the adapter meta, SQL statement, params and options in a stream struct.
  def build(meta, statement, params, opts) do
    %__MODULE__{meta: meta, statement: statement, params: params, opts: opts}
  end
end

alias Ecto.Adapters.SQL.Stream

defimpl Enumerable, for: Stream do
  # Counting, membership and slicing would require executing the query, so we
  # make Enumerable fall back to its default reduce-based implementations.
  def count(_stream), do: {:error, __MODULE__}

  def member?(_stream, _term), do: {:error, __MODULE__}

  def slice(_stream), do: {:error, __MODULE__}

  # Execution happens here: delegate to the adapter with the stored state.
  def reduce(%Stream{meta: meta, statement: stmt, params: params, opts: opts}, acc, fun) do
    Ecto.Adapters.SQL.reduce(meta, stmt, params, opts, acc, fun)
  end
end

defimpl Collectable, for: Stream do
  def into(%Stream{meta: meta, statement: stmt, params: params, opts: opts} = stream) do
    {state, collector} = Ecto.Adapters.SQL.into(meta, stmt, params, opts)
    {state, wrap_collector(collector, stream)}
  end

  # Wraps the adapter collector so that `:done` returns the stream itself,
  # as the Collectable contract expects the collected value back.
  defp wrap_collector(collector, stream) do
    fn
      state, :done ->
        collector.(state, :done)
        stream

      state, command ->
        collector.(state, command)
    end
  end
end
3 | # The table name defaults to `schema_migrations`. 4 | @moduledoc false 5 | use Ecto.Schema 6 | 7 | import Ecto.Query, only: [from: 2] 8 | 9 | @primary_key false 10 | schema "schema_migrations" do 11 | field :version, :integer, primary_key: true 12 | timestamps updated_at: false 13 | end 14 | 15 | # The migration flag is used to signal to the repository 16 | # we are in a migration operation. 17 | @default_opts [ 18 | timeout: :infinity, 19 | log: false, 20 | # Keep schema_migration for backwards compatibility 21 | schema_migration: true, 22 | ecto_query: :schema_migration, 23 | telemetry_options: [schema_migration: true] 24 | ] 25 | 26 | def ensure_schema_migrations_table!(repo, config, opts) do 27 | {repo, source} = get_repo_and_source(repo, config) 28 | table_name = String.to_atom(source) 29 | table = %Ecto.Migration.Table{name: table_name, prefix: opts[:prefix]} 30 | meta = Ecto.Adapter.lookup_meta(repo.get_dynamic_repo()) 31 | 32 | commands = [ 33 | {:add, :version, :bigint, primary_key: true}, 34 | {:add, :inserted_at, :naive_datetime, []} 35 | ] 36 | 37 | repo.__adapter__().execute_ddl(meta, {:create_if_not_exists, table, commands}, @default_opts) 38 | end 39 | 40 | def versions(repo, config, prefix) do 41 | {repo, source} = get_repo_and_source(repo, config) 42 | from_opts = [prefix: prefix] ++ @default_opts 43 | 44 | query = 45 | if Keyword.get(config, :migration_cast_version_column, false) do 46 | from(m in source, select: type(m.version, :integer)) 47 | else 48 | from(m in source, select: m.version) 49 | end 50 | 51 | {repo, query, from_opts} 52 | end 53 | 54 | def up(repo, config, version, opts) do 55 | {repo, source} = get_repo_and_source(repo, config) 56 | 57 | %__MODULE__{version: version} 58 | |> Ecto.put_meta(source: source) 59 | |> repo.insert(default_opts(opts)) 60 | end 61 | 62 | def down(repo, config, version, opts) do 63 | {repo, source} = get_repo_and_source(repo, config) 64 | 65 | from(m in source, where: m.version == type(^version, 
defmodule Mix.EctoSQL do
  @moduledoc false

  @doc """
  Ensures the given repository's migrations paths exist on the file system.
  """
  @spec ensure_migrations_paths(Ecto.Repo.t(), Keyword.t()) :: [String.t()]
  def ensure_migrations_paths(repo, opts) do
    # Explicit --migrations-path flags win; otherwise fall back to the
    # repository's default "migrations" directory under its priv path.
    paths =
      case Keyword.get_values(opts, :migrations_path) do
        [] -> [Path.join(source_repo_priv(repo), "migrations")]
        given -> given
      end

    # Umbrella roots have no migrations of their own, so skip the check there.
    if not Mix.Project.umbrella?() do
      paths
      |> Enum.reject(&File.dir?/1)
      |> Enum.each(&raise_missing_migrations(Path.relative_to_cwd(&1), repo))
    end

    paths
  end

  # Aborts the task with a descriptive error for a missing migrations directory.
  defp raise_missing_migrations(path, repo) do
    Mix.raise("""
    Could not find migrations directory #{inspect(path)}
    for repo #{inspect(repo)}.

    This may be because you are in a new project and the
    migration directory has not been created yet. Creating an
    empty directory at the path above will fix this error.

    If you expected existing migrations to be found, please
    make sure your repository has been properly configured
    and the configured path exists.
    """)
  end

  @doc """
  Returns the private repository path relative to the source.
  """
  def source_repo_priv(repo) do
    config = repo.config()
    app = Keyword.fetch!(config, :otp_app)

    # Default priv dir is derived from the repo module's last segment,
    # e.g. MyApp.Repo -> "priv/repo".
    priv =
      config[:priv] ||
        "priv/#{repo |> Module.split() |> List.last() |> Macro.underscore()}"

    base = Mix.Project.deps_paths()[app] || File.cwd!()
    Path.join(base, priv)
  end
end
`--prefix foo --prefix bar`) with 52 | PostgreSQL but not MySQL. When specified, the prefixes will have 53 | their definitions dumped along with the data in their migration table. 54 | The default behavior is dependent on the adapter for backwards compatibility 55 | reasons. For PostgreSQL, the configured database has the definitions dumped 56 | from all of its schemas but only the data from the migration table 57 | from the `public` schema is included. For MySQL, only the configured 58 | database and its migration table are dumped. 59 | """ 60 | 61 | @impl true 62 | def run(args) do 63 | {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) 64 | 65 | dump_prefixes = 66 | case Keyword.get_values(opts, :prefix) do 67 | [_ | _] = prefixes -> prefixes 68 | [] -> nil 69 | end 70 | 71 | opts = 72 | @default_opts 73 | |> Keyword.merge(opts) 74 | |> Keyword.put(:dump_prefixes, dump_prefixes) 75 | 76 | Enum.each(parse_repo(args), fn repo -> 77 | ensure_repo(repo, args) 78 | 79 | ensure_implements( 80 | repo.__adapter__(), 81 | Ecto.Adapter.Structure, 82 | "dump structure for #{inspect(repo)}" 83 | ) 84 | 85 | migration_repo = repo.config()[:migration_repo] || repo 86 | 87 | for repo <- Enum.uniq([repo, migration_repo]) do 88 | config = Keyword.merge(repo.config(), opts) 89 | start_time = System.system_time() 90 | 91 | case repo.__adapter__().structure_dump(source_repo_priv(repo), config) do 92 | {:ok, location} -> 93 | unless opts[:quiet] do 94 | elapsed = 95 | System.convert_time_unit(System.system_time() - start_time, :native, :microsecond) 96 | 97 | Mix.shell().info( 98 | "The structure for #{inspect(repo)} has been dumped to #{location} in #{format_time(elapsed)}" 99 | ) 100 | end 101 | 102 | {:error, term} when is_binary(term) -> 103 | Mix.raise("The structure for #{inspect(repo)} couldn't be dumped: #{term}") 104 | 105 | {:error, term} -> 106 | Mix.raise("The structure for #{inspect(repo)} couldn't be dumped: #{inspect(term)}") 107 | end 108 | 
defmodule Mix.Tasks.Ecto.Gen.Migration do
  use Mix.Task

  import Macro, only: [camelize: 1, underscore: 1]
  import Mix.Generator
  import Mix.Ecto
  import Mix.EctoSQL

  @shortdoc "Generates a new migration for the repo"

  @aliases [
    r: :repo
  ]

  @switches [
    change: :string,
    repo: [:string, :keep],
    no_compile: :boolean,
    no_deps_check: :boolean,
    migrations_path: :string
  ]

  @moduledoc """
  Generates a migration.

  The repository must be set under `:ecto_repos` in the
  current app configuration or given via the `-r` option.

  ## Examples

      $ mix ecto.gen.migration add_posts_table
      $ mix ecto.gen.migration add_posts_table -r Custom.Repo

  The generated migration filename will be prefixed with the current
  timestamp in UTC which is used for versioning and ordering.

  By default, the migration will be generated to the
  "priv/YOUR_REPO/migrations" directory of the current application
  but it can be configured to be any subdirectory of `priv` by
  specifying the `:priv` key under the repository configuration.

  This generator will automatically open the generated file if
  you have `ECTO_EDITOR` set in your environment variable.

  ## Command line options

    * `-r`, `--repo` - the repo to generate migration for
    * `--no-compile` - does not compile applications before running
    * `--no-deps-check` - does not check dependencies before running
    * `--migrations-path` - the path to run the migrations from, defaults to `priv/repo/migrations`

  ## Configuration

  If the current app configuration specifies a custom migration module
  the generated migration code will use that rather than the default
  `Ecto.Migration`:

      config :ecto_sql, migration_module: MyApplication.CustomMigrationModule

  """

  @impl true
  def run(args) do
    repos = parse_repo(args)

    Enum.map(repos, fn repo ->
      case OptionParser.parse!(args, strict: @switches, aliases: @aliases) do
        {opts, [name]} ->
          ensure_repo(repo, args)
          path = opts[:migrations_path] || Path.join(source_repo_priv(repo), "migrations")
          base_name = "#{underscore(name)}.exs"
          # Timestamp prefix provides versioning/ordering of migrations.
          file = Path.join(path, "#{timestamp()}_#{base_name}")
          if not File.dir?(path), do: create_directory(path)

          # Guard against a second migration with the same base name,
          # regardless of its timestamp prefix.
          fuzzy_path = Path.join(path, "*_#{base_name}")

          if Path.wildcard(fuzzy_path) != [] do
            Mix.raise(
              "migration can't be created, there is already a migration file with name #{name}."
            )
          end

          # The :change option may be used by other tasks but not the CLI
          assigns = [
            mod: Module.concat([repo, Migrations, camelize(name)]),
            change: opts[:change]
          ]

          create_file(file, migration_template(assigns))

          # When ECTO_EDITOR opened the file, offer to run it right away.
          if open?(file) and Mix.shell().yes?("Do you want to run this migration?") do
            Mix.Task.run("ecto.migrate", ["-r", inspect(repo), "--migrations-path", path])
          end

          file

        {_, _} ->
          Mix.raise(
            "expected ecto.gen.migration to receive the migration file name, " <>
              "got: #{inspect(Enum.join(args, " "))}"
          )
      end
    end)
  end

  # UTC timestamp as YYYYMMDDHHMMSS, used as the migration version prefix.
  defp timestamp do
    {{y, m, d}, {hh, mm, ss}} = :calendar.universal_time()
    "#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}"
  end

  # Zero-pads a calendar component to two characters. The dump of this file
  # had lost the binary literal (`do: <>`); restored to build "0" <> digit.
  defp pad(i) when i < 10, do: <<?0, ?0 + i>>
  defp pad(i), do: to_string(i)

  # Resolves the module `use`d by generated migrations; must be an atom.
  defp migration_module do
    case Application.get_env(:ecto_sql, :migration_module, Ecto.Migration) do
      migration_module when is_atom(migration_module) -> migration_module
      other -> Mix.raise("Expected :migration_module to be a module, got: #{inspect(other)}")
    end
  end

  embed_template(:migration, """
  defmodule <%= inspect @mod %> do
    use <%= inspect migration_module() %>

    def change do
  <%= @change %>
    end
  end
  """)
end
20 | repo: [:string, :keep], 21 | no_compile: :boolean, 22 | no_deps_check: :boolean, 23 | skip_if_loaded: :boolean 24 | ] 25 | 26 | @moduledoc """ 27 | Loads the current environment's database structure for the 28 | given repository from a previously dumped structure file. 29 | 30 | The repository must be set under `:ecto_repos` in the 31 | current app configuration or given via the `-r` option. 32 | 33 | This task needs some shell utility to be present on the machine 34 | running the task. 35 | 36 | Database | Utility needed 37 | :--------- | :------------- 38 | PostgreSQL | psql 39 | MySQL | mysql 40 | 41 | ## Example 42 | 43 | $ mix ecto.load 44 | 45 | ## Command line options 46 | 47 | * `-r`, `--repo` - the repo to load the structure info into 48 | * `-d`, `--dump-path` - the path of the dump file to load from 49 | * `-q`, `--quiet` - run the command quietly 50 | * `-f`, `--force` - do not ask for confirmation when loading data. 51 | Configuration is asked only when `:start_permanent` is set to true 52 | (typically in production) 53 | * `--no-compile` - does not compile applications before loading 54 | * `--no-deps-check` - does not check dependencies before loading 55 | * `--skip-if-loaded` - does not load the dump file if the repo has the migrations table up 56 | 57 | """ 58 | 59 | @impl true 60 | def run(args, table_exists? 
\\ &Ecto.Adapters.SQL.table_exists?/3) do 61 | {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) 62 | opts = Keyword.merge(@default_opts, opts) 63 | opts = if opts[:quiet], do: Keyword.put(opts, :log, false), else: opts 64 | 65 | Enum.each(parse_repo(args), fn repo -> 66 | ensure_repo(repo, args) 67 | 68 | ensure_implements( 69 | repo.__adapter__(), 70 | Ecto.Adapter.Structure, 71 | "load structure for #{inspect(repo)}" 72 | ) 73 | 74 | {migration_repo, source} = 75 | Ecto.Migration.SchemaMigration.get_repo_and_source(repo, repo.config()) 76 | 77 | {:ok, loaded?, _} = 78 | Ecto.Migrator.with_repo(migration_repo, table_exists_closure(table_exists?, source, opts)) 79 | 80 | for repo <- Enum.uniq([repo, migration_repo]) do 81 | cond do 82 | loaded? and opts[:skip_if_loaded] -> 83 | :ok 84 | 85 | (skip_safety_warnings?() and not loaded?) or opts[:force] or confirm_load(repo, loaded?) -> 86 | load_structure(repo, opts) 87 | 88 | true -> 89 | :ok 90 | end 91 | end 92 | end) 93 | end 94 | 95 | defp table_exists_closure(fun, source, opts) when is_function(fun, 3) do 96 | &fun.(&1, source, opts) 97 | end 98 | 99 | defp table_exists_closure(fun, source, _opts) when is_function(fun, 2) do 100 | &fun.(&1, source) 101 | end 102 | 103 | defp skip_safety_warnings? do 104 | Mix.Project.config()[:start_permanent] != true 105 | end 106 | 107 | defp confirm_load(repo, false) do 108 | Mix.shell().yes?( 109 | "Are you sure you want to load a new structure for #{inspect(repo)}? Any existing data in this repo may be lost." 110 | ) 111 | end 112 | 113 | defp confirm_load(repo, true) do 114 | Mix.shell().yes?(""" 115 | It looks like a structure was already loaded for #{inspect(repo)}. Any attempt to load it again might fail. 116 | Are you sure you want to proceed? 
117 | """) 118 | end 119 | 120 | defp load_structure(repo, opts) do 121 | config = Keyword.merge(repo.config(), opts) 122 | start_time = System.system_time() 123 | 124 | case repo.__adapter__().structure_load(source_repo_priv(repo), config) do 125 | {:ok, location} -> 126 | unless opts[:quiet] do 127 | elapsed = 128 | System.convert_time_unit(System.system_time() - start_time, :native, :microsecond) 129 | 130 | Mix.shell().info( 131 | "The structure for #{inspect(repo)} has been loaded from #{location} in #{format_time(elapsed)}" 132 | ) 133 | end 134 | 135 | {:error, term} when is_binary(term) -> 136 | Mix.raise("The structure for #{inspect(repo)} couldn't be loaded: #{term}") 137 | 138 | {:error, term} -> 139 | Mix.raise("The structure for #{inspect(repo)} couldn't be loaded: #{inspect(term)}") 140 | end 141 | end 142 | 143 | defp format_time(microsec) when microsec < 1_000, do: "#{microsec} μs" 144 | defp format_time(microsec) when microsec < 1_000_000, do: "#{div(microsec, 1_000)} ms" 145 | defp format_time(microsec), do: "#{Float.round(microsec / 1_000_000.0)} s" 146 | end 147 | -------------------------------------------------------------------------------- /lib/mix/tasks/ecto.migrate.ex: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.Migrate do 2 | use Mix.Task 3 | import Mix.Ecto 4 | import Mix.EctoSQL 5 | 6 | @shortdoc "Runs the repository migrations" 7 | 8 | @aliases [ 9 | n: :step, 10 | r: :repo 11 | ] 12 | 13 | @switches [ 14 | all: :boolean, 15 | step: :integer, 16 | to: :integer, 17 | to_exclusive: :integer, 18 | quiet: :boolean, 19 | prefix: :string, 20 | pool_size: :integer, 21 | log_level: :string, 22 | log_migrations_sql: :boolean, 23 | log_migrator_sql: :boolean, 24 | strict_version_order: :boolean, 25 | repo: [:keep, :string], 26 | no_compile: :boolean, 27 | no_deps_check: :boolean, 28 | migrations_path: :keep 29 | ] 30 | 31 | @moduledoc """ 32 | Runs the pending migrations for the 
given repository. 33 | 34 | Migrations are expected at "priv/YOUR_REPO/migrations" directory 35 | of the current application, where "YOUR_REPO" is the last segment 36 | in your repository name. For example, the repository `MyApp.Repo` 37 | will use "priv/repo/migrations". The repository `Whatever.MyRepo` 38 | will use "priv/my_repo/migrations". 39 | 40 | You can configure a repository to use another directory by specifying 41 | the `:priv` key under the repository configuration. The "migrations" 42 | part will be automatically appended to it. For instance, to use 43 | "priv/custom_repo/migrations": 44 | 45 | config :my_app, MyApp.Repo, priv: "priv/custom_repo" 46 | 47 | This task runs all pending migrations by default. To migrate up to a 48 | specific version number, supply `--to version_number`. To migrate a 49 | specific number of times, use `--step n`. 50 | 51 | The repositories to migrate are the ones specified under the 52 | `:ecto_repos` option in the current app configuration. However, 53 | if the `-r` option is given, it replaces the `:ecto_repos` config. 54 | 55 | Since Ecto tasks can only be executed once, if you need to migrate 56 | multiple repositories, set `:ecto_repos` accordingly or pass the `-r` 57 | flag multiple times. 58 | 59 | If a repository has not yet been started, one will be started outside 60 | your application supervision tree and shutdown afterwards. 61 | 62 | ## Examples 63 | 64 | $ mix ecto.migrate 65 | $ mix ecto.migrate -r Custom.Repo 66 | 67 | $ mix ecto.migrate -n 3 68 | $ mix ecto.migrate --step 3 69 | 70 | $ mix ecto.migrate --to 20080906120000 71 | 72 | ## Command line options 73 | 74 | * `--all` - run all pending migrations 75 | 76 | * `--log-migrations-sql` - log SQL generated by migration commands 77 | 78 | * `--log-migrator-sql` - log SQL generated by the migrator, such as 79 | transactions, table locks, etc 80 | 81 | * `--log-level` (since v3.11.0) - the level to set for `Logger`. 
This task 82 | does not start your application, so whatever level you have configured in 83 | your config files will not be used. If this is not provided, no level 84 | will be set, so that if you set it yourself before calling this task 85 | then this won't interfere. Can be any of the `t:Logger.level/0` levels 86 | 87 | * `--migrations-path` - the path to load the migrations from, defaults to 88 | `"priv/repo/migrations"`. This option may be given multiple times in which 89 | case the migrations are loaded from all the given directories and sorted 90 | as if they were in the same one 91 | 92 | * `--no-compile` - does not compile applications before migrating 93 | 94 | * `--no-deps-check` - does not check dependencies before migrating 95 | 96 | * `--pool-size` - the pool size if the repository is started 97 | only for the task (defaults to 2) 98 | 99 | * `--prefix` - the prefix to run migrations on 100 | 101 | * `--quiet` - do not log migration commands 102 | 103 | * `-r`, `--repo` - the repo to migrate 104 | 105 | * `--step`, `-n` - run n number of pending migrations 106 | 107 | * `--strict-version-order` - abort when applying a migration with old 108 | timestamp (otherwise it emits a warning) 109 | 110 | * `--to` - run all migrations up to and including version 111 | 112 | * `--to-exclusive` - run all migrations up to and excluding version 113 | 114 | """ 115 | 116 | @impl true 117 | def run(args, migrator \\ &Ecto.Migrator.run/4) do 118 | repos = parse_repo(args) 119 | {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) 120 | 121 | opts = 122 | if opts[:to] || opts[:to_exclusive] || opts[:step] || opts[:all], 123 | do: opts, 124 | else: Keyword.put(opts, :all, true) 125 | 126 | opts = 127 | if opts[:quiet], 128 | do: Keyword.merge(opts, log: false, log_migrations_sql: false, log_migrator_sql: false), 129 | else: opts 130 | 131 | if log_level = opts[:log_level] do 132 | Logger.configure(level: String.to_existing_atom(log_level)) 133 | end 
134 | 135 | # Start ecto_sql explicitly before as we don't need 136 | # to restart those apps if migrated. 137 | {:ok, _} = Application.ensure_all_started(:ecto_sql) 138 | 139 | for repo <- repos do 140 | ensure_repo(repo, args) 141 | paths = ensure_migrations_paths(repo, opts) 142 | pool = repo.config()[:pool] 143 | 144 | fun = 145 | if Code.ensure_loaded?(pool) and function_exported?(pool, :unboxed_run, 2) do 146 | &pool.unboxed_run(&1, fn -> migrator.(&1, paths, :up, opts) end) 147 | else 148 | &migrator.(&1, paths, :up, opts) 149 | end 150 | 151 | case Ecto.Migrator.with_repo(repo, fun, [mode: :temporary] ++ opts) do 152 | {:ok, _migrated, _apps} -> 153 | :ok 154 | 155 | {:error, error} -> 156 | Mix.raise("Could not start repo #{inspect(repo)}, error: #{inspect(error)}") 157 | end 158 | end 159 | 160 | :ok 161 | end 162 | end 163 | -------------------------------------------------------------------------------- /lib/mix/tasks/ecto.migrations.ex: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.Migrations do 2 | use Mix.Task 3 | import Mix.Ecto 4 | import Mix.EctoSQL 5 | 6 | @shortdoc "Displays the repository migration status" 7 | 8 | @aliases [ 9 | r: :repo 10 | ] 11 | 12 | @switches [ 13 | repo: [:keep, :string], 14 | no_compile: :boolean, 15 | no_deps_check: :boolean, 16 | migrations_path: :keep, 17 | prefix: :string 18 | ] 19 | 20 | @moduledoc """ 21 | Displays the up / down migration status for the given repository. 22 | 23 | The repository must be set under `:ecto_repos` in the 24 | current app configuration or given via the `-r` option. 25 | 26 | By default, migrations are expected at "priv/YOUR_REPO/migrations" 27 | directory of the current application but it can be configured 28 | by specifying the `:priv` key under the repository configuration. 29 | 30 | If the repository has not been started yet, one will be 31 | started outside our application supervision tree and shutdown 32 | afterwards. 
33 | 34 | ## Examples 35 | 36 | $ mix ecto.migrations 37 | $ mix ecto.migrations -r Custom.Repo 38 | 39 | ## Command line options 40 | 41 | * `--migrations-path` - the path to load the migrations from, defaults to 42 | `"priv/repo/migrations"`. This option may be given multiple times in which 43 | case the migrations are loaded from all the given directories and sorted as 44 | if they were in the same one. 45 | 46 | Note, if you have previously run migrations from paths `a/` and `b/`, and now 47 | run `mix ecto.migrations --migrations-path a/` (omitting path `b/`), the 48 | migrations from the path `b/` will be shown in the output as `** FILE NOT FOUND **`. 49 | 50 | * `--no-compile` - does not compile applications before running 51 | 52 | * `--no-deps-check` - does not check dependencies before running 53 | 54 | * `--prefix` - the prefix to check migrations on 55 | 56 | * `-r`, `--repo` - the repo to obtain the status for 57 | 58 | """ 59 | 60 | @impl true 61 | def run(args, migrations \\ &Ecto.Migrator.migrations/3, puts \\ &IO.puts/1) do 62 | repos = parse_repo(args) 63 | {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) 64 | 65 | for repo <- repos do 66 | ensure_repo(repo, args) 67 | paths = ensure_migrations_paths(repo, opts) 68 | 69 | case Ecto.Migrator.with_repo(repo, &migrations.(&1, paths, opts), mode: :temporary) do 70 | {:ok, repo_status, _} -> 71 | puts.( 72 | """ 73 | 74 | Repo: #{inspect(repo)} 75 | 76 | Status Migration ID Migration Name 77 | -------------------------------------------------- 78 | """ <> 79 | Enum.map_join(repo_status, "\n", fn {status, number, description} -> 80 | " #{format(status, 10)}#{format(number, 16)}#{description}" 81 | end) <> "\n" 82 | ) 83 | 84 | {:error, error} -> 85 | Mix.raise("Could not start repo #{inspect(repo)}, error: #{inspect(error)}") 86 | end 87 | end 88 | 89 | :ok 90 | end 91 | 92 | defp format(content, pad) do 93 | content 94 | |> to_string 95 | |> String.pad_trailing(pad) 96 | end 
97 | end 98 | -------------------------------------------------------------------------------- /lib/mix/tasks/ecto.rollback.ex: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.Rollback do 2 | use Mix.Task 3 | import Mix.Ecto 4 | import Mix.EctoSQL 5 | 6 | @shortdoc "Rolls back the repository migrations" 7 | 8 | @aliases [ 9 | r: :repo, 10 | n: :step 11 | ] 12 | 13 | @switches [ 14 | all: :boolean, 15 | step: :integer, 16 | to: :integer, 17 | to_exclusive: :integer, 18 | quiet: :boolean, 19 | prefix: :string, 20 | pool_size: :integer, 21 | log_level: :string, 22 | log_migrations_sql: :boolean, 23 | log_migrator_sql: :boolean, 24 | repo: [:keep, :string], 25 | no_compile: :boolean, 26 | no_deps_check: :boolean, 27 | migrations_path: :keep 28 | ] 29 | 30 | @moduledoc """ 31 | Reverts applied migrations in the given repository. 32 | 33 | Migrations are expected at "priv/YOUR_REPO/migrations" directory 34 | of the current application, where "YOUR_REPO" is the last segment 35 | in your repository name. For example, the repository `MyApp.Repo` 36 | will use "priv/repo/migrations". The repository `Whatever.MyRepo` 37 | will use "priv/my_repo/migrations". 38 | 39 | You can configure a repository to use another directory by specifying 40 | the `:priv` key under the repository configuration. The "migrations" 41 | part will be automatically appended to it. For instance, to use 42 | "priv/custom_repo/migrations": 43 | 44 | config :my_app, MyApp.Repo, priv: "priv/custom_repo" 45 | 46 | This task rolls back the last applied migration by default. To roll 47 | back to a version number, supply `--to version_number`. To roll 48 | back a specific number of times, use `--step n`. To undo all applied 49 | migrations, provide `--all`. 50 | 51 | The repositories to rollback are the ones specified under the 52 | `:ecto_repos` option in the current app configuration. 
However, 53 | if the `-r` option is given, it replaces the `:ecto_repos` config. 54 | 55 | If a repository has not yet been started, one will be started outside 56 | your application supervision tree and shutdown afterwards. 57 | 58 | ## Examples 59 | 60 | $ mix ecto.rollback 61 | $ mix ecto.rollback -r Custom.Repo 62 | 63 | $ mix ecto.rollback -n 3 64 | $ mix ecto.rollback --step 3 65 | 66 | $ mix ecto.rollback --to 20080906120000 67 | 68 | ## Command line options 69 | 70 | * `--all` - run all pending migrations 71 | 72 | * `--log-migrations-sql` - log SQL generated by migration commands 73 | 74 | * `--log-migrator-sql` - log SQL generated by the migrator, such as 75 | transactions, table locks, etc 76 | 77 | * `--log-level` (since v3.11.0) - the level to set for `Logger`. This task 78 | does not start your application, so whatever level you have configured in 79 | your config files will not be used. If this is not provided, no level 80 | will be set, so that if you set it yourself before calling this task 81 | then this won't interfere. Can be any of the `t:Logger.level/0` levels 82 | 83 | * `--migrations-path` - the path to load the migrations from, defaults to 84 | `"priv/repo/migrations"`. 
This option may be given multiple times in which 85 | case the migrations are loaded from all the given directories and sorted 86 | as if they were in the same one 87 | 88 | * `--no-compile` - does not compile applications before migrating 89 | 90 | * `--no-deps-check` - does not check dependencies before migrating 91 | 92 | * `--pool-size` - the pool size if the repository is started 93 | only for the task (defaults to 2) 94 | 95 | * `--prefix` - the prefix to run migrations on 96 | 97 | * `--quiet` - do not log migration commands 98 | 99 | * `-r`, `--repo` - the repo to migrate 100 | 101 | * `--step`, `-n` - revert n migrations 102 | 103 | * `--strict-version-order` - abort when applying a migration with old 104 | timestamp (otherwise it emits a warning) 105 | 106 | * `--to` - revert all migrations down to and including version 107 | 108 | * `--to-exclusive` - revert all migrations down to and excluding version 109 | 110 | """ 111 | 112 | @impl true 113 | def run(args, migrator \\ &Ecto.Migrator.run/4) do 114 | repos = parse_repo(args) 115 | {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) 116 | 117 | opts = 118 | if opts[:to] || opts[:to_exclusive] || opts[:step] || opts[:all], 119 | do: opts, 120 | else: Keyword.put(opts, :step, 1) 121 | 122 | opts = 123 | if opts[:quiet], 124 | do: Keyword.merge(opts, log: false, log_migrations_sql: false, log_migrator_sql: false), 125 | else: opts 126 | 127 | if log_level = opts[:log_level] do 128 | Logger.configure(level: String.to_existing_atom(log_level)) 129 | end 130 | 131 | # Start ecto_sql explicitly before as we don't need 132 | # to restart those apps if migrated. 
133 | {:ok, _} = Application.ensure_all_started(:ecto_sql) 134 | 135 | for repo <- repos do 136 | ensure_repo(repo, args) 137 | paths = ensure_migrations_paths(repo, opts) 138 | pool = repo.config()[:pool] 139 | 140 | fun = 141 | if Code.ensure_loaded?(pool) and function_exported?(pool, :unboxed_run, 2) do 142 | &pool.unboxed_run(&1, fn -> migrator.(&1, paths, :down, opts) end) 143 | else 144 | &migrator.(&1, paths, :down, opts) 145 | end 146 | 147 | case Ecto.Migrator.with_repo(repo, fun, [mode: :temporary] ++ opts) do 148 | {:ok, _migrated, _apps} -> 149 | :ok 150 | 151 | {:error, error} -> 152 | Mix.raise("Could not start repo #{inspect(repo)}, error: #{inspect(error)}") 153 | end 154 | end 155 | 156 | :ok 157 | end 158 | end 159 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoSQL.MixProject do 2 | use Mix.Project 3 | 4 | @source_url "https://github.com/elixir-ecto/ecto_sql" 5 | @version "3.12.1" 6 | @adapters ~w(pg myxql tds) 7 | 8 | def project do 9 | [ 10 | app: :ecto_sql, 11 | version: @version, 12 | elixir: "~> 1.14", 13 | deps: deps(), 14 | test_paths: test_paths(System.get_env("ECTO_ADAPTER")), 15 | xref: [ 16 | exclude: [ 17 | MyXQL, 18 | Ecto.Adapters.MyXQL.Connection, 19 | Postgrex, 20 | Ecto.Adapters.Postgres.Connection, 21 | Tds, 22 | Tds.Ecto.UUID, 23 | Ecto.Adapters.Tds.Connection 24 | ] 25 | ], 26 | 27 | # Custom testing 28 | aliases: [ 29 | "test.all": ["test", "test.adapters", "test.as_a_dep"], 30 | "test.adapters": &test_adapters/1, 31 | "test.as_a_dep": &test_as_a_dep/1 32 | ], 33 | preferred_cli_env: ["test.all": :test, "test.adapters": :test], 34 | 35 | # Hex 36 | description: "SQL-based adapters for Ecto and database migrations", 37 | package: package(), 38 | 39 | # Docs 40 | name: "Ecto SQL", 41 | docs: docs() 42 | ] 43 | end 44 | 45 | def application do 46 | [ 47 | extra_applications: [:logger, 
:eex], 48 | env: [postgres_map_type: "jsonb"], 49 | mod: {Ecto.Adapters.SQL.Application, []} 50 | ] 51 | end 52 | 53 | defp deps do 54 | [ 55 | ecto_dep(), 56 | {:telemetry, "~> 0.4.0 or ~> 1.0"}, 57 | 58 | # Drivers 59 | {:db_connection, "~> 2.5 or ~> 2.4.1"}, 60 | postgrex_dep(), 61 | myxql_dep(), 62 | tds_dep(), 63 | 64 | # Bring something in for JSON during tests 65 | {:jason, ">= 0.0.0", only: [:test, :docs]}, 66 | 67 | # Docs 68 | {:ex_doc, "~> 0.21", only: :docs}, 69 | 70 | # Benchmarks 71 | {:benchee, "~> 1.0", only: :bench} 72 | ] 73 | end 74 | 75 | defp ecto_dep do 76 | if path = System.get_env("ECTO_PATH") do 77 | {:ecto, path: path} 78 | else 79 | {:ecto, github: "elixir-ecto/ecto"} 80 | end 81 | end 82 | 83 | defp postgrex_dep do 84 | if path = System.get_env("POSTGREX_PATH") do 85 | {:postgrex, path: path} 86 | else 87 | {:postgrex, "~> 0.19 or ~> 1.0", optional: true} 88 | end 89 | end 90 | 91 | defp myxql_dep do 92 | if path = System.get_env("MYXQL_PATH") do 93 | {:myxql, path: path} 94 | else 95 | {:myxql, "~> 0.7", optional: true} 96 | end 97 | end 98 | 99 | defp tds_dep do 100 | if path = System.get_env("TDS_PATH") do 101 | {:tds, path: path} 102 | else 103 | {:tds, "~> 2.1.1 or ~> 2.2", optional: true} 104 | end 105 | end 106 | 107 | defp test_paths(adapter) when adapter in @adapters, do: ["integration_test/#{adapter}"] 108 | defp test_paths(nil), do: ["test"] 109 | defp test_paths(other), do: raise("unknown adapter #{inspect(other)}") 110 | 111 | defp package do 112 | [ 113 | maintainers: ["Eric Meadows-Jönsson", "José Valim", "James Fish", "Michał Muskała"], 114 | licenses: ["Apache-2.0"], 115 | links: %{"GitHub" => @source_url}, 116 | files: 117 | ~w(.formatter.exs mix.exs README.md CHANGELOG.md lib) ++ 118 | ~w(integration_test/sql integration_test/support) 119 | ] 120 | end 121 | 122 | defp test_as_a_dep(args) do 123 | IO.puts("==> Compiling ecto_sql from a dependency") 124 | File.rm_rf!("tmp/as_a_dep") 125 | File.mkdir_p!("tmp/as_a_dep") 
126 | 127 | File.cd!("tmp/as_a_dep", fn -> 128 | File.write!("mix.exs", """ 129 | defmodule DepsOnEctoSQL.MixProject do 130 | use Mix.Project 131 | 132 | def project do 133 | [ 134 | app: :deps_on_ecto_sql, 135 | version: "0.0.1", 136 | deps: [{:ecto_sql, path: "../.."}] 137 | ] 138 | end 139 | end 140 | """) 141 | 142 | mix_cmd_with_status_check(["do", "deps.get,", "compile", "--force" | args]) 143 | end) 144 | end 145 | 146 | defp test_adapters(args) do 147 | for adapter <- @adapters, do: env_run(adapter, args) 148 | end 149 | 150 | defp env_run(adapter, args) do 151 | IO.puts("==> Running tests for ECTO_ADAPTER=#{adapter} mix test") 152 | 153 | mix_cmd_with_status_check( 154 | ["test", ansi_option() | args], 155 | env: [{"ECTO_ADAPTER", adapter}] 156 | ) 157 | end 158 | 159 | defp ansi_option do 160 | if IO.ANSI.enabled?(), do: "--color", else: "--no-color" 161 | end 162 | 163 | defp mix_cmd_with_status_check(args, opts \\ []) do 164 | {_, res} = System.cmd("mix", args, [into: IO.binstream(:stdio, :line)] ++ opts) 165 | 166 | if res > 0 do 167 | System.at_exit(fn _ -> exit({:shutdown, 1}) end) 168 | end 169 | end 170 | 171 | defp docs do 172 | [ 173 | main: "Ecto.Adapters.SQL", 174 | source_ref: "v#{@version}", 175 | canonical: "http://hexdocs.pm/ecto_sql", 176 | source_url: @source_url, 177 | extras: ["CHANGELOG.md"], 178 | skip_undefined_reference_warnings_on: ["CHANGELOG.md"], 179 | groups_for_modules: [ 180 | # Ecto.Adapters.SQL, 181 | # Ecto.Adapters.SQL.Sandbox, 182 | # Ecto.Migration, 183 | # Ecto.Migrator, 184 | 185 | "Built-in adapters": [ 186 | Ecto.Adapters.MyXQL, 187 | Ecto.Adapters.Tds, 188 | Ecto.Adapters.Postgres 189 | ], 190 | "TDS Types": [ 191 | Tds.Ecto.UUID, 192 | Tds.Ecto.VarChar 193 | ], 194 | "Adapter specification": [ 195 | Ecto.Adapter.Migration, 196 | Ecto.Adapter.Structure, 197 | Ecto.Adapters.SQL.Connection, 198 | Ecto.Migration.Command, 199 | Ecto.Migration.Constraint, 200 | Ecto.Migration.Index, 201 | Ecto.Migration.Reference, 202 | 
Ecto.Migration.Table 203 | ] 204 | ] 205 | ] 206 | end 207 | end 208 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "benchee": {:hex, :benchee, "1.2.0", "afd2f0caec06ce3a70d9c91c514c0b58114636db9d83c2dc6bfd416656618353", [:mix], [{:deep_merge, "~> 1.0", [hex: :deep_merge, repo: "hexpm", optional: false]}, {:statistex, "~> 1.0", [hex: :statistex, repo: "hexpm", optional: false]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "ee729e53217898b8fd30aaad3cce61973dab61574ae6f48229fe7ff42d5e4457"}, 3 | "db_connection": {:hex, :db_connection, "2.7.0", "b99faa9291bb09892c7da373bb82cba59aefa9b36300f6145c5f201c7adf48ec", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "dcf08f31b2701f857dfc787fbad78223d61a32204f217f15e881dd93e4bdd3ff"}, 4 | "decimal": {:hex, :decimal, "2.3.0", "3ad6255aa77b4a3c4f818171b12d237500e63525c2fd056699967a3e7ea20f62", [:mix], [], "hexpm", "a4d66355cb29cb47c3cf30e71329e58361cfcb37c34235ef3bf1d7bf3773aeac"}, 5 | "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, 6 | "earmark_parser": {:hex, :earmark_parser, "1.4.41", "ab34711c9dc6212dda44fcd20ecb87ac3f3fce6f0ca2f28d4a00e4154f8cd599", [:mix], [], "hexpm", "a81a04c7e34b6617c2792e291b5a2e57ab316365c2644ddc553bb9ed863ebefa"}, 7 | "ecto": {:git, "https://github.com/elixir-ecto/ecto.git", "60120357088650119b6e1b0ee6277637bae943c1", []}, 8 | "ex_doc": {:hex, :ex_doc, "0.34.2", "13eedf3844ccdce25cfd837b99bea9ad92c4e511233199440488d217c92571e8", [:mix], [{:earmark_parser, "~> 1.4.39", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or 
~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "5ce5f16b41208a50106afed3de6a2ed34f4acfd65715b82a0b84b49d995f95c1"}, 9 | "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, 10 | "makeup": {:hex, :makeup, "1.1.2", "9ba8837913bdf757787e71c1581c21f9d2455f4dd04cfca785c70bbfff1a76a3", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "cce1566b81fbcbd21eca8ffe808f33b221f9eee2cbc7a1706fc3da9ff18e6cac"}, 11 | "makeup_elixir": {:hex, :makeup_elixir, "0.16.2", "627e84b8e8bf22e60a2579dad15067c755531fea049ae26ef1020cad58fe9578", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "41193978704763f6bbe6cc2758b84909e62984c7752b3784bd3c218bb341706b"}, 12 | "makeup_erlang": {:hex, :makeup_erlang, "1.0.1", "c7f58c120b2b5aa5fd80d540a89fdf866ed42f1f3994e4fe189abebeab610839", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "8a89a1eeccc2d798d6ea15496a6e4870b75e014d1af514b1b71fa33134f57814"}, 13 | "myxql": {:hex, :myxql, "0.7.1", "7c7b75aa82227cd2bc9b7fbd4de774fb19a1cdb309c219f411f82ca8860f8e01", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:geo, "~> 3.4", [hex: :geo, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: 
"hexpm", optional: true]}], "hexpm", "a491cdff53353a09b5850ac2d472816ebe19f76c30b0d36a43317a67c9004936"}, 14 | "nimble_parsec": {:hex, :nimble_parsec, "1.4.0", "51f9b613ea62cfa97b25ccc2c1b4216e81df970acd8e16e8d1bdc58fef21370d", [:mix], [], "hexpm", "9c565862810fb383e9838c1dd2d7d2c437b3d13b267414ba6af33e50d2d1cf28"}, 15 | "postgrex": {:hex, :postgrex, "0.19.0", "f7d50e50cb42e0a185f5b9a6095125a9ab7e4abccfbe2ab820ab9aa92b71dbab", [:mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "dba2d2a0a8637defbf2307e8629cb2526388ba7348f67d04ec77a5d6a72ecfae"}, 16 | "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, 17 | "tds": {:hex, :tds, "2.3.4", "534749dd9ef61af960fcafa9cbb7186d6d7b9f92ea0133fb25da07b121c8295c", [:mix], [{:db_connection, "~> 2.0", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.9 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "bb9a53d4688a85fd566f342f76b50d39adfc4b410062886ef908365ead24ba3f"}, 18 | "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, 19 | } 20 | -------------------------------------------------------------------------------- /test/ecto/migrator_repo_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.MigratorRepoTest do 2 | use ExUnit.Case 3 | 4 | import 
Ecto.Migrator
  import ExUnit.CaptureLog

  # Minimal migration with explicit up/down callbacks.
  defmodule Migration do
    use Ecto.Migration

    def up do
      execute "up"
    end

    def down do
      execute "down"
    end
  end

  # Migration using the reversible `change/0` callback.
  defmodule ChangeMigration do
    use Ecto.Migration

    def change do
      create table(:posts) do
        add :name, :string
      end

      create index(:posts, [:title])
    end
  end

  defmodule MainRepo do
    use Ecto.Repo, otp_app: :ecto_sql, adapter: EctoSQL.TestAdapter
  end

  defmodule MigrationRepo do
    use Ecto.Repo, otp_app: :ecto_sql, adapter: EctoSQL.TestAdapter
  end

  # Point MainRepo's migration bookkeeping at a separate repo, which is
  # what the tests below exercise.
  Application.put_env(:ecto_sql, MainRepo, migration_repo: MigrationRepo)

  setup do
    # MigrationsAgent is presumably defined in a sibling test support file
    # (not visible here); it fakes three already-applied versions.
    {:ok, _} = start_supervised({MigrationsAgent, [{1, nil}, {2, nil}, {3, nil}]})
    :ok
  end

  # Overrides the test adapter's config for one test and restores it on exit.
  def put_test_adapter_config(config) do
    Application.put_env(:ecto_sql, EctoSQL.TestAdapter, config)

    on_exit(fn ->
      # Fix: delete from :ecto_sql — the app the config was put under.
      # Deleting from :ecto was a no-op, so the config leaked across tests.
      Application.delete_env(:ecto_sql, EctoSQL.TestAdapter)
    end)
  end

  setup_all do
    {:ok, _pid} = MainRepo.start_link()
    {:ok, _pid} = MigrationRepo.start_link()
    :ok
  end

  describe "migration_repo option" do
    test "upwards and downwards migrations" do
      assert run(MainRepo, [{3, ChangeMigration}, {4, Migration}], :up, to: 4, log: false) == [4]

      assert run(MainRepo, [{2, ChangeMigration}, {3, Migration}], :down, all: true, log: false) ==
               [3, 2]
    end

    test "down invokes the repository adapter with down commands" do
      assert down(MainRepo, 0, Migration, log: false) == :already_down
      assert down(MainRepo, 2, Migration, log: false) == :ok
    end

    test "up invokes the repository adapter with up commands" do
      assert up(MainRepo, 3, Migration, log: false) == :already_up
      assert up(MainRepo, 4, Migration, log: false) == :ok
    end

    test "migrations run inside a transaction if the adapter supports ddl 
transactions when configuring a migration repo" do
      capture_log(fn ->
        put_test_adapter_config(supports_ddl_transaction?: true, test_process: self())
        up(MainRepo, 0, Migration)

        # The transaction runs on the main repo, while the migration lock
        # is taken through the configured migration repo.
        assert_receive {:transaction, %{repo: MainRepo}, _}
        assert_receive {:lock_for_migrations, %{repo: MigrationRepo}, _, _}
      end)
    end
  end
end
--------------------------------------------------------------------------------
/test/ecto/tenant_migrator_test.exs:
--------------------------------------------------------------------------------
defmodule Ecto.TenantMigratorTest do
  use ExUnit.Case

  import Ecto.Migrator
  import ExUnit.CaptureLog

  alias EctoSQL.TestRepo

  # Minimal migration with explicit up/down callbacks.
  defmodule Migration do
    use Ecto.Migration

    def up do
      execute "up"
    end

    def down do
      execute "down"
    end
  end

  # Migration using the reversible `change/0` callback.
  defmodule ChangeMigration do
    use Ecto.Migration

    def change do
      create table(:posts) do
        add :name, :string
      end

      create index(:posts, [:title])
    end
  end

  setup do
    # MigrationsAgent is presumably defined in a sibling test support file
    # (not visible here); it fakes three already-applied versions.
    {:ok, _} = start_supervised({MigrationsAgent, [{1, nil}, {2, nil}, {3, nil}]})
    :ok
  end

  # Overrides the test adapter's config for one test and restores it on exit.
  def put_test_adapter_config(config) do
    Application.put_env(:ecto_sql, EctoSQL.TestAdapter, config)

    on_exit(fn ->
      # Fix: delete from :ecto_sql — the app the config was put under.
      # Deleting from :ecto was a no-op, so the config leaked across tests.
      Application.delete_env(:ecto_sql, EctoSQL.TestAdapter)
    end)
  end

  describe "dynamic_repo option" do
    test "upwards and downwards migrations" do
      assert run(TestRepo, [{3, ChangeMigration}, {4, Migration}], :up,
               to: 4,
               log: false,
               dynamic_repo: :tenant_db
             ) == [4]

      assert run(TestRepo, [{2, ChangeMigration}, {3, Migration}], :down,
               all: true,
               log: false,
               dynamic_repo: :tenant_db
             ) == [3, 2]
    end

    test "down invokes the repository adapter with down commands" do
      assert down(TestRepo, 0, Migration, log: false, dynamic_repo: :tenant_db) == :already_down
# NOTE(review): this span is part of a flattened repo-pack chunk; the line
# breaks below are restored from the embedded "NN |" markers. All code tokens
# are unchanged; only comments were added.
      assert down(TestRepo, 2, Migration, log: false, dynamic_repo: :tenant_db) == :ok
    end

    test "up invokes the repository adapter with up commands" do
      assert up(TestRepo, 3, Migration, log: false, dynamic_repo: :tenant_db) == :already_up
      assert up(TestRepo, 4, Migration, log: false, dynamic_repo: :tenant_db) == :ok
    end

    test "migrations run inside a transaction if the adapter supports ddl transactions" do
      capture_log(fn ->
        put_test_adapter_config(supports_ddl_transaction?: true, test_process: self())
        up(TestRepo, 0, Migration, dynamic_repo: :tenant_db)
        # The test adapter sends {:transaction, _, _} to the test process when
        # its transaction/3 callback runs — see EctoSQL.TestAdapter.
        assert_receive {:transaction, _, _}
      end)
    end
  end
end

--------------------------------------------------------------------------------
/test/ecto/type_test.exs:
--------------------------------------------------------------------------------
defmodule Ecto.TypeTest do
  use ExUnit.Case, async: true

  import Ecto.Type
  alias Ecto.Adapters.{MyXQL, Postgres, Tds}

  # One canonical UUID in string form plus two 16-byte binary encodings of the
  # same value: the standard byte order and the MSSQL (mixed-endian) order.
  @uuid_string "bfe0888c-5c59-4bb3-adfd-71f0b85d3db7"
  @uuid_binary <<191, 224, 136, 140, 92, 89, 75, 179, 173, 253, 113, 240, 184, 93, 61, 183>>
  @mssql_uuid_binary <<140, 136, 224, 191, 89, 92, 179, 75, 173, 253, 113, 240, 184, 93, 61, 183>>

  # We don't effectively dump because we need to keep JSON encoding
  test "dumps through the adapter" do
    assert adapter_dump(MyXQL, {:map, Ecto.UUID}, %{"a" => @uuid_string}) ==
             {:ok, %{"a" => @uuid_string}}

    assert adapter_dump(Postgres, {:map, Ecto.UUID}, %{"a" => @uuid_string}) ==
             {:ok, %{"a" => @uuid_string}}

    assert adapter_dump(Tds, {:map, Elixir.Tds.Ecto.UUID}, %{"a" => @uuid_string}) ==
             {:ok, %{"a" => @uuid_string}}
  end

  # Therefore we need to support both binaries and strings when loading
  test "loads through the adapter" do
    assert adapter_load(MyXQL, {:map, Ecto.UUID}, %{"a" => @uuid_binary}) ==
             {:ok, %{"a" => @uuid_string}}

    assert adapter_load(Postgres, {:map, Ecto.UUID}, %{"a" => @uuid_binary}) ==
             {:ok, %{"a" => @uuid_string}}

    # Tds uses its own UUID type and the MSSQL binary byte order.
    assert adapter_load(Tds, {:map, Elixir.Tds.Ecto.UUID}, %{"a" => @mssql_uuid_binary}) ==
             {:ok, %{"a" => @uuid_string}}

    assert adapter_load(MyXQL, {:map, Ecto.UUID}, %{"a" => @uuid_string}) ==
             {:ok, %{"a" => @uuid_string}}

    assert adapter_load(Postgres, {:map, Ecto.UUID}, %{"a" => @uuid_string}) ==
             {:ok, %{"a" => @uuid_string}}

    assert adapter_load(Tds, {:map, Elixir.Tds.Ecto.UUID}, %{"a" => @uuid_string}) ==
             {:ok, %{"a" => @uuid_string}}
  end
end

--------------------------------------------------------------------------------
/test/mix/ecto_sql_test.exs:
--------------------------------------------------------------------------------
defmodule Mix.EctoSQLTest do
  use ExUnit.Case, async: true
  import Mix.EctoSQL

  # Minimal repo stub: :priv is read from the process dictionary so the test
  # below can vary it per assertion without touching global config.
  defmodule Repo do
    def config do
      [priv: Process.get(:priv), otp_app: :ecto_sql]
    end
  end

  test "source_priv_repo" do
    # nil priv falls back to the default "priv/repo" location.
    Process.put(:priv, nil)
    assert source_repo_priv(Repo) == Path.expand("priv/repo", File.cwd!())
    Process.put(:priv, "hello")
    assert source_repo_priv(Repo) == Path.expand("hello", File.cwd!())
  end
end

--------------------------------------------------------------------------------
/test/mix/tasks/ecto.dump_load_test.exs:
--------------------------------------------------------------------------------
defmodule Mix.Tasks.Ecto.DumpLoadTest do
  use ExUnit.Case, async: true

  alias Mix.Tasks.Ecto.{Load, Dump}

  # Mocked adapters

  # Fake adapter implementing Ecto.Adapter.Structure; structure_dump/load
  # results are injected through the process dictionary by each test.
  defmodule Adapter do
    @behaviour Ecto.Adapter
    @behaviour Ecto.Adapter.Structure

    defmacro __before_compile__(_), do: :ok
    def dumpers(_, _), do: raise("not implemented")
    def loaders(_, _), do: raise("not implemented")
    def checkout(_, _, _), do: raise("not implemented")
    def checked_out?(_), do: raise("not 
implemented") 17 | def ensure_all_started(_, _), do: {:ok, []} 18 | 19 | def init(_opts) do 20 | child_spec = Supervisor.child_spec({Task, fn -> :timer.sleep(:infinity) end}, []) 21 | {:ok, child_spec, %{}} 22 | end 23 | 24 | def structure_dump(_, _), do: Process.get(:structure_dump) || raise("no structure_dump") 25 | def structure_load(_, _), do: Process.get(:structure_load) || raise("no structure_load") 26 | def dump_cmd(_, _, _), do: Process.get(:dump_cmd) || raise("no dump_cmd") 27 | end 28 | 29 | defmodule NoStructureAdapter do 30 | @behaviour Ecto.Adapter 31 | defmacro __before_compile__(_), do: :ok 32 | def dumpers(_, _), do: raise("not implemented") 33 | def loaders(_, _), do: raise("not implemented") 34 | def init(_), do: raise("not implemented") 35 | def checkout(_, _, _), do: raise("not implemented") 36 | def checked_out?(_), do: raise("not implemented") 37 | def ensure_all_started(_, _), do: raise("not implemented") 38 | end 39 | 40 | # Mocked repos 41 | 42 | defmodule Repo do 43 | use Ecto.Repo, otp_app: :ecto_sql, adapter: Adapter 44 | end 45 | 46 | defmodule MigrationRepo do 47 | use Ecto.Repo, otp_app: :ecto_sql, adapter: Adapter 48 | end 49 | 50 | defmodule NoStructureRepo do 51 | use Ecto.Repo, otp_app: :ecto_sql, adapter: NoStructureAdapter 52 | end 53 | 54 | setup do 55 | Application.put_env(:ecto_sql, __MODULE__.Repo, []) 56 | Application.put_env(:ecto_sql, __MODULE__.NoStructureRepo, []) 57 | end 58 | 59 | ## Dump 60 | 61 | test "runs the adapter structure_dump" do 62 | Process.put(:structure_dump, {:ok, "foo"}) 63 | Dump.run(["-r", to_string(Repo)]) 64 | assert_received {:mix_shell, :info, [msg]} 65 | assert msg =~ "The structure for Mix.Tasks.Ecto.DumpLoadTest.Repo has been dumped to foo" 66 | end 67 | 68 | test "runs the adapter structure_dump for migration_repo" do 69 | Application.put_env(:ecto_sql, Repo, migration_repo: MigrationRepo) 70 | 71 | Process.put(:structure_dump, {:ok, "foo"}) 72 | Dump.run(["-r", to_string(Repo)]) 73 | 74 | 
assert_received {:mix_shell, :info, 75 | [ 76 | "The structure for Mix.Tasks.Ecto.DumpLoadTest.Repo has been dumped to foo" <> 77 | _ 78 | ]} 79 | 80 | assert_received {:mix_shell, :info, 81 | [ 82 | "The structure for Mix.Tasks.Ecto.DumpLoadTest.MigrationRepo has been dumped to foo" <> 83 | _ 84 | ]} 85 | end 86 | 87 | test "runs the adapter structure_dump with --quiet" do 88 | Process.put(:structure_dump, {:ok, "foo"}) 89 | Dump.run(["-r", to_string(Repo), "--quiet"]) 90 | refute_received {:mix_shell, :info, [_]} 91 | end 92 | 93 | test "raises an error when structure_dump gives an unknown feedback" do 94 | Process.put(:structure_dump, {:error, :confused}) 95 | 96 | assert_raise Mix.Error, fn -> 97 | Dump.run(["-r", to_string(Repo)]) 98 | end 99 | end 100 | 101 | test "raises an error on structure_dump when the adapter doesn't define a storage" do 102 | assert_raise Mix.Error, ~r/to implement Ecto.Adapter.Structure/, fn -> 103 | Dump.run(["-r", to_string(NoStructureRepo)]) 104 | end 105 | end 106 | 107 | ## Load 108 | 109 | test "runs the adapter structure_load" do 110 | table_exists? = fn _, _ -> false end 111 | 112 | Process.put(:structure_load, {:ok, "foo"}) 113 | Load.run(["-r", to_string(Repo)], table_exists?) 114 | 115 | assert_received {:mix_shell, :info, [msg]} 116 | assert msg =~ "The structure for Mix.Tasks.Ecto.DumpLoadTest.Repo has been loaded from foo" 117 | end 118 | 119 | test "runs the adapter structure_load for migration_repo" do 120 | Application.put_env(:ecto_sql, Repo, migration_repo: MigrationRepo) 121 | 122 | table_exists? = fn _, _ -> false end 123 | 124 | Process.put(:structure_load, {:ok, "foo"}) 125 | Load.run(["-r", to_string(Repo)], table_exists?) 
126 | 127 | assert_received {:mix_shell, :info, 128 | [ 129 | "The structure for Mix.Tasks.Ecto.DumpLoadTest.Repo has been loaded from foo" <> 130 | _ 131 | ]} 132 | 133 | assert_received {:mix_shell, :info, 134 | [ 135 | "The structure for Mix.Tasks.Ecto.DumpLoadTest.MigrationRepo has been loaded from foo" <> 136 | _ 137 | ]} 138 | end 139 | 140 | test "runs the adapter structure_load with --quiet" do 141 | table_exists? = fn _, _ -> false end 142 | Process.put(:structure_load, {:ok, "foo"}) 143 | Load.run(["-r", to_string(Repo), "--quiet"], table_exists?) 144 | refute_received {:mix_shell, :info, [_]} 145 | end 146 | 147 | test "skips when the database is loaded with --skip-if-loaded" do 148 | table_exists? = fn _, _ -> true end 149 | assert :ok == Load.run(["-r", to_string(Repo), "--skip-if-loaded"], table_exists?) 150 | end 151 | 152 | test "raises an error when structure_load gives an unknown feedback" do 153 | table_exists? = fn _, _ -> false end 154 | 155 | Process.put(:structure_load, {:error, :confused}) 156 | 157 | assert_raise Mix.Error, fn -> 158 | Load.run(["-r", to_string(Repo)], table_exists?) 
159 | end 160 | end 161 | 162 | test "raises an error on structure_load when the adapter doesn't define a storage" do 163 | assert_raise Mix.Error, ~r/to implement Ecto.Adapter.Structure/, fn -> 164 | Load.run(["-r", to_string(NoStructureRepo)]) 165 | end 166 | end 167 | end 168 | -------------------------------------------------------------------------------- /test/mix/tasks/ecto.gen.migration_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.Gen.MigrationTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Support.FileHelpers 5 | import Mix.Tasks.Ecto.Gen.Migration, only: [run: 1] 6 | 7 | tmp_path = Path.join(tmp_path(), inspect(Ecto.Gen.Migration)) 8 | @migrations_path Path.join(tmp_path, "migrations") 9 | 10 | defmodule Repo do 11 | def __adapter__ do 12 | true 13 | end 14 | 15 | def config do 16 | [priv: "tmp/#{inspect(Ecto.Gen.Migration)}", otp_app: :ecto_sql] 17 | end 18 | end 19 | 20 | setup do 21 | File.rm_rf!(unquote(tmp_path)) 22 | :ok 23 | end 24 | 25 | test "generates a new migration" do 26 | [path] = run(["-r", to_string(Repo), "my_migration"]) 27 | assert Path.dirname(path) == @migrations_path 28 | assert Path.basename(path) =~ ~r/^\d{14}_my_migration\.exs$/ 29 | 30 | assert_file(path, fn file -> 31 | assert file =~ "defmodule Mix.Tasks.Ecto.Gen.MigrationTest.Repo.Migrations.MyMigration do" 32 | assert file =~ "use Ecto.Migration" 33 | assert file =~ "def change do" 34 | end) 35 | end 36 | 37 | test "generates a new migration with Custom Migration Module" do 38 | Application.put_env(:ecto_sql, :migration_module, MyCustomApp.MigrationModule) 39 | [path] = run(["-r", to_string(Repo), "my_custom_migration"]) 40 | Application.delete_env(:ecto_sql, :migration_module) 41 | assert Path.dirname(path) == @migrations_path 42 | assert Path.basename(path) =~ ~r/^\d{14}_my_custom_migration\.exs$/ 43 | 44 | assert_file(path, fn file -> 45 | assert file =~ 46 | "defmodule 
Mix.Tasks.Ecto.Gen.MigrationTest.Repo.Migrations.MyCustomMigration do" 47 | 48 | assert file =~ "use MyCustomApp.MigrationModule" 49 | assert file =~ "def change do" 50 | end) 51 | end 52 | 53 | test "underscores the filename when generating a migration" do 54 | run(["-r", to_string(Repo), "MyMigration"]) 55 | assert [name] = File.ls!(@migrations_path) 56 | assert name =~ ~r/^\d{14}_my_migration\.exs$/ 57 | end 58 | 59 | test "custom migrations_path" do 60 | dir = Path.join([unquote(tmp_path), "custom_migrations"]) 61 | [path] = run(["-r", to_string(Repo), "--migrations-path", dir, "custom_path"]) 62 | assert Path.dirname(path) == dir 63 | end 64 | 65 | test "raises when existing migration exists" do 66 | run(["-r", to_string(Repo), "my_migration"]) 67 | 68 | assert_raise Mix.Error, ~r"migration can't be created", fn -> 69 | run(["-r", to_string(Repo), "my_migration"]) 70 | end 71 | end 72 | 73 | test "raises when missing file" do 74 | assert_raise Mix.Error, fn -> run(["-r", to_string(Repo)]) end 75 | end 76 | end 77 | -------------------------------------------------------------------------------- /test/mix/tasks/ecto.migrate_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.MigrateTest do 2 | use ExUnit.Case 3 | 4 | import Mix.Tasks.Ecto.Migrate, only: [run: 2] 5 | import Support.FileHelpers 6 | 7 | @migrations_path Path.join([tmp_path(), inspect(Ecto.Migrate), "migrations"]) 8 | 9 | setup do 10 | File.mkdir_p!(@migrations_path) 11 | :ok 12 | end 13 | 14 | defmodule Repo do 15 | def start_link(_) do 16 | Process.put(:started, true) 17 | 18 | Task.start_link(fn -> 19 | Process.flag(:trap_exit, true) 20 | 21 | receive do 22 | {:EXIT, _, :normal} -> :ok 23 | end 24 | end) 25 | end 26 | 27 | def stop do 28 | :ok 29 | end 30 | 31 | def __adapter__ do 32 | EctoSQL.TestAdapter 33 | end 34 | 35 | def config do 36 | [priv: "tmp/#{inspect(Ecto.Migrate)}", otp_app: :ecto_sql] 37 | end 38 | end 39 | 40 | 
defmodule StartedRepo do 41 | def start_link(_) do 42 | Process.put(:already_started, true) 43 | {:error, {:already_started, :whatever}} 44 | end 45 | 46 | def stop do 47 | raise "should not be called" 48 | end 49 | 50 | def __adapter__ do 51 | EctoSQL.TestAdapter 52 | end 53 | 54 | def config do 55 | [priv: "tmp/#{inspect(Ecto.Migrate)}", otp_app: :ecto_sql] 56 | end 57 | end 58 | 59 | test "runs the migrator with app_repo config" do 60 | Application.put_env(:ecto_sql, :ecto_repos, [Repo]) 61 | 62 | run([], fn _, _, _, _ -> 63 | Process.put(:migrated, true) 64 | [] 65 | end) 66 | 67 | assert Process.get(:migrated) 68 | assert Process.get(:started) 69 | after 70 | Application.delete_env(:ecto, :ecto_repos) 71 | end 72 | 73 | test "runs the migrator after starting repo" do 74 | run(["-r", to_string(Repo)], fn _, _, _, _ -> 75 | Process.put(:migrated, true) 76 | [] 77 | end) 78 | 79 | assert Process.get(:migrated) 80 | assert Process.get(:started) 81 | end 82 | 83 | test "runs the migrator with the already started repo" do 84 | run(["-r", to_string(StartedRepo)], fn _, _, _, _ -> 85 | Process.put(:migrated, true) 86 | [] 87 | end) 88 | 89 | assert Process.get(:migrated) 90 | assert Process.get(:already_started) 91 | end 92 | 93 | test "runs the migrator with two repos" do 94 | run(["-r", to_string(Repo), "-r", to_string(StartedRepo)], fn _, _, _, _ -> 95 | Process.put(:migrated, true) 96 | [] 97 | end) 98 | 99 | assert Process.get(:migrated) 100 | assert Process.get(:started) 101 | assert Process.get(:already_started) 102 | end 103 | 104 | test "runs the migrator yielding the repository and migrations path" do 105 | run(["-r", to_string(Repo), "--quiet", "--prefix", "foo"], fn repo, [path], direction, opts -> 106 | assert repo == Repo 107 | refute path =~ ~r/_build/ 108 | assert direction == :up 109 | assert opts[:all] == true 110 | assert opts[:log] == false 111 | assert opts[:prefix] == "foo" 112 | [] 113 | end) 114 | 115 | assert Process.get(:started) 116 | end 
117 | 118 | test "runs the migrator with --step" do 119 | run(["-r", to_string(Repo), "-n", "1"], fn repo, [path], direction, opts -> 120 | assert repo == Repo 121 | refute path =~ ~r/_build/ 122 | assert direction == :up 123 | assert opts == [repo: "Elixir.Mix.Tasks.Ecto.MigrateTest.Repo", step: 1] 124 | [] 125 | end) 126 | 127 | assert Process.get(:started) 128 | end 129 | 130 | test "raises when migrations path does not exist" do 131 | File.rm_rf!(@migrations_path) 132 | 133 | assert_raise Mix.Error, fn -> 134 | run(["-r", to_string(Repo)], fn _, _, _, _ -> [] end) 135 | end 136 | 137 | assert !Process.get(:started) 138 | end 139 | 140 | test "uses custom paths" do 141 | path1 = Path.join([unquote(tmp_path()), inspect(Ecto.Migrate), "migrations_1"]) 142 | path2 = Path.join([unquote(tmp_path()), inspect(Ecto.Migrate), "migrations_2"]) 143 | File.mkdir_p!(path1) 144 | File.mkdir_p!(path2) 145 | 146 | run( 147 | ["-r", to_string(Repo), "--migrations-path", path1, "--migrations-path", path2], 148 | fn Repo, [^path1, ^path2], _, _ -> [] end 149 | ) 150 | end 151 | 152 | test "runs the migrator with --to_exclusive" do 153 | run(["-r", to_string(Repo), "--to-exclusive", "12345"], fn repo, [path], direction, opts -> 154 | assert repo == Repo 155 | refute path =~ ~r/_build/ 156 | assert direction == :up 157 | assert opts == [repo: "Elixir.Mix.Tasks.Ecto.MigrateTest.Repo", to_exclusive: 12345] 158 | [] 159 | end) 160 | 161 | assert Process.get(:started) 162 | end 163 | end 164 | -------------------------------------------------------------------------------- /test/mix/tasks/ecto.migrations_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.MigrationsTest do 2 | use ExUnit.Case 3 | 4 | import Mix.Tasks.Ecto.Migrations, only: [run: 3] 5 | import Support.FileHelpers 6 | 7 | migrations_path = Path.join([tmp_path(), inspect(Ecto.Migrations), "migrations"]) 8 | 9 | setup do 10 | 
File.mkdir_p!(unquote(migrations_path)) 11 | :ok 12 | end 13 | 14 | defmodule Repo do 15 | def start_link(_) do 16 | Process.put(:started, true) 17 | 18 | Task.start_link(fn -> 19 | Process.flag(:trap_exit, true) 20 | 21 | receive do 22 | {:EXIT, _, :normal} -> :ok 23 | end 24 | end) 25 | end 26 | 27 | def stop() do 28 | :ok 29 | end 30 | 31 | def __adapter__ do 32 | EctoSQL.TestAdapter 33 | end 34 | 35 | def config do 36 | [priv: "tmp/#{inspect(Ecto.Migrations)}", otp_app: :ecto_sql] 37 | end 38 | end 39 | 40 | test "displays the up and down status for the default repo" do 41 | Application.put_env(:ecto_sql, :ecto_repos, [Repo]) 42 | 43 | migrations = fn _, _, _ -> 44 | [ 45 | {:up, 0, "up_migration_0"}, 46 | {:up, 20_160_000_000_001, "up_migration_1"}, 47 | {:up, 20_160_000_000_002, "up_migration_2"}, 48 | {:up, 20_160_000_000_003, "up_migration_3"}, 49 | {:down, 20_160_000_000_004, "down_migration_1"}, 50 | {:down, 20_160_000_000_005, "down_migration_2"} 51 | ] 52 | end 53 | 54 | expected_output = """ 55 | 56 | Repo: Mix.Tasks.Ecto.MigrationsTest.Repo 57 | 58 | Status Migration ID Migration Name 59 | -------------------------------------------------- 60 | up 0 up_migration_0 61 | up 20160000000001 up_migration_1 62 | up 20160000000002 up_migration_2 63 | up 20160000000003 up_migration_3 64 | down 20160000000004 down_migration_1 65 | down 20160000000005 down_migration_2 66 | """ 67 | 68 | run([], migrations, fn i -> assert(i == expected_output) end) 69 | end 70 | 71 | test "migrations displays the up and down status for any given repo" do 72 | migrations = fn _, _, _ -> 73 | [ 74 | {:up, 20_160_000_000_001, "up_migration_1"}, 75 | {:down, 20_160_000_000_002, "down_migration_1"} 76 | ] 77 | end 78 | 79 | expected_output = """ 80 | 81 | Repo: Mix.Tasks.Ecto.MigrationsTest.Repo 82 | 83 | Status Migration ID Migration Name 84 | -------------------------------------------------- 85 | up 20160000000001 up_migration_1 86 | down 20160000000002 down_migration_1 87 | """ 
88 | 89 | run(["-r", to_string(Repo)], migrations, fn i -> assert(i == expected_output) end) 90 | end 91 | 92 | test "does not run from _build" do 93 | Application.put_env(:ecto_sql, :ecto_repos, [Repo]) 94 | 95 | migrations = fn repo, [path], _opts -> 96 | assert repo == Repo 97 | refute path =~ ~r/_build/ 98 | [] 99 | end 100 | 101 | run([], migrations, fn _ -> :ok end) 102 | end 103 | 104 | test "uses custom paths" do 105 | path1 = Path.join([unquote(tmp_path()), inspect(Ecto.Migrate), "migrations_1"]) 106 | path2 = Path.join([unquote(tmp_path()), inspect(Ecto.Migrate), "migrations_2"]) 107 | File.mkdir_p!(path1) 108 | File.mkdir_p!(path2) 109 | 110 | run( 111 | ["-r", to_string(Repo), "--migrations-path", path1, "--migrations-path", path2], 112 | fn Repo, [^path1, ^path2], _opts -> [] end, 113 | fn _ -> :ok end 114 | ) 115 | end 116 | end 117 | -------------------------------------------------------------------------------- /test/mix/tasks/ecto.rollback_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.RollbackTest do 2 | use ExUnit.Case 3 | 4 | import Mix.Tasks.Ecto.Rollback, only: [run: 2] 5 | import Support.FileHelpers 6 | 7 | @migrations_path Path.join([tmp_path(), inspect(Ecto.Migrate), "migrations"]) 8 | 9 | setup do 10 | File.mkdir_p!(@migrations_path) 11 | :ok 12 | end 13 | 14 | defmodule Repo do 15 | def start_link(_) do 16 | Process.put(:started, true) 17 | 18 | Task.start_link(fn -> 19 | Process.flag(:trap_exit, true) 20 | 21 | receive do 22 | {:EXIT, _, :normal} -> :ok 23 | end 24 | end) 25 | end 26 | 27 | def stop() do 28 | :ok 29 | end 30 | 31 | def __adapter__ do 32 | EctoSQL.TestAdapter 33 | end 34 | 35 | def config do 36 | [priv: "tmp/#{inspect(Ecto.Migrate)}", otp_app: :ecto_sql] 37 | end 38 | end 39 | 40 | defmodule StartedRepo do 41 | def start_link(_) do 42 | {:error, {:already_started, :whatever}} 43 | end 44 | 45 | def stop() do 46 | raise "should not be called" 47 | 
end 48 | 49 | def __adapter__ do 50 | EctoSQL.TestAdapter 51 | end 52 | 53 | def config do 54 | [priv: "tmp/#{inspect(Ecto.Migrate)}", otp_app: :ecto_sql] 55 | end 56 | end 57 | 58 | test "runs the migrator after starting repo" do 59 | run(["-r", to_string(Repo)], fn _, _, _, _ -> 60 | Process.put(:migrated, true) 61 | [] 62 | end) 63 | 64 | assert Process.get(:migrated) 65 | assert Process.get(:started) 66 | end 67 | 68 | test "runs the migrator with already started repo" do 69 | run(["-r", to_string(StartedRepo)], fn _, _, _, _ -> 70 | Process.put(:migrated, true) 71 | [] 72 | end) 73 | 74 | assert Process.get(:migrated) 75 | end 76 | 77 | test "runs the migrator yielding the repository and migrations path" do 78 | run(["-r", to_string(Repo), "--prefix", "foo"], fn repo, [path], direction, opts -> 79 | assert repo == Repo 80 | refute path =~ ~r/_build/ 81 | assert direction == :down 82 | assert opts[:step] == 1 83 | assert opts[:prefix] == "foo" 84 | [] 85 | end) 86 | 87 | assert Process.get(:started) 88 | end 89 | 90 | test "raises when migrations path does not exist" do 91 | File.rm_rf!(@migrations_path) 92 | 93 | assert_raise Mix.Error, fn -> 94 | run(["-r", to_string(Repo)], fn _, _, _, _ -> [] end) 95 | end 96 | 97 | assert !Process.get(:started) 98 | end 99 | 100 | test "uses custom paths" do 101 | path1 = Path.join([unquote(tmp_path()), inspect(Ecto.Migrate), "migrations_1"]) 102 | path2 = Path.join([unquote(tmp_path()), inspect(Ecto.Migrate), "migrations_2"]) 103 | File.mkdir_p!(path1) 104 | File.mkdir_p!(path2) 105 | 106 | run( 107 | ["-r", to_string(Repo), "--migrations-path", path1, "--migrations-path", path2], 108 | fn Repo, [^path1, ^path2], _, _ -> [] end 109 | ) 110 | end 111 | 112 | test "runs the migrator with --to_exclusive" do 113 | run(["-r", to_string(Repo), "--to-exclusive", "12345"], fn repo, [path], direction, opts -> 114 | assert repo == Repo 115 | refute path =~ ~r/_build/ 116 | assert direction == :down 117 | assert opts == [repo: 
# NOTE(review): flattened repo-pack chunk; line breaks restored from the
# embedded "NN |" markers. Code tokens unchanged; only comments added.
"Elixir.Mix.Tasks.Ecto.RollbackTest.Repo", to_exclusive: 12345]
      []
    end)

    assert Process.get(:started)
  end
end

--------------------------------------------------------------------------------
/test/support/connection_helpers.exs:
--------------------------------------------------------------------------------
defmodule Support.ConnectionHelpers do
  @doc """
  Reduces and intersperses a list in one pass.
  """
  def intersperse_reduce(list, separator, user_acc, reducer, acc \\ [])

  def intersperse_reduce([], _separator, user_acc, _reducer, acc),
    do: {acc, user_acc}

  # Last element: append without a trailing separator (acc is iodata-style,
  # built as nested lists).
  def intersperse_reduce([elem], _separator, user_acc, reducer, acc) do
    {elem, user_acc} = reducer.(elem, user_acc)
    {[acc | elem], user_acc}
  end

  def intersperse_reduce([elem | rest], separator, user_acc, reducer, acc) do
    {elem, user_acc} = reducer.(elem, user_acc)
    intersperse_reduce(rest, separator, user_acc, reducer, [acc, elem, separator])
  end
end

--------------------------------------------------------------------------------
/test/support/test_repo.exs:
--------------------------------------------------------------------------------
# In-memory store of applied migration versions (paired with their :prefix),
# backing the migration emulation in EctoSQL.TestAdapter below.
defmodule MigrationsAgent do
  use Agent

  def start_link(versions) do
    Agent.start_link(fn -> versions end, name: __MODULE__)
  end

  def get do
    Agent.get(__MODULE__, & &1)
  end

  def up(version, opts) do
    Agent.update(__MODULE__, &[{version, opts[:prefix]} | &1])
  end

  def down(version, opts) do
    Agent.update(__MODULE__, &List.delete(&1, {version, opts[:prefix]}))
  end
end

# Fake adapter implementing just enough of the Ecto adapter behaviours for
# the migrator/task tests; callbacks the tests never hit raise
# "not implemented" so accidental use fails loudly.
defmodule EctoSQL.TestAdapter do
  @behaviour Ecto.Adapter
  @behaviour Ecto.Adapter.Queryable
  @behaviour Ecto.Adapter.Schema
  @behaviour Ecto.Adapter.Transaction
  @behaviour Ecto.Adapter.Migration

  defmacro __before_compile__(_opts), do: :ok
  def ensure_all_started(_, _), 
# NOTE(review): flattened repo-pack chunk; line breaks restored from the
# embedded "NN |" markers. Code tokens unchanged; only comments added.
    do: {:ok, []}

  def init(_opts) do
    # A sleeping Task stands in for the adapter's supervision tree.
    child_spec = Supervisor.child_spec({Task, fn -> :timer.sleep(:infinity) end}, [])
    {:ok, child_spec, %{meta: :meta}}
  end

  def checkout(_, _, _), do: raise("not implemented")
  def checked_out?(_), do: raise("not implemented")
  def delete(_, _, _, _, _), do: raise("not implemented")
  def insert_all(_, _, _, _, _, _, _, _), do: raise("not implemented")
  def rollback(_, _), do: raise("not implemented")
  def stream(_, _, _, _, _), do: raise("not implemented")
  def update(_, _, _, _, _, _), do: raise("not implemented")

  ## Types

  def loaders(_primitive, type), do: [type]
  def dumpers(_primitive, type), do: [type]
  def autogenerate(_), do: nil

  ## Queryable

  def prepare(operation, query), do: {:nocache, {operation, query}}

  # Migration emulation

  # Emulates SELECT on schema_migrations: returns the recorded versions.
  # Pattern matches assert the query targets schema_migrations and carries
  # the :schema_migration ecto_query option.
  def execute(_, _, {:nocache, {:all, query}}, _, opts) do
    %{from: %{source: {"schema_migrations", _}}} = query
    :schema_migration = opts[:ecto_query]
    versions = MigrationsAgent.get()
    {length(versions), Enum.map(versions, &[elem(&1, 0)])}
  end

  # Emulates DELETE of a single version (migration rollback bookkeeping).
  def execute(_, _, {:nocache, {:delete_all, query}}, params, opts) do
    %{from: %{source: {"schema_migrations", _}}} = query
    [version] = params
    :schema_migration = opts[:ecto_query]
    MigrationsAgent.down(version, opts)
    {1, nil}
  end

  # Emulates INSERT of a migrated version.
  def insert(_, %{source: "schema_migrations"}, val, _, _, opts) do
    :schema_migration = opts[:ecto_query]
    version = Keyword.fetch!(val, :version)
    MigrationsAgent.up(version, opts)
    {:ok, []}
  end

  def in_transaction?(_), do: Process.get(:in_transaction?) || false

  # Tracks transaction state in the process dictionary and notifies the
  # configured test process so tests can assert_receive {:transaction, _, _}.
  def transaction(mod, _opts, fun) do
    Process.put(:in_transaction?, true)
    send(test_process(), {:transaction, mod, fun})
    {:ok, fun.()}
  after
    Process.put(:in_transaction?, false)
  end

  ## Migrations

  def lock_for_migrations(mod, opts, fun) do
    send(test_process(), {:lock_for_migrations, mod, fun, opts})
    fun.()
  end

  def execute_ddl(_, command, _) do
    # Remembered so tests can inspect the last DDL command issued.
    Process.put(:last_command, command)
    {:ok, [{:info, "execute ddl", %{command: command}}]}
  end

  def supports_ddl_transaction? do
    get_config(:supports_ddl_transaction?, false)
  end

  # Test process defaults to self(); overridable via application env
  # (see put_test_adapter_config usage in the migrator tests).
  defp test_process do
    get_config(:test_process, self())
  end

  defp get_config(name, default) do
    :ecto_sql
    |> Application.get_env(__MODULE__, [])
    |> Keyword.get(name, default)
  end
end

defmodule EctoSQL.TestRepo do
  use Ecto.Repo, otp_app: :ecto_sql, adapter: EctoSQL.TestAdapter

  # Per-test default options injected through the process dictionary.
  def default_options(_operation) do
    Process.get(:repo_default_options, [])
  end
end

defmodule EctoSQL.MigrationTestRepo do
  use Ecto.Repo, otp_app: :ecto_sql, adapter: EctoSQL.TestAdapter
end

# Start the repos used by the suite; :tenant_db is a second named instance
# exercised by the dynamic_repo tests.
EctoSQL.TestRepo.start_link()
EctoSQL.TestRepo.start_link(name: :tenant_db)
EctoSQL.MigrationTestRepo.start_link()

--------------------------------------------------------------------------------
/test/test_helper.exs:
--------------------------------------------------------------------------------
# For tasks/generators testing
Mix.start()
Mix.shell(Mix.Shell.Process)
System.put_env("ECTO_EDITOR", "")
Logger.configure(level: :info)

Code.require_file("support/test_repo.exs", __DIR__)
Code.require_file("../integration_test/support/file_helpers.exs", __DIR__)
ExUnit.start()

# Restore the interactive shell after the suite (guarded because
# after_suite/1 is not available on all supported ExUnit versions).
if function_exported?(ExUnit, :after_suite, 1) do
  ExUnit.after_suite(fn _ -> Mix.shell(Mix.Shell.IO) end)
end