├── .gitignore ├── bench ├── bench_helper.exs ├── config.yml ├── support │ ├── migrations.exs │ ├── setup.exs │ ├── repo.exs │ └── schemas.exs ├── README.md └── scripts │ ├── macro │ ├── insert_bench.exs │ └── all_bench.exs │ └── micro │ ├── load_bench.exs │ └── to_sql_bench.exs ├── test ├── test_helper.exs ├── mix │ ├── ecto_sql_test.exs │ └── tasks │ │ ├── ecto.gen.migration_test.exs │ │ ├── ecto.rollback_test.exs │ │ ├── ecto.migrations_test.exs │ │ ├── ecto.migrate_test.exs │ │ └── ecto.dump_load_test.exs ├── ecto │ ├── type_test.exs │ ├── tenant_migrator_test.exs │ └── migrator_repo_test.exs └── test_repo.exs ├── lib ├── ecto │ ├── adapters │ │ ├── sql │ │ │ ├── application.ex │ │ │ ├── stream.ex │ │ │ └── connection.ex │ │ ├── mysql.ex │ │ └── tds │ │ │ └── types.ex │ ├── adapter │ │ ├── structure.ex │ │ └── migration.ex │ └── migration │ │ └── schema_migration.ex └── mix │ ├── ecto_sql.ex │ └── tasks │ ├── ecto.dump.ex │ ├── ecto.migrations.ex │ ├── ecto.load.ex │ ├── ecto.gen.migration.ex │ ├── ecto.rollback.ex │ └── ecto.migrate.ex ├── .formatter.exs ├── integration_test ├── pg │ ├── copy_test.exs │ ├── all_test.exs │ ├── explain_test.exs │ ├── test_helper.exs │ ├── migrations_test.exs │ ├── transaction_test.exs │ ├── constraints_test.exs │ └── storage_test.exs ├── tds │ ├── tds_type_test.exs │ ├── all_test.exs │ ├── explain_test.exs │ ├── lock_test.exs │ ├── storage_test.exs │ ├── constraints_test.exs │ ├── migrations_test.exs │ └── test_helper.exs ├── support │ ├── repo.exs │ ├── file_helpers.exs │ └── migration.exs ├── myxql │ ├── myxql_type_test.exs │ ├── all_test.exs │ ├── explain_test.exs │ ├── upsert_all_test.exs │ ├── migrations_test.exs │ ├── test_helper.exs │ └── storage_test.exs └── sql │ ├── stream.exs │ ├── lock.exs │ ├── logging.exs │ ├── alter.exs │ ├── subquery.exs │ ├── sql.exs │ ├── migrator.exs │ └── transaction.exs ├── .github ├── ISSUE_TEMPLATE.md └── workflows │ └── ci.yml ├── README.md ├── mix.lock ├── Earthfile └── mix.exs 
/.gitignore: -------------------------------------------------------------------------------- 1 | /_build 2 | /bench/results 3 | /deps 4 | /doc 5 | /tmp 6 | erl_crash.dump 7 | -------------------------------------------------------------------------------- /bench/bench_helper.exs: -------------------------------------------------------------------------------- 1 | # Micro benchmarks 2 | Code.require_file("scripts/micro/load_bench.exs", __DIR__) 3 | Code.require_file("scripts/micro/to_sql_bench.exs", __DIR__) 4 | 5 | ## Macro benchmarks needs postgresql and mysql up and running 6 | Code.require_file("scripts/macro/insert_bench.exs", __DIR__) 7 | Code.require_file("scripts/macro/all_bench.exs", __DIR__) 8 | -------------------------------------------------------------------------------- /bench/config.yml: -------------------------------------------------------------------------------- 1 | elixir: 1.5.2 2 | erlang: 20.1.2 3 | environment: 4 | PG_URL: postgres:postgres@localhost 5 | MYXQL_URL: root@localhost 6 | deps: 7 | docker: 8 | - container_name: postgres 9 | image: postgres:9.6.6-alpine 10 | - container_name: mysql 11 | image: mysql:5.7.20 12 | environment: 13 | MYSQL_ALLOW_EMPTY_PASSWORD: "true" 14 | -------------------------------------------------------------------------------- /bench/support/migrations.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Bench.CreateUser do 2 | use Ecto.Migration 3 | 4 | def change do 5 | create table(:users) do 6 | add(:name, :string) 7 | add(:email, :string) 8 | add(:password, :string) 9 | add(:time_attr, :time) 10 | add(:date_attr, :date) 11 | add(:naive_datetime_attr, :naive_datetime) 12 | add(:uuid, :binary_id) 13 | end 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | # For tasks/generators testing 2 | 
Mix.start() 3 | Mix.shell(Mix.Shell.Process) 4 | System.put_env("ECTO_EDITOR", "") 5 | Logger.configure(level: :info) 6 | 7 | Code.require_file "test_repo.exs", __DIR__ 8 | Code.require_file "../integration_test/support/file_helpers.exs", __DIR__ 9 | ExUnit.start() 10 | 11 | if function_exported?(ExUnit, :after_suite, 1) do 12 | ExUnit.after_suite(fn _ -> Mix.shell(Mix.Shell.IO) end) 13 | end 14 | -------------------------------------------------------------------------------- /lib/ecto/adapters/sql/application.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.SQL.Application do 2 | @moduledoc false 3 | use Application 4 | 5 | def start(_type, _args) do 6 | children = [ 7 | {DynamicSupervisor, strategy: :one_for_one, name: Ecto.MigratorSupervisor}, 8 | {Task.Supervisor, name: Ecto.Adapters.SQL.StorageSupervisor}, 9 | ] 10 | 11 | opts = [strategy: :one_for_one, name: Ecto.Adapters.SQL.Supervisor] 12 | Supervisor.start_link(children, opts) 13 | end 14 | end 15 | -------------------------------------------------------------------------------- /test/mix/ecto_sql_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Mix.EctoSQLTest do 2 | use ExUnit.Case, async: true 3 | import Mix.EctoSQL 4 | 5 | defmodule Repo do 6 | def config do 7 | [priv: Process.get(:priv), otp_app: :ecto_sql] 8 | end 9 | end 10 | 11 | test "source_priv_repo" do 12 | Process.put(:priv, nil) 13 | assert source_repo_priv(Repo) == Path.expand("priv/repo", File.cwd!()) 14 | Process.put(:priv, "hello") 15 | assert source_repo_priv(Repo) == Path.expand("hello", File.cwd!()) 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /lib/ecto/adapters/mysql.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.MySQL do 2 | @moduledoc false 3 | 4 | @behaviour Ecto.Adapter 5 | 6 | defp 
error!() do 7 | raise "Ecto.Adapters.MySQL is obsolete, use Ecto.Adapters.MyXQL instead" 8 | end 9 | 10 | defmacro __before_compile__(_env), do: error!() 11 | 12 | def ensure_all_started(_, _), do: error!() 13 | 14 | def init(_), do: error!() 15 | 16 | def checkout(_, _, _), do: error!() 17 | 18 | def checked_out?(_), do: error!() 19 | 20 | def loaders(_, _), do: error!() 21 | 22 | def dumpers(_, _), do: error!() 23 | end 24 | -------------------------------------------------------------------------------- /.formatter.exs: -------------------------------------------------------------------------------- 1 | locals_without_parens = [ 2 | add: 2, 3 | add: 3, 4 | alter: 2, 5 | create: 1, 6 | create: 2, 7 | create_if_not_exists: 1, 8 | create_if_not_exists: 2, 9 | drop: 1, 10 | drop_if_exists: 1, 11 | execute: 1, 12 | execute: 2, 13 | modify: 2, 14 | modify: 3, 15 | remove: 1, 16 | remove: 2, 17 | remove: 3, 18 | rename: 2, 19 | rename: 3, 20 | timestamps: 1 21 | ] 22 | 23 | [ 24 | import_deps: [:ecto], 25 | locals_without_parens: locals_without_parens, 26 | export: [ 27 | locals_without_parens: locals_without_parens 28 | ], 29 | inputs: [] 30 | ] 31 | -------------------------------------------------------------------------------- /integration_test/pg/copy_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.CopyTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Post 6 | 7 | test "copy to and from table" do 8 | read = Ecto.Adapters.SQL.stream(TestRepo, "COPY posts TO STDOUT") 9 | write = Ecto.Adapters.SQL.stream(TestRepo, "COPY posts FROM STDIN") 10 | 11 | TestRepo.transaction fn -> 12 | one = TestRepo.insert!(%Post{title: "one"}) 13 | two = TestRepo.insert!(%Post{title: "two"}) 14 | 15 | data = Enum.map(read, &(&1.rows)) 16 | assert TestRepo.delete_all(Post) == {2, nil} 17 | 18 | assert ^write = Enum.into(data, write) 19 | assert 
TestRepo.all(Post) == [one, two] 20 | end 21 | end 22 | end 23 | -------------------------------------------------------------------------------- /integration_test/tds/tds_type_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.TdsTypeTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Ecto.Type 5 | alias Tds.Ecto.VarChar 6 | alias Ecto.Adapters.Tds 7 | 8 | @varchar_string "some string" 9 | 10 | test "dumps through the adapter" do 11 | assert adapter_dump(Tds, {:map, VarChar}, %{"a" => @varchar_string}) == 12 | {:ok, %{"a" => @varchar_string}} 13 | end 14 | 15 | test "loads through the adapter" do 16 | assert adapter_load(Tds, {:map, VarChar}, %{"a" => {@varchar_string, :varchar}}) == 17 | {:ok, %{"a" => @varchar_string}} 18 | 19 | assert adapter_load(Tds, {:map, VarChar}, %{"a" => @varchar_string}) == 20 | {:ok, %{"a" => @varchar_string}} 21 | end 22 | end 23 | -------------------------------------------------------------------------------- /integration_test/support/repo.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.Repo do 2 | defmacro __using__(opts) do 3 | quote do 4 | use Ecto.Repo, unquote(opts) 5 | 6 | @query_event __MODULE__ 7 | |> Module.split() 8 | |> Enum.map(& &1 |> Macro.underscore() |> String.to_atom()) 9 | |> Kernel.++([:query]) 10 | 11 | def init(_, opts) do 12 | fun = &Ecto.Integration.Repo.handle_event/4 13 | :telemetry.attach_many(__MODULE__, [[:custom], @query_event], fun, :ok) 14 | {:ok, opts} 15 | end 16 | end 17 | end 18 | 19 | def handle_event(event, latency, metadata, _config) do 20 | handler = Process.delete(:telemetry) || fn _, _, _ -> :ok end 21 | handler.(event, latency, metadata) 22 | end 23 | end 24 | -------------------------------------------------------------------------------- /bench/support/setup.exs: -------------------------------------------------------------------------------- 1 | 
Code.require_file("repo.exs", __DIR__) 2 | Code.require_file("migrations.exs", __DIR__) 3 | Code.require_file("schemas.exs", __DIR__) 4 | 5 | alias Ecto.Bench.{PgRepo, MyXQLRepo, CreateUser} 6 | 7 | {:ok, _} = Ecto.Adapters.Postgres.ensure_all_started(PgRepo.config(), :temporary) 8 | {:ok, _} = Ecto.Adapters.MyXQL.ensure_all_started(MyXQLRepo.config(), :temporary) 9 | 10 | _ = Ecto.Adapters.Postgres.storage_down(PgRepo.config()) 11 | :ok = Ecto.Adapters.Postgres.storage_up(PgRepo.config()) 12 | 13 | _ = Ecto.Adapters.MyXQL.storage_down(MyXQLRepo.config()) 14 | :ok = Ecto.Adapters.MyXQL.storage_up(MyXQLRepo.config()) 15 | 16 | {:ok, _pid} = PgRepo.start_link(log: false) 17 | {:ok, _pid} = MyXQLRepo.start_link(log: false) 18 | 19 | :ok = Ecto.Migrator.up(PgRepo, 0, CreateUser, log: false) 20 | :ok = Ecto.Migrator.up(MyXQLRepo, 0, CreateUser, log: false) 21 | -------------------------------------------------------------------------------- /bench/support/repo.exs: -------------------------------------------------------------------------------- 1 | pg_bench_url = System.get_env("PG_URL") || "postgres:postgres@localhost" 2 | myxql_bench_url = System.get_env("MYXQL_URL") || "root@localhost" 3 | 4 | Application.put_env( 5 | :ecto_sql, 6 | Ecto.Bench.PgRepo, 7 | url: "ecto://" <> pg_bench_url <> "/ecto_test", 8 | adapter: Ecto.Adapters.Postgres, 9 | show_sensitive_data_on_connection_error: true 10 | ) 11 | 12 | Application.put_env( 13 | :ecto_sql, 14 | Ecto.Bench.MyXQLRepo, 15 | url: "ecto://" <> myxql_bench_url <> "/ecto_test_myxql", 16 | adapter: Ecto.Adapters.MyXQL, 17 | protocol: :tcp, 18 | show_sensitive_data_on_connection_error: true 19 | ) 20 | 21 | defmodule Ecto.Bench.PgRepo do 22 | use Ecto.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.Postgres, log: false 23 | end 24 | 25 | defmodule Ecto.Bench.MyXQLRepo do 26 | use Ecto.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.MyXQL, log: false 27 | end 28 | 
-------------------------------------------------------------------------------- /integration_test/myxql/myxql_type_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.MyXQLTypeTest do 2 | use Ecto.Integration.Case, async: Application.get_env(:ecto, :async_integration_tests, true) 3 | alias Ecto.Integration.TestRepo 4 | import Ecto.Query 5 | 6 | defmodule Bool do 7 | use Ecto.Schema 8 | 9 | schema "bits" do 10 | field :bit, :boolean 11 | end 12 | end 13 | 14 | test "bit" do 15 | TestRepo.insert_all("bits", [[bit: <<1::1>>], [bit: <<0::1>>]]) 16 | 17 | assert TestRepo.all(from(b in "bits", select: b.bit, order_by: [desc: :bit])) == [ 18 | <<1::1>>, 19 | <<0::1>> 20 | ] 21 | end 22 | 23 | test "bit as boolean" do 24 | TestRepo.insert_all("bits", [[bit: <<1::1>>], [bit: <<0::1>>]]) 25 | 26 | assert TestRepo.all(from(b in Bool, select: b.bit, order_by: [desc: :bit])) == [ 27 | true, 28 | false 29 | ] 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /integration_test/tds/all_test.exs: -------------------------------------------------------------------------------- 1 | ecto = Mix.Project.deps_paths()[:ecto] 2 | Code.require_file "#{ecto}/integration_test/cases/assoc.exs", __DIR__ 3 | Code.require_file "#{ecto}/integration_test/cases/interval.exs", __DIR__ 4 | Code.require_file "#{ecto}/integration_test/cases/joins.exs", __DIR__ 5 | Code.require_file "#{ecto}/integration_test/cases/preload.exs", __DIR__ 6 | Code.require_file "#{ecto}/integration_test/cases/repo.exs", __DIR__ 7 | Code.require_file "#{ecto}/integration_test/cases/type.exs", __DIR__ 8 | 9 | Code.require_file "../sql/alter.exs", __DIR__ 10 | Code.require_file "../sql/logging.exs", __DIR__ 11 | Code.require_file "../sql/migration.exs", __DIR__ 12 | Code.require_file "../sql/migrator.exs", __DIR__ 13 | Code.require_file "../sql/sandbox.exs", __DIR__ 14 | Code.require_file 
"../sql/sql.exs", __DIR__ 15 | # Code.require_file "../sql/stream.exs", __DIR__ 16 | Code.require_file "../sql/subquery.exs", __DIR__ 17 | Code.require_file "../sql/transaction.exs", __DIR__ 18 | -------------------------------------------------------------------------------- /integration_test/myxql/all_test.exs: -------------------------------------------------------------------------------- 1 | ecto = Mix.Project.deps_paths()[:ecto] 2 | Code.require_file "#{ecto}/integration_test/cases/assoc.exs", __DIR__ 3 | Code.require_file "#{ecto}/integration_test/cases/interval.exs", __DIR__ 4 | Code.require_file "#{ecto}/integration_test/cases/joins.exs", __DIR__ 5 | Code.require_file "#{ecto}/integration_test/cases/preload.exs", __DIR__ 6 | Code.require_file "#{ecto}/integration_test/cases/repo.exs", __DIR__ 7 | Code.require_file "#{ecto}/integration_test/cases/type.exs", __DIR__ 8 | 9 | Code.require_file "../sql/alter.exs", __DIR__ 10 | Code.require_file "../sql/lock.exs", __DIR__ 11 | Code.require_file "../sql/logging.exs", __DIR__ 12 | Code.require_file "../sql/migration.exs", __DIR__ 13 | Code.require_file "../sql/migrator.exs", __DIR__ 14 | Code.require_file "../sql/sandbox.exs", __DIR__ 15 | Code.require_file "../sql/sql.exs", __DIR__ 16 | Code.require_file "../sql/stream.exs", __DIR__ 17 | Code.require_file "../sql/subquery.exs", __DIR__ 18 | Code.require_file "../sql/transaction.exs", __DIR__ 19 | -------------------------------------------------------------------------------- /bench/README.md: -------------------------------------------------------------------------------- 1 | # Ecto Benchmarks 2 | 3 | Ecto has a benchmark suite to track performance of sensitive operations. Benchmarks 4 | are run using the [Benchee](https://github.com/PragTob/benchee) library and 5 | need PostgreSQL and MySQL up and running. 
6 | 7 | To run the benchmarks tests just type in the console: 8 | 9 | ``` 10 | # POSIX-compatible shells 11 | $ MIX_ENV=bench mix run bench/bench_helper.exs 12 | ``` 13 | 14 | ``` 15 | # other shells 16 | $ env MIX_ENV=bench mix run bench/bench_helper.exs 17 | ``` 18 | 19 | Benchmarks are inside the `scripts/` directory and are divided into two 20 | categories: 21 | 22 | * `micro benchmarks`: Operations that don't actually interface with the database, 23 | but might need it up and running to start the Ecto agents and processes. 24 | 25 | * `macro benchmarks`: Operations that are actually run in the database. This are 26 | more likely to integration tests. 27 | 28 | You can also run a benchmark individually by giving the path to the benchmark 29 | script instead of `bench/bench_helper.exs`. 30 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ### Precheck 2 | 3 | * Do not use the issues tracker for help or support requests (try Elixir Forum, Stack Overflow, IRC or mailing lists, etc). 4 | * For proposing a new feature, please start a discussion on [elixir-ecto](https://groups.google.com/forum/#!forum/elixir-ecto). 5 | * For bugs, do a quick search and make sure the bug has not yet been reported. 6 | * Finally, be nice and have fun! 7 | 8 | ### Environment 9 | 10 | * Elixir version (elixir -v): 11 | * Database and version (PostgreSQL 9.4, MongoDB 3.2, etc.): 12 | * Ecto version (mix deps): 13 | * Database adapter and version (mix deps): 14 | * Operating system: 15 | 16 | ### Current behavior 17 | 18 | Include code samples, errors and stacktraces if appropriate. 19 | 20 | If you can, please provide an example repository that reproduces the error that maintainers can clone and check locally. This will speed-up the process tremendously - it's very likely you'll be asked to provide one anyway. 
21 | 22 | ### Expected behavior 23 | 24 | Describe expected behavior. 25 | -------------------------------------------------------------------------------- /integration_test/pg/all_test.exs: -------------------------------------------------------------------------------- 1 | ecto = Mix.Project.deps_paths()[:ecto] 2 | Code.require_file "#{ecto}/integration_test/cases/assoc.exs", __DIR__ 3 | Code.require_file "#{ecto}/integration_test/cases/interval.exs", __DIR__ 4 | Code.require_file "#{ecto}/integration_test/cases/joins.exs", __DIR__ 5 | Code.require_file "#{ecto}/integration_test/cases/preload.exs", __DIR__ 6 | Code.require_file "#{ecto}/integration_test/cases/repo.exs", __DIR__ 7 | Code.require_file "#{ecto}/integration_test/cases/type.exs", __DIR__ 8 | Code.require_file "#{ecto}/integration_test/cases/windows.exs", __DIR__ 9 | 10 | Code.require_file "../sql/alter.exs", __DIR__ 11 | Code.require_file "../sql/lock.exs", __DIR__ 12 | Code.require_file "../sql/logging.exs", __DIR__ 13 | Code.require_file "../sql/migration.exs", __DIR__ 14 | Code.require_file "../sql/migrator.exs", __DIR__ 15 | Code.require_file "../sql/sandbox.exs", __DIR__ 16 | Code.require_file "../sql/sql.exs", __DIR__ 17 | Code.require_file "../sql/stream.exs", __DIR__ 18 | Code.require_file "../sql/subquery.exs", __DIR__ 19 | Code.require_file "../sql/transaction.exs", __DIR__ 20 | -------------------------------------------------------------------------------- /integration_test/support/file_helpers.exs: -------------------------------------------------------------------------------- 1 | defmodule Support.FileHelpers do 2 | import ExUnit.Assertions 3 | 4 | @doc """ 5 | Returns the `tmp_path` for tests. 6 | """ 7 | def tmp_path do 8 | Path.expand("../../tmp", __DIR__) 9 | end 10 | 11 | @doc """ 12 | Executes the given function in a temp directory 13 | tailored for this test case and test. 
14 | """ 15 | defmacro in_tmp(fun) do 16 | path = Path.join([tmp_path(), "#{__CALLER__.module}", "#{elem(__CALLER__.function, 0)}"]) 17 | quote do 18 | path = unquote(path) 19 | File.rm_rf!(path) 20 | File.mkdir_p!(path) 21 | File.cd!(path, fn -> unquote(fun).(path) end) 22 | end 23 | end 24 | 25 | @doc """ 26 | Asserts a file was generated. 27 | """ 28 | def assert_file(file) do 29 | assert File.regular?(file), "Expected #{file} to exist, but does not" 30 | end 31 | 32 | @doc """ 33 | Asserts a file was generated and that it matches a given pattern. 34 | """ 35 | def assert_file(file, callback) when is_function(callback, 1) do 36 | assert_file(file) 37 | callback.(File.read!(file)) 38 | end 39 | 40 | def assert_file(file, match) do 41 | assert_file file, &(assert &1 =~ match) 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /integration_test/tds/explain_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ExplainTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Post 6 | import Ecto.Query, only: [from: 2] 7 | 8 | describe "explain" do 9 | test "select" do 10 | explain = TestRepo.explain(:all, from(p in Post, where: p.title == "explain_test", limit: 1)) 11 | assert explain =~ "| Rows | Executes |" 12 | assert explain =~ "| Parallel | EstimateExecutions |" 13 | assert explain =~ "SELECT TOP(1)" 14 | assert explain =~ "explain_test" 15 | end 16 | 17 | test "delete" do 18 | explain = TestRepo.explain(:delete_all, Post) 19 | assert explain =~ "DELETE" 20 | assert explain =~ "p0" 21 | end 22 | 23 | test "update" do 24 | explain = TestRepo.explain(:update_all, from(p in Post, update: [set: [title: "new title"]])) 25 | assert explain =~ "UPDATE" 26 | assert explain =~ "p0" 27 | assert explain =~ "new title" 28 | end 29 | 30 | test "invalid" do 31 | assert_raise(Tds.Error, fn -> 32 | 
TestRepo.explain(:all, from(p in "posts", select: p.invalid, where: p.invalid == "title")) 33 | end) 34 | end 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /lib/ecto/adapters/sql/stream.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.SQL.Stream do 2 | @moduledoc false 3 | 4 | defstruct [:meta, :statement, :params, :opts] 5 | 6 | def build(meta, statement, params, opts) do 7 | %__MODULE__{meta: meta, statement: statement, params: params, opts: opts} 8 | end 9 | end 10 | 11 | alias Ecto.Adapters.SQL.Stream 12 | 13 | defimpl Enumerable, for: Stream do 14 | def count(_), do: {:error, __MODULE__} 15 | 16 | def member?(_, _), do: {:error, __MODULE__} 17 | 18 | def slice(_), do: {:error, __MODULE__} 19 | 20 | def reduce(stream, acc, fun) do 21 | %Stream{meta: meta, statement: statement, params: params, opts: opts} = stream 22 | Ecto.Adapters.SQL.reduce(meta, statement, params, opts, acc, fun) 23 | end 24 | end 25 | 26 | defimpl Collectable, for: Stream do 27 | def into(stream) do 28 | %Stream{meta: meta, statement: statement, params: params, opts: opts} = stream 29 | {state, fun} = Ecto.Adapters.SQL.into(meta, statement, params, opts) 30 | {state, make_into(fun, stream)} 31 | end 32 | 33 | defp make_into(fun, stream) do 34 | fn 35 | state, :done -> 36 | fun.(state, :done) 37 | stream 38 | 39 | state, acc -> 40 | fun.(state, acc) 41 | end 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /integration_test/myxql/explain_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ExplainTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Post 6 | import Ecto.Query, only: [from: 2] 7 | 8 | describe "explain" do 9 | test "select" do 10 | explain = TestRepo.explain(:all, from(p 
in Post, where: p.title == "title"), timeout: 20000) 11 | 12 | assert explain =~ 13 | "| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra |" 14 | 15 | assert explain =~ "p0" 16 | assert explain =~ "SIMPLE" 17 | assert explain =~ "Using where" 18 | end 19 | 20 | test "delete" do 21 | explain = TestRepo.explain(:delete_all, Post) 22 | assert explain =~ "DELETE" 23 | assert explain =~ "p0" 24 | end 25 | 26 | test "update" do 27 | explain = TestRepo.explain(:update_all, from(p in Post, update: [set: [title: "new title"]])) 28 | assert explain =~ "UPDATE" 29 | assert explain =~ "p0" 30 | end 31 | 32 | test "invalid" do 33 | assert_raise(MyXQL.Error, fn -> 34 | TestRepo.explain(:all, from(p in "posts", select: p.invalid, where: p.invalid == "title")) 35 | end) 36 | end 37 | end 38 | end 39 | -------------------------------------------------------------------------------- /bench/support/schemas.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Bench.User do 2 | use Ecto.Schema 3 | 4 | schema "users" do 5 | field(:name, :string) 6 | field(:email, :string) 7 | field(:password, :string) 8 | field(:time_attr, :time) 9 | field(:date_attr, :date) 10 | field(:naive_datetime_attr, :naive_datetime) 11 | field(:uuid, :binary_id) 12 | end 13 | 14 | @required_attrs [ 15 | :name, 16 | :email, 17 | :password, 18 | :time_attr, 19 | :date_attr, 20 | :naive_datetime_attr, 21 | :uuid 22 | ] 23 | 24 | def changeset() do 25 | changeset(sample_data()) 26 | end 27 | 28 | def changeset(data) do 29 | Ecto.Changeset.cast(%__MODULE__{}, data, @required_attrs) 30 | end 31 | 32 | def sample_data do 33 | %{ 34 | name: "Lorem ipsum dolor sit amet, consectetur adipiscing elit.", 35 | email: "foobar@email.com", 36 | password: "mypass", 37 | time_attr: Time.utc_now() |> Time.truncate(:second), 38 | date_attr: Date.utc_today(), 39 | naive_datetime_attr: NaiveDateTime.utc_now() |> 
NaiveDateTime.truncate(:second), 40 | uuid: Ecto.UUID.generate() 41 | } 42 | end 43 | end 44 | 45 | defmodule Ecto.Bench.Game do 46 | use Ecto.Schema 47 | 48 | schema "games" do 49 | field(:name, :string) 50 | field(:price, :float) 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /lib/ecto/adapter/structure.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapter.Structure do 2 | @moduledoc """ 3 | Specifies the adapter structure (dump/load) API. 4 | """ 5 | 6 | @doc """ 7 | Dumps the given structure. 8 | 9 | The path will be looked in the `config` under :dump_path or 10 | default to the structure path inside `default`. 11 | 12 | Returns `:ok` if it was dumped successfully, an error tuple otherwise. 13 | 14 | ## Examples 15 | 16 | structure_dump("priv/repo", username: "postgres", 17 | database: "ecto_test", 18 | hostname: "localhost") 19 | 20 | """ 21 | @callback structure_dump(default :: String.t, config :: Keyword.t) :: 22 | {:ok, String.t} | {:error, term} 23 | 24 | @doc """ 25 | Loads the given structure. 26 | 27 | The path will be looked in the `config` under :dump_path or 28 | default to the structure path inside `default`. 29 | 30 | Returns `:ok` if it was loaded successfully, an error tuple otherwise. 
31 | 32 | ## Examples 33 | 34 | structure_load("priv/repo", username: "postgres", 35 | database: "ecto_test", 36 | hostname: "localhost") 37 | 38 | """ 39 | @callback structure_load(default :: String.t, config :: Keyword.t) :: 40 | {:ok, String.t} | {:error, term} 41 | end 42 | -------------------------------------------------------------------------------- /integration_test/sql/stream.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.StreamTest do 2 | use Ecto.Integration.Case, async: Application.get_env(:ecto, :async_integration_tests, true) 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Post 6 | alias Ecto.Integration.Comment 7 | import Ecto.Query 8 | 9 | test "stream empty" do 10 | assert {:ok, []} = TestRepo.transaction(fn() -> 11 | TestRepo.stream(Post) 12 | |> Enum.to_list() 13 | end) 14 | 15 | assert {:ok, []} = TestRepo.transaction(fn() -> 16 | TestRepo.stream(from p in Post) 17 | |> Enum.to_list() 18 | end) 19 | end 20 | 21 | test "stream without schema" do 22 | %Post{} = TestRepo.insert!(%Post{title: "title1"}) 23 | %Post{} = TestRepo.insert!(%Post{title: "title2"}) 24 | 25 | assert {:ok, ["title1", "title2"]} = TestRepo.transaction(fn() -> 26 | TestRepo.stream(from(p in "posts", order_by: p.title, select: p.title)) 27 | |> Enum.to_list() 28 | end) 29 | end 30 | 31 | test "stream with assoc" do 32 | p1 = TestRepo.insert!(%Post{title: "1"}) 33 | 34 | %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) 35 | %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) 36 | 37 | stream = TestRepo.stream(Ecto.assoc(p1, :comments)) 38 | assert {:ok, [c1, c2]} = TestRepo.transaction(fn() -> 39 | Enum.to_list(stream) 40 | end) 41 | assert c1.id == cid1 42 | assert c2.id == cid2 43 | end 44 | end 45 | -------------------------------------------------------------------------------- /integration_test/tds/lock_test.exs: 
-------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.LockTest do 2 | # We can keep this test async as long as it 3 | # is the only one accessing the lock_test table. 4 | use ExUnit.Case, async: true 5 | 6 | import Ecto.Query 7 | alias Ecto.Integration.PoolRepo 8 | 9 | defmodule LockCounter do 10 | use Ecto.Schema 11 | 12 | schema "lock_counters" do 13 | field :count, :integer 14 | end 15 | end 16 | 17 | setup do 18 | PoolRepo.delete_all(LockCounter) 19 | :ok 20 | end 21 | 22 | test "hints for update" do 23 | %{id: id} = PoolRepo.insert!(%LockCounter{count: 1}) 24 | pid = self() 25 | query = from(lc in LockCounter, hints: ["UPDLOCK"], where: lc.id == ^id) 26 | 27 | {:ok, new_pid} = 28 | Task.start_link fn -> 29 | assert_receive :select_for_update, 5000 30 | 31 | PoolRepo.transaction(fn -> 32 | [post] = PoolRepo.all(query) # this should block until the other trans. commit 33 | post |> Ecto.Changeset.change(count: post.count + 1) |> PoolRepo.update! 34 | end) 35 | 36 | send pid, :updated 37 | end 38 | 39 | PoolRepo.transaction(fn -> 40 | [post] = PoolRepo.all(query) # select and lock the row 41 | send new_pid, :select_for_update # signal second process to begin a transaction 42 | post |> Ecto.Changeset.change(count: post.count + 1) |> PoolRepo.update! 43 | end) 44 | 45 | assert_receive :updated, 5000 46 | 47 | # Final count will be 3 if SELECT ... FOR UPDATE worked and 2 otherwise 48 | assert [%LockCounter{count: 3}] = PoolRepo.all(LockCounter) 49 | end 50 | end 51 | -------------------------------------------------------------------------------- /lib/mix/ecto_sql.ex: -------------------------------------------------------------------------------- 1 | defmodule Mix.EctoSQL do 2 | @moduledoc false 3 | 4 | @doc """ 5 | Ensures the given repository's migrations paths exists on the file system. 
6 | """ 7 | @spec ensure_migrations_paths(Ecto.Repo.t, Keyword.t) :: [String.t] 8 | def ensure_migrations_paths(repo, opts) do 9 | paths = Keyword.get_values(opts, :migrations_path) 10 | paths = if paths == [], do: [Path.join(source_repo_priv(repo), "migrations")], else: paths 11 | 12 | if not Mix.Project.umbrella?() do 13 | for path <- paths, not File.dir?(path) do 14 | raise_missing_migrations(Path.relative_to_cwd(path), repo) 15 | end 16 | end 17 | 18 | paths 19 | end 20 | 21 | defp raise_missing_migrations(path, repo) do 22 | Mix.raise """ 23 | Could not find migrations directory #{inspect path} 24 | for repo #{inspect repo}. 25 | 26 | This may be because you are in a new project and the 27 | migration directory has not been created yet. Creating an 28 | empty directory at the path above will fix this error. 29 | 30 | If you expected existing migrations to be found, please 31 | make sure your repository has been properly configured 32 | and the configured path exists. 33 | """ 34 | end 35 | 36 | @doc """ 37 | Returns the private repository path relative to the source. 
38 | """ 39 | def source_repo_priv(repo) do 40 | config = repo.config() 41 | priv = config[:priv] || "priv/#{repo |> Module.split() |> List.last() |> Macro.underscore()}" 42 | app = Keyword.fetch!(config, :otp_app) 43 | Path.join(Mix.Project.deps_paths()[app] || File.cwd!(), priv) 44 | end 45 | end 46 | -------------------------------------------------------------------------------- /integration_test/pg/explain_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ExplainTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Post 6 | import Ecto.Query, only: [from: 2] 7 | 8 | test "explain" do 9 | explain = TestRepo.explain(:all, Post, analyze: true, verbose: true, timeout: 20000) 10 | assert explain =~ "cost=" 11 | assert explain =~ "actual time=" 12 | assert explain =~ "loops=" 13 | assert explain =~ "Output:" 14 | assert explain =~ ~r/Planning [T|t]ime:/ 15 | assert explain =~ ~r/Execution [T|t]ime:/ 16 | 17 | explain = TestRepo.explain(:delete_all, Post) 18 | assert explain =~ "Delete on posts p0" 19 | assert explain =~ "cost=" 20 | 21 | explain = TestRepo.explain(:update_all, from(p in Post, update: [set: [title: "new title"]])) 22 | assert explain =~ "Update on posts p0" 23 | assert explain =~ "cost=" 24 | 25 | assert_raise(ArgumentError, "bad boolean value 1", fn -> 26 | TestRepo.explain(:all, Post, analyze: "1") 27 | end) 28 | end 29 | 30 | test "explain MAP format" do 31 | [explain] = TestRepo.explain(:all, Post, analyze: true, verbose: true, timeout: 20000, format: :map) 32 | keys = explain["Plan"] |> Map.keys 33 | assert Enum.member?(keys, "Actual Loops") 34 | assert Enum.member?(keys, "Actual Rows") 35 | assert Enum.member?(keys, "Actual Startup Time") 36 | end 37 | 38 | test "explain YAML format" do 39 | explain = TestRepo.explain(:all, Post, analyze: true, verbose: true, timeout: 20000, format: :yaml) 40 | assert explain 
=~ ~r/Plan:/ 41 | assert explain =~ ~r/Node Type:/ 42 | assert explain =~ ~r/Relation Name:/ 43 | end 44 | end 45 | -------------------------------------------------------------------------------- /integration_test/myxql/upsert_all_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.UpsertAllTest do 2 | use Ecto.Integration.Case 3 | 4 | alias Ecto.Integration.TestRepo 5 | import Ecto.Query 6 | alias Ecto.Integration.Post 7 | 8 | test "on conflict raise" do 9 | post = [title: "first", uuid: "6fa459ea-ee8a-3ca4-894e-db77e160355e"] 10 | {1, nil} = TestRepo.insert_all(Post, [post], on_conflict: :raise) 11 | assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: :raise)) 12 | end 13 | 14 | test "on conflict ignore" do 15 | post = [title: "first", uuid: "6fa459ea-ee8a-3ca4-894e-db77e160355e"] 16 | assert TestRepo.insert_all(Post, [post], on_conflict: :nothing) == 17 | {1, nil} 18 | assert TestRepo.insert_all(Post, [post], on_conflict: :nothing) == 19 | {1, nil} 20 | end 21 | 22 | test "on conflict keyword list" do 23 | on_conflict = [set: [title: "second"]] 24 | post = [title: "first", uuid: "6fa459ea-ee8a-3ca4-894e-db77e160355e"] 25 | {1, nil} = TestRepo.insert_all(Post, [post], on_conflict: on_conflict) 26 | 27 | assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict) == 28 | {2, nil} 29 | assert TestRepo.all(from p in Post, select: p.title) == ["second"] 30 | end 31 | 32 | test "on conflict query and conflict target" do 33 | on_conflict = from Post, update: [set: [title: "second"]] 34 | post = [title: "first", uuid: "6fa459ea-ee8a-3ca4-894e-db77e160355e"] 35 | assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict) == 36 | {1, nil} 37 | 38 | assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict) == 39 | {2, nil} 40 | assert TestRepo.all(from p in Post, select: p.title) == ["second"] 41 | end 42 | end 43 | 
-------------------------------------------------------------------------------- /bench/scripts/macro/insert_bench.exs: -------------------------------------------------------------------------------- 1 | # -----------------------------------Goal-------------------------------------- 2 | # Compare the performance of inserting changesets and structs in the different 3 | # supported databases 4 | 5 | # -------------------------------Description----------------------------------- 6 | # This benchmark tracks performance of inserting changesets and structs in the 7 | # database with Repo.insert!/1 function. The query pass through 8 | # the steps of translating the SQL statements, sending them to the database and 9 | # returning the result of the transaction. Both, Ecto Adapters and Database itself 10 | # play a role and can affect the results of this benchmark. 11 | 12 | # ----------------------------Factors(don't change)--------------------------- 13 | # Different adapters supported by Ecto with the proper database up and running 14 | 15 | # ----------------------------Parameters(change)------------------------------- 16 | # Different inputs to be inserted, aka Changesets and Structs 17 | 18 | Code.require_file("../../support/setup.exs", __DIR__) 19 | 20 | alias Ecto.Bench.User 21 | 22 | inputs = %{ 23 | "Struct" => struct(User, User.sample_data()), 24 | "Changeset" => User.changeset(User.sample_data()) 25 | } 26 | 27 | jobs = %{ 28 | "Pg Insert" => fn entry -> Ecto.Bench.PgRepo.insert!(entry) end, 29 | "MyXQL Insert" => fn entry -> Ecto.Bench.MyXQLRepo.insert!(entry) end 30 | } 31 | 32 | path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results" 33 | file = Path.join(path, "insert.json") 34 | 35 | Benchee.run( 36 | jobs, 37 | inputs: inputs, 38 | formatters: [Benchee.Formatters.JSON, Benchee.Formatters.Console], 39 | formatter_options: [json: [file: file]] 40 | ) 41 | 42 | # Clean inserted data 43 | Ecto.Bench.PgRepo.delete_all(User) 44 | 
Ecto.Bench.MyXQLRepo.delete_all(User)
--------------------------------------------------------------------------------
/test/ecto/type_test.exs:
--------------------------------------------------------------------------------
defmodule Ecto.TypeTest do
  use ExUnit.Case, async: true

  import Ecto.Type
  alias Ecto.Adapters.{MyXQL, Postgres, Tds}

  # One UUID in its canonical string form, and the same value as a raw
  # 16-byte binary.
  @uuid_string "bfe0888c-5c59-4bb3-adfd-71f0b85d3db7"
  @uuid_binary <<191, 224, 136, 140, 92, 89, 75, 179, 173, 253, 113, 240, 184, 93, 61, 183>>
  # Same UUID, but with a different byte layout in the leading fields
  # (compare with @uuid_binary) as produced by the TDS/MSSQL adapter below.
  @mssql_uuid_binary <<140, 136, 224, 191, 89, 92, 179, 75, 173, 253, 113, 240, 184, 93, 61, 183>>

  # We don't effectively dump because we need to keep JSON encoding:
  # each adapter keeps the UUID as its string form inside the map.
  test "dumps through the adapter" do
    assert adapter_dump(MyXQL, {:map, Ecto.UUID}, %{"a" => @uuid_string}) ==
           {:ok, %{"a" => @uuid_string}}

    assert adapter_dump(Postgres, {:map, Ecto.UUID}, %{"a" => @uuid_string}) ==
           {:ok, %{"a" => @uuid_string}}

    assert adapter_dump(Tds, {:map, Elixir.Tds.Ecto.UUID}, %{"a" => @uuid_string}) ==
           {:ok, %{"a" => @uuid_string}}
  end

  # Therefore we need to support both binaries and strings when loading:
  # each adapter normalizes either representation to the string form.
  test "loads through the adapter" do
    assert adapter_load(MyXQL, {:map, Ecto.UUID}, %{"a" => @uuid_binary}) ==
           {:ok, %{"a" => @uuid_string}}

    assert adapter_load(Postgres, {:map, Ecto.UUID}, %{"a" => @uuid_binary}) ==
           {:ok, %{"a" => @uuid_string}}

    assert adapter_load(Tds, {:map, Elixir.Tds.Ecto.UUID}, %{"a" => @mssql_uuid_binary}) ==
           {:ok, %{"a" => @uuid_string}}

    assert adapter_load(MyXQL, {:map, Ecto.UUID}, %{"a" => @uuid_string}) ==
           {:ok, %{"a" => @uuid_string}}

    assert adapter_load(Postgres, {:map, Ecto.UUID}, %{"a" => @uuid_string}) ==
           {:ok, %{"a" => @uuid_string}}

    assert adapter_load(Tds, {:map, Elixir.Tds.Ecto.UUID}, %{"a" => @uuid_string}) ==
           {:ok, %{"a" => @uuid_string}}
  end
end
-------------------------------------------------------------------------------- /integration_test/sql/lock.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.LockTest do 2 | # We can keep this test async as long as it 3 | # is the only one accessing the lock_test table. 4 | use ExUnit.Case, async: true 5 | 6 | import Ecto.Query 7 | alias Ecto.Integration.PoolRepo 8 | 9 | defmodule LockCounter do 10 | use Ecto.Schema 11 | 12 | schema "lock_counters" do 13 | field :count, :integer 14 | end 15 | end 16 | 17 | setup do 18 | PoolRepo.delete_all(LockCounter) 19 | :ok 20 | end 21 | 22 | test "lock for update" do 23 | %{id: id} = PoolRepo.insert!(%LockCounter{count: 1}) 24 | pid = self() 25 | 26 | lock_for_update = 27 | Application.get_env(:ecto_sql, :lock_for_update) || 28 | raise ":lock_for_update not set in :ecto application" 29 | 30 | # Here we are manually inserting the lock in the query 31 | # to test multiple adapters. Never do this in actual 32 | # application code: it is not safe and not public. 33 | query = from(lc in LockCounter, where: lc.id == ^id) 34 | query = %{query | lock: lock_for_update} 35 | 36 | {:ok, new_pid} = 37 | Task.start_link fn -> 38 | assert_receive :select_for_update, 5000 39 | 40 | PoolRepo.transaction(fn -> 41 | [post] = PoolRepo.all(query) # this should block until the other trans. commit 42 | post |> Ecto.Changeset.change(count: post.count + 1) |> PoolRepo.update! 43 | end) 44 | 45 | send pid, :updated 46 | end 47 | 48 | PoolRepo.transaction(fn -> 49 | [post] = PoolRepo.all(query) # select and lock the row 50 | send new_pid, :select_for_update # signal second process to begin a transaction 51 | post |> Ecto.Changeset.change(count: post.count + 1) |> PoolRepo.update! 52 | end) 53 | 54 | assert_receive :updated, 5000 55 | 56 | # Final count will be 3 if SELECT ... 
FOR UPDATE worked and 2 otherwise 57 | assert [%LockCounter{count: 3}] = PoolRepo.all(LockCounter) 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /bench/scripts/macro/all_bench.exs: -------------------------------------------------------------------------------- 1 | # -----------------------------------Goal-------------------------------------- 2 | # Compare the performance of querying all objects of the different supported 3 | # databases 4 | 5 | # -------------------------------Description----------------------------------- 6 | # This benchmark tracks performance of querying a set of objects registered in 7 | # the database with Repo.all/2 function. The query pass through 8 | # the steps of translating the SQL statements, sending them to the database and 9 | # load the results into Ecto structures. Both, Ecto Adapters and Database itself 10 | # play a role and can affect the results of this benchmark. 11 | 12 | # ----------------------------Factors(don't change)--------------------------- 13 | # Different adapters supported by Ecto with the proper database up and running 14 | 15 | # ----------------------------Parameters(change)------------------------------- 16 | # There is only a unique parameter in this benchmark, the User objects to be 17 | # fetched. 
18 | 19 | Code.require_file("../../support/setup.exs", __DIR__) 20 | 21 | alias Ecto.Bench.User 22 | 23 | limit = 5_000 24 | 25 | users = 26 | 1..limit 27 | |> Enum.map(fn _ -> User.sample_data() end) 28 | 29 | # We need to insert data to fetch 30 | Ecto.Bench.PgRepo.insert_all(User, users) 31 | Ecto.Bench.MyXQLRepo.insert_all(User, users) 32 | 33 | jobs = %{ 34 | "Pg Repo.all/2" => fn -> Ecto.Bench.PgRepo.all(User, limit: limit) end, 35 | "MyXQL Repo.all/2" => fn -> Ecto.Bench.MyXQLRepo.all(User, limit: limit) end 36 | } 37 | 38 | path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results" 39 | file = Path.join(path, "all.json") 40 | 41 | Benchee.run( 42 | jobs, 43 | formatters: [Benchee.Formatters.JSON, Benchee.Formatters.Console], 44 | formatter_options: [json: [file: file]], 45 | time: 10, 46 | after_each: fn results -> 47 | ^limit = length(results) 48 | end 49 | ) 50 | 51 | # Clean inserted data 52 | Ecto.Bench.PgRepo.delete_all(User) 53 | Ecto.Bench.MyXQLRepo.delete_all(User) 54 | -------------------------------------------------------------------------------- /integration_test/tds/storage_test.exs: -------------------------------------------------------------------------------- 1 | Code.require_file "../support/file_helpers.exs", __DIR__ 2 | 3 | defmodule Ecto.Integration.StorageTest do 4 | use ExUnit.Case 5 | 6 | @moduletag :capture_log 7 | 8 | alias Ecto.Adapters.Tds 9 | 10 | def params do 11 | url = Application.get_env(:ecto_sql, :tds_test_url) <> "/storage_mgt" 12 | [log: false] ++ Ecto.Repo.Supervisor.parse_url(url) 13 | end 14 | 15 | def wrong_params() do 16 | Keyword.merge params(), 17 | [username: "randomuser", 18 | password: "password1234"] 19 | end 20 | 21 | test "storage up (twice in a row)" do 22 | assert :ok == Tds.storage_up(params()) 23 | assert {:error, :already_up} == Tds.storage_up(params()) 24 | after 25 | Tds.storage_down(params()) 26 | end 27 | 28 | test "storage down (twice in a row)" do 29 | assert :ok == 
Tds.storage_up(params()) 30 | assert :ok == Tds.storage_down(params()) 31 | assert {:error, :already_down} == Tds.storage_down(params()) 32 | end 33 | 34 | test "storage up and down (wrong credentials)" do 35 | refute :ok == Tds.storage_up(wrong_params()) 36 | assert :ok == Tds.storage_up(params()) 37 | refute :ok == Tds.storage_down(wrong_params()) 38 | after 39 | Tds.storage_down(params()) 40 | end 41 | 42 | defmodule Migration do 43 | use Ecto.Migration 44 | def change, do: :ok 45 | end 46 | 47 | test "storage status is up when database is created" do 48 | Tds.storage_up(params()) 49 | assert :up == Tds.storage_status(params()) 50 | after 51 | Tds.storage_down(params()) 52 | end 53 | 54 | test "storage status is down when database is not created" do 55 | Tds.storage_up(params()) 56 | Tds.storage_down(params()) 57 | assert :down == Tds.storage_status(params()) 58 | end 59 | 60 | test "storage status is an error when wrong credentials are passed" do 61 | assert ExUnit.CaptureLog.capture_log(fn -> 62 | assert {:error, _} = Tds.storage_status(wrong_params()) 63 | end) =~ ~r"Login failed for user 'randomuser'" 64 | end 65 | end 66 | -------------------------------------------------------------------------------- /integration_test/tds/constraints_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ConstraintsTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Ecto.Migrator, only: [up: 4] 5 | alias Ecto.Integration.PoolRepo 6 | 7 | defmodule ConstraintMigration do 8 | use Ecto.Migration 9 | 10 | @table table(:constraints_test) 11 | 12 | def change do 13 | create @table do 14 | add :price, :integer 15 | add :from, :integer 16 | add :to, :integer 17 | end 18 | create constraint(@table.name, :cannot_overlap, check: "[from] < [to]") 19 | end 20 | end 21 | 22 | defmodule Constraint do 23 | use Ecto.Integration.Schema 24 | 25 | schema "constraints_test" do 26 | field :price, :integer 27 | field :from, 
:integer 28 | field :to, :integer 29 | end 30 | end 31 | 32 | @base_migration 2_000_000 33 | 34 | setup_all do 35 | ExUnit.CaptureLog.capture_log(fn -> 36 | num = @base_migration + System.unique_integer([:positive]) 37 | up(PoolRepo, num, ConstraintMigration, log: false) 38 | end) 39 | 40 | :ok 41 | end 42 | 43 | test "check constraint" do 44 | changeset = Ecto.Changeset.change(%Constraint{}, from: 0, to: 10) 45 | {:ok, _} = PoolRepo.insert(changeset) 46 | 47 | non_overlapping_changeset = Ecto.Changeset.change(%Constraint{}, from: 11, to: 12) 48 | {:ok, _} = PoolRepo.insert(non_overlapping_changeset) 49 | 50 | overlapping_changeset = Ecto.Changeset.change(%Constraint{}, from: 1900, to: 12) 51 | 52 | exception = 53 | assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to insert struct/, fn -> 54 | PoolRepo.insert(overlapping_changeset) 55 | end 56 | assert exception.message =~ "cannot_overlap (check_constraint)" 57 | assert exception.message =~ "The changeset has not defined any constraint." 
58 | assert exception.message =~ "call `check_constraint/3`" 59 | 60 | {:error, changeset} = 61 | overlapping_changeset 62 | |> Ecto.Changeset.check_constraint(:from, name: :cannot_overlap) 63 | |> PoolRepo.insert() 64 | assert changeset.errors == [from: {"is invalid", [constraint: :check, constraint_name: "cannot_overlap"]}] 65 | assert changeset.data.__meta__.state == :built 66 | end 67 | end 68 | -------------------------------------------------------------------------------- /test/ecto/tenant_migrator_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.TenantMigratorTest do 2 | use ExUnit.Case 3 | 4 | import Ecto.Migrator 5 | import ExUnit.CaptureLog 6 | 7 | alias EctoSQL.TestRepo 8 | 9 | defmodule Migration do 10 | use Ecto.Migration 11 | 12 | def up do 13 | execute "up" 14 | end 15 | 16 | def down do 17 | execute "down" 18 | end 19 | end 20 | 21 | defmodule ChangeMigration do 22 | use Ecto.Migration 23 | 24 | def change do 25 | create table(:posts) do 26 | add :name, :string 27 | end 28 | 29 | create index(:posts, [:title]) 30 | end 31 | end 32 | 33 | setup do 34 | {:ok, _} = start_supervised({MigrationsAgent, [{1, nil}, {2, nil}, {3, nil}]}) 35 | :ok 36 | end 37 | 38 | def put_test_adapter_config(config) do 39 | Application.put_env(:ecto_sql, EctoSQL.TestAdapter, config) 40 | 41 | on_exit fn -> 42 | Application.delete_env(:ecto, EctoSQL.TestAdapter) 43 | end 44 | end 45 | 46 | describe "dynamic_repo option" do 47 | test "upwards and downwards migrations" do 48 | assert run(TestRepo, [{3, ChangeMigration}, {4, Migration}], :up, to: 4, log: false, dynamic_repo: :tenant_db) == [4] 49 | assert run(TestRepo, [{2, ChangeMigration}, {3, Migration}], :down, all: true, log: false, dynamic_repo: :tenant_db) == [3, 2] 50 | end 51 | 52 | test "down invokes the repository adapter with down commands" do 53 | assert down(TestRepo, 0, Migration, log: false, dynamic_repo: :tenant_db) == :already_down 54 | assert 
down(TestRepo, 2, Migration, log: false, dynamic_repo: :tenant_db) == :ok 55 | end 56 | 57 | test "up invokes the repository adapter with up commands" do 58 | assert up(TestRepo, 3, Migration, log: false, dynamic_repo: :tenant_db) == :already_up 59 | assert up(TestRepo, 4, Migration, log: false, dynamic_repo: :tenant_db) == :ok 60 | end 61 | 62 | test "migrations run inside a transaction if the adapter supports ddl transactions" do 63 | capture_log fn -> 64 | put_test_adapter_config(supports_ddl_transaction?: true, test_process: self()) 65 | up(TestRepo, 0, Migration, dynamic_repo: :tenant_db) 66 | assert_receive {:transaction, _, _} 67 | end 68 | end 69 | end 70 | end 71 | -------------------------------------------------------------------------------- /lib/ecto/migration/schema_migration.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Migration.SchemaMigration do 2 | # Defines a schema that works with a table that tracks schema migrations. 3 | # The table name defaults to `schema_migrations`. 4 | @moduledoc false 5 | use Ecto.Schema 6 | 7 | import Ecto.Query, only: [from: 2] 8 | 9 | @primary_key false 10 | schema "schema_migrations" do 11 | field :version, :integer 12 | timestamps updated_at: false 13 | end 14 | 15 | # The migration flag is used to signal to the repository 16 | # we are in a migration operation. 
17 | @default_opts [timeout: :infinity, log: false, schema_migration: true] 18 | 19 | def ensure_schema_migrations_table!(repo, config, opts) do 20 | {repo, source} = get_repo_and_source(repo, config) 21 | table_name = String.to_atom(source) 22 | table = %Ecto.Migration.Table{name: table_name, prefix: opts[:prefix]} 23 | meta = Ecto.Adapter.lookup_meta(repo.get_dynamic_repo()) 24 | 25 | commands = [ 26 | {:add, :version, :bigint, primary_key: true}, 27 | {:add, :inserted_at, :naive_datetime, []} 28 | ] 29 | 30 | repo.__adapter__().execute_ddl(meta, {:create_if_not_exists, table, commands}, @default_opts) 31 | end 32 | 33 | def versions(repo, config, prefix) do 34 | {repo, source} = get_repo_and_source(repo, config) 35 | {repo, from(m in source, select: type(m.version, :integer)), [prefix: prefix] ++ @default_opts} 36 | end 37 | 38 | def up(repo, config, version, opts) do 39 | {repo, source} = get_repo_and_source(repo, config) 40 | 41 | %__MODULE__{version: version} 42 | |> Ecto.put_meta(source: source) 43 | |> repo.insert(default_opts(opts)) 44 | end 45 | 46 | def down(repo, config, version, opts) do 47 | {repo, source} = get_repo_and_source(repo, config) 48 | 49 | from(m in source, where: m.version == type(^version, :integer)) 50 | |> repo.delete_all(default_opts(opts)) 51 | end 52 | 53 | def get_repo_and_source(repo, config) do 54 | {Keyword.get(config, :migration_repo, repo), 55 | Keyword.get(config, :migration_source, "schema_migrations")} 56 | end 57 | 58 | defp default_opts(opts) do 59 | Keyword.merge( 60 | @default_opts, 61 | [prefix: opts[:prefix], log: Keyword.get(opts, :log_migrator_sql, false)] 62 | ) 63 | end 64 | end 65 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: [push, pull_request] 3 | jobs: 4 | test: 5 | name: unittest 6 | runs-on: ubuntu-latest 7 | strategy: 8 | fail-fast: false 9 | 
matrix: 10 | elixirbase: 11 | - "1.11.0-erlang-23.1.1-alpine-3.13.1" 12 | - "1.11.0-erlang-21.3.8.21-alpine-3.13.1" 13 | steps: 14 | - uses: earthly/actions/setup-earthly@v1 15 | with: 16 | version: v0.5.10 17 | - uses: actions/checkout@v2 18 | - name: test ectl_sql 19 | run: earthly -P --ci --build-arg ELIXIR_BASE=${{matrix.elixirbase}} +test 20 | test-postgres: 21 | name: postgres integration test 22 | runs-on: ubuntu-latest 23 | strategy: 24 | fail-fast: false 25 | matrix: 26 | elixirbase: 27 | - "1.9.4-erlang-22.3.4.16-alpine-3.13.1" 28 | postgres: 29 | - "11.11" 30 | - "9.6" 31 | - "9.5" 32 | steps: 33 | - uses: earthly/actions/setup-earthly@v1 34 | with: 35 | version: v0.5.10 36 | - uses: actions/checkout@v2 37 | - name: test ecto_sql 38 | run: earthly -P --ci --build-arg ELIXIR_BASE=${{matrix.elixirbase}} --build-arg POSTGRES=${{matrix.postgres}} +integration-test-postgres 39 | test-mysql: 40 | name: mysql integration test 41 | runs-on: ubuntu-latest 42 | strategy: 43 | fail-fast: false 44 | matrix: 45 | elixirbase: 46 | - "1.9.4-erlang-22.3.4.16-alpine-3.13.1" 47 | mysql: 48 | - "5.7" 49 | steps: 50 | - uses: earthly/actions/setup-earthly@v1 51 | with: 52 | version: v0.5.10 53 | - uses: actions/checkout@v2 54 | - name: test ecto_sql 55 | run: earthly -P --ci --build-arg ELIXIR_BASE=${{matrix.elixirbase}} --build-arg POSTGRES=${{matrix.postgres}} +integration-test-mysql 56 | test-mssql: 57 | name: mssql integration test 58 | runs-on: ubuntu-latest 59 | strategy: 60 | fail-fast: false 61 | matrix: 62 | elixirbase: 63 | - "1.9.4-erlang-22.1.7-alpine-3.11.3" 64 | mssql: 65 | - "2017" 66 | - "2019" 67 | steps: 68 | - uses: earthly/actions/setup-earthly@v1 69 | with: 70 | version: v0.5.10 71 | - uses: actions/checkout@v2 72 | - name: test ecto_sql 73 | run: earthly -P --ci --build-arg ELIXIR_BASE=${{matrix.elixirbase}} --build-arg MSSQL=${{matrix.mssql}} +integration-test-mssql 74 | -------------------------------------------------------------------------------- 
/bench/scripts/micro/load_bench.exs: -------------------------------------------------------------------------------- 1 | # -----------------------------------Goal-------------------------------------- 2 | # Compare the implementation of loading raw database data into Ecto structures by 3 | # the different database adapters 4 | 5 | # -------------------------------Description----------------------------------- 6 | # Repo.load/2 is an important step of a database query. 7 | # This benchmark tracks performance of loading "raw" data into ecto structures 8 | # Raw data can be in different types (e.g. keyword lists, maps), in this tests 9 | # we benchmark against map inputs 10 | 11 | # ----------------------------Factors(don't change)--------------------------- 12 | # Different adapters supported by Ecto, each one has its own implementation that 13 | # is tested against different inputs 14 | 15 | # ----------------------------Parameters(change)------------------------------- 16 | # Different sizes of raw data(small, medium, big) and different attribute types 17 | # such as UUID, Date and Time fetched from the database and needs to be 18 | # loaded into Ecto structures. 
19 | 20 | Code.require_file("../../support/setup.exs", __DIR__) 21 | 22 | alias Ecto.Bench.User 23 | 24 | inputs = %{ 25 | "Small 1 Thousand" => 26 | 1..1_000 |> Enum.map(fn _ -> %{name: "Alice", email: "email@email.com"} end), 27 | "Medium 100 Thousand" => 28 | 1..100_000 |> Enum.map(fn _ -> %{name: "Alice", email: "email@email.com"} end), 29 | "Big 1 Million" => 30 | 1..1_000_000 |> Enum.map(fn _ -> %{name: "Alice", email: "email@email.com"} end), 31 | "Time attr" => 32 | 1..100_000 |> Enum.map(fn _ -> %{name: "Alice", time_attr: ~T[21:25:04.361140]} end), 33 | "Date attr" => 1..100_000 |> Enum.map(fn _ -> %{name: "Alice", date_attr: ~D[2018-06-20]} end), 34 | "NaiveDateTime attr" => 35 | 1..100_000 36 | |> Enum.map(fn _ -> %{name: "Alice", naive_datetime_attr: ~N[2019-06-20 21:32:07.424178]} end), 37 | "UUID attr" => 38 | 1..100_000 39 | |> Enum.map(fn _ -> %{name: "Alice", uuid: Ecto.UUID.bingenerate()} end) 40 | } 41 | 42 | jobs = %{ 43 | "Pg Loader" => fn data -> Enum.map(data, &Ecto.Bench.PgRepo.load(User, &1)) end, 44 | "MyXQL Loader" => fn data -> Enum.map(data, &Ecto.Bench.MyXQLRepo.load(User, &1)) end 45 | } 46 | 47 | path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results" 48 | file = Path.join(path, "load.json") 49 | 50 | Benchee.run( 51 | jobs, 52 | inputs: inputs, 53 | formatters: [Benchee.Formatters.JSON, Benchee.Formatters.Console], 54 | formatter_options: [json: [file: file]] 55 | ) 56 | -------------------------------------------------------------------------------- /lib/ecto/adapter/migration.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapter.Migration do 2 | @moduledoc """ 3 | Specifies the adapter migrations API. 
4 | """ 5 | 6 | alias Ecto.Migration.Table 7 | alias Ecto.Migration.Index 8 | alias Ecto.Migration.Reference 9 | 10 | @type adapter_meta :: Ecto.Adapter.adapter_meta() 11 | 12 | @typedoc "All migration commands" 13 | @type command :: 14 | raw :: 15 | String.t() 16 | | {:create, Table.t(), [table_subcommand]} 17 | | {:create_if_not_exists, Table.t(), [table_subcommand]} 18 | | {:alter, Table.t(), [table_subcommand]} 19 | | {:drop, Table.t(), :restrict | :cascade} 20 | | {:drop_if_exists, Table.t(), :restrict | :cascade} 21 | | {:create, Index.t()} 22 | | {:create_if_not_exists, Index.t()} 23 | | {:drop, Index.t(), :restrict | :cascade} 24 | | {:drop_if_exists, Index.t(), :restrict | :cascade} 25 | 26 | @typedoc "All commands allowed within the block passed to `table/2`" 27 | @type table_subcommand :: 28 | {:add, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary(), Keyword.t()} 29 | | {:add_if_not_exists, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary(), Keyword.t()} 30 | | {:modify, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary(), Keyword.t()} 31 | | {:remove, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary(), Keyword.t()} 32 | | {:remove, field :: atom} 33 | | {:remove_if_exists, type :: Ecto.Type.t() | Reference.t() | binary()} 34 | 35 | @typedoc """ 36 | A struct that represents a table or index in a database schema. 37 | 38 | These database objects can be modified through the use of a Data 39 | Definition Language, hence the name DDL object. 40 | """ 41 | @type ddl_object :: Table.t() | Index.t() 42 | 43 | @doc """ 44 | Checks if the adapter supports ddl transaction. 45 | """ 46 | @callback supports_ddl_transaction? :: boolean 47 | 48 | @doc """ 49 | Executes migration commands. 
50 | """ 51 | @callback execute_ddl(adapter_meta, command, options :: Keyword.t()) :: 52 | {:ok, [{Logger.level, Logger.message, Logger.metadata}]} 53 | 54 | @doc """ 55 | Locks the migrations table and emits the locked versions for callback execution. 56 | 57 | It returns the result of calling the given function with a list of versions. 58 | """ 59 | @callback lock_for_migrations(adapter_meta, options :: Keyword.t(), fun) :: 60 | result 61 | when fun: (() -> result), result: var 62 | end 63 | -------------------------------------------------------------------------------- /lib/mix/tasks/ecto.dump.ex: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.Dump do 2 | use Mix.Task 3 | import Mix.Ecto 4 | import Mix.EctoSQL 5 | 6 | @shortdoc "Dumps the repository database structure" 7 | @default_opts [quiet: false] 8 | 9 | @aliases [ 10 | d: :dump_path, 11 | q: :quiet, 12 | r: :repo 13 | ] 14 | 15 | @switches [ 16 | dump_path: :string, 17 | quiet: :boolean, 18 | repo: [:string, :keep], 19 | no_compile: :boolean, 20 | no_deps_check: :boolean 21 | ] 22 | 23 | @moduledoc """ 24 | Dumps the current environment's database structure for the 25 | given repository into a structure file. 26 | 27 | The repository must be set under `:ecto_repos` in the 28 | current app configuration or given via the `-r` option. 29 | 30 | This task needs some shell utility to be present on the machine 31 | running the task. 
32 | 33 | Database | Utility needed 34 | :--------- | :------------- 35 | PostgreSQL | pg_dump 36 | MySQL | mysqldump 37 | 38 | ## Example 39 | 40 | $ mix ecto.dump 41 | 42 | ## Command line options 43 | 44 | * `-r`, `--repo` - the repo to load the structure info from 45 | * `-d`, `--dump-path` - the path of the dump file to create 46 | * `-q`, `--quiet` - run the command quietly 47 | * `--no-compile` - does not compile applications before dumping 48 | * `--no-deps-check` - does not check dependencies before dumping 49 | """ 50 | 51 | @impl true 52 | def run(args) do 53 | {opts, _} = OptionParser.parse! args, strict: @switches, aliases: @aliases 54 | opts = Keyword.merge(@default_opts, opts) 55 | 56 | Enum.each parse_repo(args), fn repo -> 57 | ensure_repo(repo, args) 58 | ensure_implements(repo.__adapter__(), Ecto.Adapter.Structure, 59 | "dump structure for #{inspect repo}") 60 | 61 | migration_repo = repo.config()[:migration_repo] || repo 62 | 63 | for repo <- Enum.uniq([repo, migration_repo]) do 64 | config = Keyword.merge(repo.config(), opts) 65 | 66 | case repo.__adapter__().structure_dump(source_repo_priv(repo), config) do 67 | {:ok, location} -> 68 | unless opts[:quiet] do 69 | Mix.shell().info "The structure for #{inspect repo} has been dumped to #{location}" 70 | end 71 | {:error, term} when is_binary(term) -> 72 | Mix.raise "The structure for #{inspect repo} couldn't be dumped: #{term}" 73 | {:error, term} -> 74 | Mix.raise "The structure for #{inspect repo} couldn't be dumped: #{inspect term}" 75 | end 76 | end 77 | end 78 | end 79 | end 80 | -------------------------------------------------------------------------------- /test/ecto/migrator_repo_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.MigratorRepoTest do 2 | use ExUnit.Case 3 | 4 | import Ecto.Migrator 5 | import ExUnit.CaptureLog 6 | 7 | defmodule Migration do 8 | use Ecto.Migration 9 | 10 | def up do 11 | execute "up" 12 | end 13 | 
14 | def down do 15 | execute "down" 16 | end 17 | end 18 | 19 | defmodule ChangeMigration do 20 | use Ecto.Migration 21 | 22 | def change do 23 | create table(:posts) do 24 | add :name, :string 25 | end 26 | 27 | create index(:posts, [:title]) 28 | end 29 | end 30 | 31 | defmodule MainRepo do 32 | use Ecto.Repo, otp_app: :ecto_sql, adapter: EctoSQL.TestAdapter 33 | end 34 | 35 | defmodule MigrationRepo do 36 | use Ecto.Repo, otp_app: :ecto_sql, adapter: EctoSQL.TestAdapter 37 | end 38 | 39 | Application.put_env(:ecto_sql, MainRepo, [migration_repo: MigrationRepo]) 40 | 41 | setup do 42 | {:ok, _} = start_supervised({MigrationsAgent, [{1, nil}, {2, nil}, {3, nil}]}) 43 | :ok 44 | end 45 | 46 | def put_test_adapter_config(config) do 47 | Application.put_env(:ecto_sql, EctoSQL.TestAdapter, config) 48 | 49 | on_exit fn -> 50 | Application.delete_env(:ecto, EctoSQL.TestAdapter) 51 | end 52 | end 53 | 54 | setup_all do 55 | {:ok, _pid} = MainRepo.start_link() 56 | {:ok, _pid} = MigrationRepo.start_link() 57 | :ok 58 | end 59 | 60 | describe "migration_repo option" do 61 | test "upwards and downwards migrations" do 62 | assert run(MainRepo, [{3, ChangeMigration}, {4, Migration}], :up, to: 4, log: false) == [4] 63 | assert run(MainRepo, [{2, ChangeMigration}, {3, Migration}], :down, all: true, log: false) == [3, 2] 64 | end 65 | 66 | test "down invokes the repository adapter with down commands" do 67 | assert down(MainRepo, 0, Migration, log: false) == :already_down 68 | assert down(MainRepo, 2, Migration, log: false) == :ok 69 | end 70 | 71 | test "up invokes the repository adapter with up commands" do 72 | assert up(MainRepo, 3, Migration, log: false) == :already_up 73 | assert up(MainRepo, 4, Migration, log: false) == :ok 74 | end 75 | 76 | test "migrations run inside a transaction if the adapter supports ddl transactions when configuring a migration repo" do 77 | capture_log fn -> 78 | put_test_adapter_config(supports_ddl_transaction?: true, test_process: self()) 79 | 
up(MainRepo, 0, Migration) 80 | 81 | assert_receive {:transaction, %{repo: MainRepo}, _} 82 | assert_receive {:lock_for_migrations, %{repo: MigrationRepo}, _, _} 83 | end 84 | end 85 | end 86 | end 87 | -------------------------------------------------------------------------------- /test/mix/tasks/ecto.gen.migration_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.Gen.MigrationTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Support.FileHelpers 5 | import Mix.Tasks.Ecto.Gen.Migration, only: [run: 1] 6 | 7 | tmp_path = Path.join(tmp_path(), inspect(Ecto.Gen.Migration)) 8 | @migrations_path Path.join(tmp_path, "migrations") 9 | 10 | defmodule Repo do 11 | def __adapter__ do 12 | true 13 | end 14 | 15 | def config do 16 | [priv: "tmp/#{inspect(Ecto.Gen.Migration)}", otp_app: :ecto_sql] 17 | end 18 | end 19 | 20 | setup do 21 | File.rm_rf!(unquote(tmp_path)) 22 | :ok 23 | end 24 | 25 | test "generates a new migration" do 26 | [path] = run ["-r", to_string(Repo), "my_migration"] 27 | assert Path.dirname(path) == @migrations_path 28 | assert Path.basename(path) =~ ~r/^\d{14}_my_migration\.exs$/ 29 | assert_file path, fn file -> 30 | assert file =~ "defmodule Mix.Tasks.Ecto.Gen.MigrationTest.Repo.Migrations.MyMigration do" 31 | assert file =~ "use Ecto.Migration" 32 | assert file =~ "def change do" 33 | end 34 | end 35 | 36 | test "generates a new migration with Custom Migration Module" do 37 | Application.put_env(:ecto_sql, :migration_module, MyCustomApp.MigrationModule) 38 | [path] = run ["-r", to_string(Repo), "my_custom_migration"] 39 | Application.delete_env(:ecto_sql, :migration_module) 40 | assert Path.dirname(path) == @migrations_path 41 | assert Path.basename(path) =~ ~r/^\d{14}_my_custom_migration\.exs$/ 42 | assert_file path, fn file -> 43 | assert file =~ "defmodule Mix.Tasks.Ecto.Gen.MigrationTest.Repo.Migrations.MyCustomMigration do" 44 | assert file =~ "use 
MyCustomApp.MigrationModule" 45 | assert file =~ "def change do" 46 | end 47 | end 48 | 49 | test "underscores the filename when generating a migration" do 50 | run ["-r", to_string(Repo), "MyMigration"] 51 | assert [name] = File.ls!(@migrations_path) 52 | assert name =~ ~r/^\d{14}_my_migration\.exs$/ 53 | end 54 | 55 | test "custom migrations_path" do 56 | dir = Path.join([unquote(tmp_path), "custom_migrations"]) 57 | [path] = run ["-r", to_string(Repo), "--migrations-path", dir, "custom_path"] 58 | assert Path.dirname(path) == dir 59 | end 60 | 61 | test "raises when existing migration exists" do 62 | run ["-r", to_string(Repo), "my_migration"] 63 | assert_raise Mix.Error, ~r"migration can't be created", fn -> 64 | run ["-r", to_string(Repo), "my_migration"] 65 | end 66 | end 67 | 68 | test "raises when missing file" do 69 | assert_raise Mix.Error, fn -> run ["-r", to_string(Repo)] end 70 | end 71 | end 72 | -------------------------------------------------------------------------------- /integration_test/sql/logging.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.LoggingTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.PoolRepo 6 | alias Ecto.Integration.Post 7 | 8 | test "log entry is sent to telemetry" do 9 | log = fn event_name, measurements, metadata -> 10 | assert Enum.at(event_name, -1) == :query 11 | assert %{result: {:ok, _res}} = metadata 12 | assert measurements.total_time == measurements.query_time + measurements.decode_time + measurements.queue_time 13 | assert measurements.idle_time 14 | send(self(), :logged) 15 | end 16 | 17 | Process.put(:telemetry, log) 18 | _ = PoolRepo.all(Post) 19 | assert_received :logged 20 | end 21 | 22 | test "log entry is sent to telemetry with custom options" do 23 | log = fn event_name, _measurements, metadata -> 24 | assert Enum.at(event_name, -1) == :query 25 | assert 
metadata.options == [:custom_metadata] 26 | send(self(), :logged) 27 | end 28 | 29 | Process.put(:telemetry, log) 30 | _ = PoolRepo.all(Post, telemetry_options: [:custom_metadata]) 31 | assert_received :logged 32 | end 33 | 34 | test "log entry sent under another event name" do 35 | log = fn [:custom], measurements, metadata -> 36 | assert %{result: {:ok, _res}} = metadata 37 | assert measurements.total_time == measurements.query_time + measurements.decode_time + measurements.queue_time 38 | assert measurements.idle_time 39 | send(self(), :logged) 40 | end 41 | 42 | Process.put(:telemetry, log) 43 | _ = PoolRepo.all(Post, telemetry_event: [:custom]) 44 | assert_received :logged 45 | end 46 | 47 | test "log entry is not sent to telemetry under nil event name" do 48 | Process.put(:telemetry, fn _, _ -> raise "never called" end) 49 | _ = TestRepo.all(Post, telemetry_event: nil) 50 | refute_received :logged 51 | end 52 | 53 | test "log entry when some measurements are nil" do 54 | assert ExUnit.CaptureLog.capture_log(fn -> 55 | TestRepo.query("BEG", [], log: :error) 56 | end) =~ "[error]" 57 | end 58 | 59 | test "log entry with custom log level" do 60 | assert ExUnit.CaptureLog.capture_log(fn -> 61 | TestRepo.insert!(%Post{title: "1"}, [log: :error]) 62 | end) =~ "[error]" 63 | 64 | # We cannot assert on the result because it depends on the suite log level 65 | ExUnit.CaptureLog.capture_log(fn -> 66 | TestRepo.insert!(%Post{title: "1"}, [log: true]) 67 | end) 68 | 69 | # But this assertion is always true 70 | assert ExUnit.CaptureLog.capture_log(fn -> 71 | TestRepo.insert!(%Post{title: "1"}, [log: false]) 72 | end) == "" 73 | end 74 | end 75 | -------------------------------------------------------------------------------- /integration_test/sql/alter.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.AlterTest do 2 | use Ecto.Integration.Case, async: false 3 | 4 | alias Ecto.Integration.PoolRepo 5 | 6 | 
defmodule AlterMigrationOne do 7 | use Ecto.Migration 8 | 9 | def up do 10 | create table(:alter_col_type) do 11 | add :value, :integer 12 | end 13 | 14 | execute "INSERT INTO alter_col_type (value) VALUES (1)" 15 | end 16 | 17 | def down do 18 | drop table(:alter_col_type) 19 | end 20 | end 21 | 22 | defmodule AlterMigrationTwo do 23 | use Ecto.Migration 24 | 25 | def up do 26 | alter table(:alter_col_type) do 27 | modify :value, :numeric 28 | end 29 | end 30 | 31 | def down do 32 | alter table(:alter_col_type) do 33 | modify :value, :integer 34 | end 35 | end 36 | end 37 | 38 | import Ecto.Query, only: [from: 1, from: 2] 39 | 40 | defp run(direction, repo, module) do 41 | Ecto.Migration.Runner.run(repo, repo.config(), 1, module, :forward, direction, direction, log: false) 42 | end 43 | 44 | test "reset cache on returning query after alter column type" do 45 | values = from v in "alter_col_type", select: v.value 46 | 47 | assert :ok == run(:up, PoolRepo, AlterMigrationOne) 48 | assert PoolRepo.all(values) == [1] 49 | 50 | assert :ok == run(:up, PoolRepo, AlterMigrationTwo) 51 | [%Decimal{}] = PoolRepo.all(values) 52 | 53 | PoolRepo.transaction(fn() -> 54 | assert [%Decimal{}] = PoolRepo.all(values) 55 | assert :ok == run(:down, PoolRepo, AlterMigrationTwo) 56 | 57 | # Optionally fail once with database error when 58 | # already prepared on connection (and clear cache) 59 | try do 60 | PoolRepo.all(values, [mode: :savepoint]) 61 | rescue 62 | _ -> 63 | assert PoolRepo.all(values) == [1] 64 | else 65 | result -> 66 | assert result == [1] 67 | end 68 | end) 69 | after 70 | assert :ok == run(:down, PoolRepo, AlterMigrationOne) 71 | end 72 | 73 | test "reset cache on parameterized query after alter column type" do 74 | values = from v in "alter_col_type" 75 | 76 | assert :ok == run(:up, PoolRepo, AlterMigrationOne) 77 | assert PoolRepo.update_all(values, [set: [value: 2]]) == {1, nil} 78 | 79 | assert :ok == run(:up, PoolRepo, AlterMigrationTwo) 80 | assert 
PoolRepo.update_all(values, [set: [value: 3]]) == {1, nil} 81 | 82 | PoolRepo.transaction(fn() -> 83 | assert PoolRepo.update_all(values, [set: [value: Decimal.new(5)]]) == {1, nil} 84 | assert :ok == run(:down, PoolRepo, AlterMigrationTwo) 85 | assert PoolRepo.update_all(values, [set: [value: 6]]) == {1, nil} 86 | end) 87 | after 88 | assert :ok == run(:down, PoolRepo, AlterMigrationOne) 89 | end 90 | end 91 | -------------------------------------------------------------------------------- /bench/scripts/micro/to_sql_bench.exs: -------------------------------------------------------------------------------- 1 | # -----------------------------------Goal-------------------------------------- 2 | # Compare the implementation of parsing Ecto.Query objects into SQL queries by 3 | # the different database adapters 4 | 5 | # -------------------------------Description----------------------------------- 6 | # Repo.to_sql/2 is an important step of a database query. 7 | # This benchmark tracks performance of parsing Ecto.Query structures into 8 | # "raw" SQL query strings. 9 | # Different Ecto.Query objects has multiple combinations and some different attributes 10 | # depending on the query type. In this tests we benchmark against different 11 | # query types and complexity. 12 | 13 | # ----------------------------Factors(don't change)--------------------------- 14 | # Different adapters supported by Ecto, each one has its own implementation that 15 | # is tested against different query inputs 16 | 17 | # ----------------------------Parameters(change)------------------------------- 18 | # Different query objects (select, delete, update) to be translated into pure SQL 19 | # strings. 
20 | 21 | Code.require_file("../../support/setup.exs", __DIR__) 22 | 23 | import Ecto.Query 24 | 25 | alias Ecto.Bench.{User, Game} 26 | 27 | inputs = %{ 28 | "Ordinary Select All" => {:all, from(User)}, 29 | "Ordinary Delete All" => {:delete_all, from(User)}, 30 | "Ordinary Update All" => {:update_all, from(User, update: [set: [name: "Thor"]])}, 31 | "Ordinary Where" => {:all, from(User, where: [name: "Thanos", email: "blah@blah"])}, 32 | "Fetch First Registry" => {:all, first(User)}, 33 | "Fetch Last Registry" => {:all, last(User)}, 34 | "Ordinary Order By" => {:all, order_by(User, desc: :name)}, 35 | "Complex Query 2 Joins" => 36 | {:all, 37 | from(User, where: [name: "Thanos"]) 38 | |> join(:left, [u], ux in User, on: u.id == ux.id) 39 | |> join(:right, [j], uj in User, on: j.id == 1 and j.email == "email@email") 40 | |> select([u, ux], {u.name, ux.email})}, 41 | "Complex Query 4 Joins" => 42 | {:all, 43 | from(User) 44 | |> join(:left, [u], g in Game, on: g.name == u.name) 45 | |> join(:right, [g], u in User, on: g.id == 1 and u.email == "email@email") 46 | |> join(:inner, [u], g in fragment("SELECT * from games where game.id = ?", u.id)) 47 | |> join(:left, [g], u in fragment("SELECT * from users = ?", g.id)) 48 | |> select([u, g], {u.name, g.price})} 49 | } 50 | 51 | jobs = %{ 52 | "Pg Query Builder" => fn {type, query} -> Ecto.Bench.PgRepo.to_sql(type, query) end, 53 | "MyXQL Query Builder" => fn {type, query} -> Ecto.Bench.MyXQLRepo.to_sql(type, query) end 54 | } 55 | 56 | path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results" 57 | file = Path.join(path, "to_sql.json") 58 | 59 | Benchee.run( 60 | jobs, 61 | inputs: inputs, 62 | formatters: [Benchee.Formatters.JSON, Benchee.Formatters.Console], 63 | formatter_options: [json: [file: file]] 64 | ) 65 | -------------------------------------------------------------------------------- /test/mix/tasks/ecto.rollback_test.exs: 
-------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.RollbackTest do 2 | use ExUnit.Case 3 | 4 | import Mix.Tasks.Ecto.Rollback, only: [run: 2] 5 | import Support.FileHelpers 6 | 7 | @migrations_path Path.join([tmp_path(), inspect(Ecto.Migrate), "migrations"]) 8 | 9 | setup do 10 | File.mkdir_p!(@migrations_path) 11 | :ok 12 | end 13 | 14 | defmodule Repo do 15 | def start_link(_) do 16 | Process.put(:started, true) 17 | Task.start_link fn -> 18 | Process.flag(:trap_exit, true) 19 | receive do 20 | {:EXIT, _, :normal} -> :ok 21 | end 22 | end 23 | end 24 | 25 | def stop() do 26 | :ok 27 | end 28 | 29 | def __adapter__ do 30 | EctoSQL.TestAdapter 31 | end 32 | 33 | def config do 34 | [priv: "tmp/#{inspect(Ecto.Migrate)}", otp_app: :ecto_sql] 35 | end 36 | end 37 | 38 | defmodule StartedRepo do 39 | def start_link(_) do 40 | {:error, {:already_started, :whatever}} 41 | end 42 | 43 | def stop() do 44 | raise "should not be called" 45 | end 46 | 47 | def __adapter__ do 48 | EctoSQL.TestAdapter 49 | end 50 | 51 | def config do 52 | [priv: "tmp/#{inspect(Ecto.Migrate)}", otp_app: :ecto_sql] 53 | end 54 | end 55 | 56 | test "runs the migrator after starting repo" do 57 | run ["-r", to_string(Repo)], fn _, _, _, _ -> 58 | Process.put(:migrated, true) 59 | [] 60 | end 61 | assert Process.get(:migrated) 62 | assert Process.get(:started) 63 | end 64 | 65 | test "runs the migrator with already started repo" do 66 | run ["-r", to_string(StartedRepo)], fn _, _, _, _ -> 67 | Process.put(:migrated, true) 68 | [] 69 | end 70 | assert Process.get(:migrated) 71 | end 72 | 73 | test "runs the migrator yielding the repository and migrations path" do 74 | run ["-r", to_string(Repo), "--prefix", "foo"], fn repo, [path], direction, opts -> 75 | assert repo == Repo 76 | refute path =~ ~r/_build/ 77 | assert direction == :down 78 | assert opts[:step] == 1 79 | assert opts[:prefix] == "foo" 80 | [] 81 | end 82 | assert 
Process.get(:started) 83 | end 84 | 85 | test "raises when migrations path does not exist" do 86 | File.rm_rf!(@migrations_path) 87 | assert_raise Mix.Error, fn -> 88 | run ["-r", to_string(Repo)], fn _, _, _, _ -> [] end 89 | end 90 | assert !Process.get(:started) 91 | end 92 | 93 | test "uses custom paths" do 94 | path1 = Path.join([unquote(tmp_path()), inspect(Ecto.Migrate), "migrations_1"]) 95 | path2 = Path.join([unquote(tmp_path()), inspect(Ecto.Migrate), "migrations_2"]) 96 | File.mkdir_p!(path1) 97 | File.mkdir_p!(path2) 98 | 99 | run ["-r", to_string(Repo), "--migrations-path", path1, "--migrations-path", path2], 100 | fn Repo, [^path1, ^path2], _, _ -> [] end 101 | end 102 | end 103 | -------------------------------------------------------------------------------- /integration_test/tds/migrations_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.MigrationsTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias Ecto.Integration.PoolRepo 5 | import ExUnit.CaptureLog 6 | 7 | @moduletag :capture_log 8 | @base_migration 3_000_000 9 | 10 | defmodule NormalMigration do 11 | use Ecto.Migration 12 | 13 | def change do 14 | create_if_not_exists table(:log_mode_table) 15 | end 16 | end 17 | 18 | describe "Migrator" do 19 | @get_lock_command ~s(sp_getapplock @Resource = 'ecto_Ecto.Integration.PoolRepo', @LockMode = 'Exclusive', @LockOwner = 'Transaction', @LockTimeout = -1) 20 | @create_table_sql ~s(CREATE TABLE [log_mode_table]) 21 | @create_table_log "create table if not exists log_mode_table" 22 | @drop_table_sql ~s(DROP TABLE [log_mode_table]) 23 | @drop_table_log "drop table if exists log_mode_table" 24 | @version_insert ~s(INSERT INTO [schema_migrations]) 25 | @version_delete ~s(DELETE s0 FROM [schema_migrations]) 26 | 27 | test "logs locking and transaction commands" do 28 | num = @base_migration + System.unique_integer([:positive]) 29 | up_log = 30 | capture_log(fn -> 31 | 
Ecto.Migrator.up(PoolRepo, num, NormalMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 32 | end) 33 | 34 | assert Regex.scan(~r/(begin \[\])/, up_log) |> length() == 2 35 | assert up_log =~ @get_lock_command 36 | assert up_log =~ @create_table_sql 37 | assert up_log =~ @create_table_log 38 | assert up_log =~ @version_insert 39 | assert Regex.scan(~r/(commit \[\])/, up_log) |> length() == 2 40 | 41 | down_log = 42 | capture_log(fn -> 43 | Ecto.Migrator.down(PoolRepo, num, NormalMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 44 | end) 45 | 46 | assert Regex.scan(~r/(begin \[\])/, up_log) |> length() == 2 47 | assert down_log =~ @get_lock_command 48 | assert down_log =~ @drop_table_sql 49 | assert down_log =~ @drop_table_log 50 | assert down_log =~ @version_delete 51 | assert Regex.scan(~r/(commit \[\])/, up_log) |> length() == 2 52 | end 53 | 54 | test "does not log sql when log is default" do 55 | num = @base_migration + System.unique_integer([:positive]) 56 | up_log = 57 | capture_log(fn -> 58 | Ecto.Migrator.up(PoolRepo, num, NormalMigration, log: :info) 59 | end) 60 | 61 | refute up_log =~ "begin []" 62 | refute up_log =~ @get_lock_command 63 | refute up_log =~ @create_table_sql 64 | assert up_log =~ @create_table_log 65 | refute up_log =~ @version_insert 66 | refute up_log =~ "commit []" 67 | 68 | down_log = 69 | capture_log(fn -> 70 | Ecto.Migrator.down(PoolRepo, num, NormalMigration, log: :info) 71 | end) 72 | 73 | refute down_log =~ "begin []" 74 | refute down_log =~ @get_lock_command 75 | refute down_log =~ @drop_table_sql 76 | assert down_log =~ @drop_table_log 77 | refute down_log =~ @version_delete 78 | refute down_log =~ "commit []" 79 | end 80 | end 81 | end 82 | -------------------------------------------------------------------------------- /integration_test/pg/test_helper.exs: -------------------------------------------------------------------------------- 1 | Logger.configure(level: :info) 2 | 3 
| # Configure Ecto for support and tests 4 | Application.put_env(:ecto, :primary_key_type, :id) 5 | Application.put_env(:ecto, :async_integration_tests, true) 6 | Application.put_env(:ecto_sql, :lock_for_update, "FOR UPDATE") 7 | 8 | # Configure PG connection 9 | Application.put_env(:ecto_sql, :pg_test_url, 10 | "ecto://" <> (System.get_env("PG_URL") || "postgres:postgres@127.0.0.1") 11 | ) 12 | 13 | Code.require_file "../support/repo.exs", __DIR__ 14 | 15 | # Pool repo for async, safe tests 16 | alias Ecto.Integration.TestRepo 17 | 18 | Application.put_env(:ecto_sql, TestRepo, 19 | url: Application.get_env(:ecto_sql, :pg_test_url) <> "/ecto_test", 20 | pool: Ecto.Adapters.SQL.Sandbox, 21 | show_sensitive_data_on_connection_error: true 22 | ) 23 | 24 | defmodule Ecto.Integration.TestRepo do 25 | use Ecto.Integration.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.Postgres 26 | 27 | def create_prefix(prefix) do 28 | "create schema #{prefix}" 29 | end 30 | 31 | def drop_prefix(prefix) do 32 | "drop schema #{prefix}" 33 | end 34 | 35 | def uuid do 36 | Ecto.UUID 37 | end 38 | end 39 | 40 | # Pool repo for non-async tests 41 | alias Ecto.Integration.PoolRepo 42 | 43 | Application.put_env(:ecto_sql, PoolRepo, 44 | url: Application.get_env(:ecto_sql, :pg_test_url) <> "/ecto_test", 45 | pool_size: 10, 46 | max_restarts: 20, 47 | max_seconds: 10) 48 | 49 | defmodule Ecto.Integration.PoolRepo do 50 | use Ecto.Integration.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.Postgres 51 | end 52 | 53 | # Load support files 54 | ecto = Mix.Project.deps_paths()[:ecto] 55 | Code.require_file "#{ecto}/integration_test/support/schemas.exs", __DIR__ 56 | Code.require_file "../support/migration.exs", __DIR__ 57 | 58 | defmodule Ecto.Integration.Case do 59 | use ExUnit.CaseTemplate 60 | 61 | setup do 62 | :ok = Ecto.Adapters.SQL.Sandbox.checkout(TestRepo) 63 | end 64 | end 65 | 66 | {:ok, _} = Ecto.Adapters.Postgres.ensure_all_started(TestRepo.config(), :temporary) 67 | 68 | # Load up 
the repository, start it, and run migrations 69 | _ = Ecto.Adapters.Postgres.storage_down(TestRepo.config()) 70 | :ok = Ecto.Adapters.Postgres.storage_up(TestRepo.config()) 71 | 72 | {:ok, _pid} = TestRepo.start_link() 73 | {:ok, _pid} = PoolRepo.start_link() 74 | 75 | %{rows: [[version]]} = TestRepo.query!("SHOW server_version", []) 76 | 77 | version = 78 | case Regex.named_captures(~r/(?[0-9]*)(\.(?[0-9]*))?.*/, version) do 79 | %{"major" => major, "minor" => minor} -> "#{major}.#{minor}.0" 80 | %{"major" => major} -> "#{major}.0.0" 81 | _other -> version 82 | end 83 | 84 | excludes_above_9_5 = [:without_conflict_target] 85 | excludes_below_9_6 = [:add_column_if_not_exists, :no_error_on_conditional_column_migration] 86 | 87 | if Version.match?(version, "< 9.6.0") do 88 | ExUnit.configure(exclude: excludes_above_9_5 ++ excludes_below_9_6) 89 | else 90 | ExUnit.configure(exclude: excludes_above_9_5) 91 | end 92 | 93 | :ok = Ecto.Migrator.up(TestRepo, 0, Ecto.Integration.Migration, log: false) 94 | Ecto.Adapters.SQL.Sandbox.mode(TestRepo, :manual) 95 | Process.flag(:trap_exit, true) 96 | 97 | ExUnit.start() 98 | -------------------------------------------------------------------------------- /integration_test/myxql/migrations_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.MigrationsTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias Ecto.Integration.PoolRepo 5 | import ExUnit.CaptureLog 6 | 7 | @moduletag :capture_log 8 | @base_migration 3_000_000 9 | 10 | defmodule NormalMigration do 11 | use Ecto.Migration 12 | 13 | def change do 14 | create_if_not_exists table(:log_mode_table) 15 | end 16 | end 17 | 18 | describe "Migrator" do 19 | @get_lock_command ~s[SELECT GET_LOCK("ecto_Ecto.Integration.PoolRepo", -1)] 20 | @release_lock_command ~s[SELECT RELEASE_LOCK("ecto_Ecto.Integration.PoolRepo")] 21 | @create_table_sql ~s[CREATE TABLE IF NOT EXISTS `log_mode_table`] 22 | @create_table_log 
"create table if not exists log_mode_table" 23 | @drop_table_sql ~s[DROP TABLE IF EXISTS `log_mode_table`] 24 | @drop_table_log "drop table if exists log_mode_table" 25 | @version_insert ~s[INSERT INTO `schema_migrations`] 26 | @version_delete ~s[DELETE s0.* FROM `schema_migrations`] 27 | 28 | test "logs locking and transaction commands" do 29 | num = @base_migration + System.unique_integer([:positive]) 30 | up_log = 31 | capture_log(fn -> 32 | Ecto.Migrator.up(PoolRepo, num, NormalMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 33 | end) 34 | 35 | assert up_log =~ "begin []" 36 | assert up_log =~ @get_lock_command 37 | assert up_log =~ @create_table_sql 38 | assert up_log =~ @create_table_log 39 | assert up_log =~ @release_lock_command 40 | assert up_log =~ @version_insert 41 | assert up_log =~ "commit []" 42 | 43 | down_log = 44 | capture_log(fn -> 45 | Ecto.Migrator.down(PoolRepo, num, NormalMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 46 | end) 47 | 48 | assert down_log =~ "begin []" 49 | assert down_log =~ @get_lock_command 50 | assert down_log =~ @drop_table_sql 51 | assert down_log =~ @drop_table_log 52 | assert down_log =~ @release_lock_command 53 | assert down_log =~ @version_delete 54 | assert down_log =~ "commit []" 55 | end 56 | 57 | test "does not log sql when log is default" do 58 | num = @base_migration + System.unique_integer([:positive]) 59 | up_log = 60 | capture_log(fn -> 61 | Ecto.Migrator.up(PoolRepo, num, NormalMigration, log: :info) 62 | end) 63 | 64 | refute up_log =~ "begin []" 65 | refute up_log =~ @get_lock_command 66 | refute up_log =~ @create_table_sql 67 | assert up_log =~ @create_table_log 68 | refute up_log =~ @release_lock_command 69 | refute up_log =~ @version_insert 70 | refute up_log =~ "commit []" 71 | 72 | down_log = 73 | capture_log(fn -> 74 | Ecto.Migrator.down(PoolRepo, num, NormalMigration, log: :info) 75 | end) 76 | 77 | refute down_log =~ "begin []" 78 | refute 
down_log =~ @get_lock_command 79 | refute down_log =~ @drop_table_sql 80 | assert down_log =~ @drop_table_log 81 | refute down_log =~ @release_lock_command 82 | refute down_log =~ @version_delete 83 | refute down_log =~ "commit []" 84 | end 85 | end 86 | end 87 | -------------------------------------------------------------------------------- /lib/mix/tasks/ecto.migrations.ex: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.Migrations do 2 | use Mix.Task 3 | import Mix.Ecto 4 | import Mix.EctoSQL 5 | 6 | @shortdoc "Displays the repository migration status" 7 | 8 | @aliases [ 9 | r: :repo 10 | ] 11 | 12 | @switches [ 13 | repo: [:keep, :string], 14 | no_compile: :boolean, 15 | no_deps_check: :boolean, 16 | migrations_path: :keep, 17 | prefix: :string 18 | ] 19 | 20 | @moduledoc """ 21 | Displays the up / down migration status for the given repository. 22 | 23 | The repository must be set under `:ecto_repos` in the 24 | current app configuration or given via the `-r` option. 25 | 26 | By default, migrations are expected at "priv/YOUR_REPO/migrations" 27 | directory of the current application but it can be configured 28 | by specifying the `:priv` key under the repository configuration. 29 | 30 | If the repository has not been started yet, one will be 31 | started outside our application supervision tree and shutdown 32 | afterwards. 33 | 34 | ## Examples 35 | 36 | $ mix ecto.migrations 37 | $ mix ecto.migrations -r Custom.Repo 38 | 39 | ## Command line options 40 | 41 | * `--migrations-path` - the path to load the migrations from, defaults to 42 | `"priv/repo/migrations"`. This option may be given multiple times in which 43 | case the migrations are loaded from all the given directories and sorted as 44 | if they were in the same one. 
45 | 46 | Note, if you have previously run migrations from paths `a/` and `b/`, and now 47 | run `mix ecto.migrations --migrations-path a/` (omitting path `b/`), the 48 | migrations from the path `b/` will be shown in the output as `** FILE NOT FOUND **`. 49 | 50 | * `--no-compile` - does not compile applications before running 51 | 52 | * `--no-deps-check` - does not check dependencies before running 53 | 54 | * `--prefix` - the prefix to check migrations on 55 | 56 | * `-r`, `--repo` - the repo to obtain the status for 57 | 58 | """ 59 | 60 | @impl true 61 | def run(args, migrations \\ &Ecto.Migrator.migrations/3, puts \\ &IO.puts/1) do 62 | repos = parse_repo(args) 63 | {opts, _} = OptionParser.parse! args, strict: @switches, aliases: @aliases 64 | 65 | for repo <- repos do 66 | ensure_repo(repo, args) 67 | paths = ensure_migrations_paths(repo, opts) 68 | 69 | case Ecto.Migrator.with_repo(repo, &migrations.(&1, paths, opts), [mode: :temporary]) do 70 | {:ok, repo_status, _} -> 71 | puts.( 72 | """ 73 | 74 | Repo: #{inspect(repo)} 75 | 76 | Status Migration ID Migration Name 77 | -------------------------------------------------- 78 | """ <> 79 | Enum.map_join(repo_status, "\n", fn {status, number, description} -> 80 | " #{format(status, 10)}#{format(number, 16)}#{description}" 81 | end) <> "\n" 82 | ) 83 | 84 | {:error, error} -> 85 | Mix.raise "Could not start repo #{inspect repo}, error: #{inspect error}" 86 | end 87 | end 88 | 89 | :ok 90 | end 91 | 92 | defp format(content, pad) do 93 | content 94 | |> to_string 95 | |> String.pad_trailing(pad) 96 | end 97 | end 98 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Ecto SQL 2 | ========= 3 | 4 | [![Build Status](https://github.com/elixir-ecto/ecto_sql/workflows/CI/badge.svg)](https://github.com/elixir-ecto/ecto_sql/actions) 5 | 6 | Ecto SQL 
([documentation](https://hexdocs.pm/ecto_sql)) provides building blocks for writing SQL adapters for Ecto. It features: 7 | 8 | * The Ecto.Adapters.SQL module as an entry point for all SQL-based adapters 9 | * Default implementations for Postgres (Ecto.Adapters.Postgres), MySQL (Ecto.Adapters.MyXQL), and MSSQL (Ecto.Adapters.Tds) 10 | * A test sandbox (Ecto.Adapters.SQL.Sandbox) that concurrently runs database tests inside transactions 11 | * Support for database migrations via Mix tasks 12 | 13 | To learn more about getting started, [see the Ecto repository](https://github.com/elixir-ecto/ecto). 14 | 15 | ## Running tests 16 | 17 | Clone the repo and fetch its dependencies: 18 | 19 | $ git clone https://github.com/elixir-ecto/ecto_sql.git 20 | $ cd ecto_sql 21 | $ mix deps.get 22 | $ mix test.all 23 | 24 | Note that `mix test.all` runs the tests in `test/` and the `integration_test`s for each adapter: `pg`, `myxql` and `tds`. 25 | 26 | You can also use a local Ecto checkout if desired: 27 | 28 | $ ECTO_PATH=../ecto mix test.all 29 | 30 | You can run tests against a specific Ecto adapter by using the `ECTO_ADAPTER` environment variable: 31 | 32 | $ ECTO_ADAPTER=pg mix test 33 | 34 | MySQL and PostgreSQL can be installed directly on most systems. 
For MSSQL, you may need to run it as a Docker image: 35 | 36 | docker run -d -p 1433:1433 --name mssql -e 'ACCEPT_EULA=Y' -e 'MSSQL_SA_PASSWORD=some!Password' mcr.microsoft.com/mssql/server:2017-latest 37 | 38 | ### Running containerized tests 39 | 40 | It is also possible to run the integration tests under a containerized environment using [earthly](https://earthly.dev/get-earthly): 41 | 42 | $ earthly -P +all 43 | 44 | You can also use this to interactively debug any failing integration tests using the corresponding commands: 45 | 46 | $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-20.3.8.26-alpine-3.11.6 --build-arg MYSQL=5.7 +integration-test-mysql 47 | $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-20.3.8.26-alpine-3.11.6 --build-arg MSSQL=2019 +integration-test-mssql 48 | $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-20.3.8.26-alpine-3.11.6 --build-arg POSTGRES=11.11 +integration-test-postgres 49 | 50 | Then once you enter the containerized shell, you can inspect the underlying databases with the respective commands: 51 | 52 | PGPASSWORD=postgres psql -h 127.0.0.1 -U postgres -d postgres ecto_test 53 | MYSQL_PASSWORD=root mysql -h 127.0.0.1 -uroot -proot ecto_test 54 | sqlcmd -U sa -P 'some!Password' 55 | 56 | ## License 57 | 58 | Copyright (c) 2012 Plataformatec \ 59 | Copyright (c) 2020 Dashbit 60 | 61 | Licensed under the Apache License, Version 2.0 (the "License"); 62 | you may not use this file except in compliance with the License. 63 | You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) 64 | 65 | Unless required by applicable law or agreed to in writing, software 66 | distributed under the License is distributed on an "AS IS" BASIS, 67 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 68 | See the License for the specific language governing permissions and 69 | limitations under the License. 
70 | -------------------------------------------------------------------------------- /test/mix/tasks/ecto.migrations_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.MigrationsTest do 2 | use ExUnit.Case 3 | 4 | import Mix.Tasks.Ecto.Migrations, only: [run: 3] 5 | import Support.FileHelpers 6 | 7 | migrations_path = Path.join([tmp_path(), inspect(Ecto.Migrations), "migrations"]) 8 | 9 | setup do 10 | File.mkdir_p!(unquote(migrations_path)) 11 | :ok 12 | end 13 | 14 | defmodule Repo do 15 | def start_link(_) do 16 | Process.put(:started, true) 17 | Task.start_link fn -> 18 | Process.flag(:trap_exit, true) 19 | receive do 20 | {:EXIT, _, :normal} -> :ok 21 | end 22 | end 23 | end 24 | 25 | def stop() do 26 | :ok 27 | end 28 | 29 | def __adapter__ do 30 | EctoSQL.TestAdapter 31 | end 32 | 33 | def config do 34 | [priv: "tmp/#{inspect(Ecto.Migrations)}", otp_app: :ecto_sql] 35 | end 36 | end 37 | 38 | test "displays the up and down status for the default repo" do 39 | Application.put_env(:ecto_sql, :ecto_repos, [Repo]) 40 | 41 | migrations = fn _, _, _ -> 42 | [ 43 | {:up, 0, "up_migration_0"}, 44 | {:up, 20160000000001, "up_migration_1"}, 45 | {:up, 20160000000002, "up_migration_2"}, 46 | {:up, 20160000000003, "up_migration_3"}, 47 | {:down, 20160000000004, "down_migration_1"}, 48 | {:down, 20160000000005, "down_migration_2"} 49 | ] 50 | end 51 | 52 | expected_output = """ 53 | 54 | Repo: Mix.Tasks.Ecto.MigrationsTest.Repo 55 | 56 | Status Migration ID Migration Name 57 | -------------------------------------------------- 58 | up 0 up_migration_0 59 | up 20160000000001 up_migration_1 60 | up 20160000000002 up_migration_2 61 | up 20160000000003 up_migration_3 62 | down 20160000000004 down_migration_1 63 | down 20160000000005 down_migration_2 64 | """ 65 | run [], migrations, fn i -> assert(i == expected_output) end 66 | end 67 | 68 | test "migrations displays the up and down status for any given 
repo" do 69 | migrations = fn _, _, _ -> 70 | [ 71 | {:up, 20160000000001, "up_migration_1"}, 72 | {:down, 20160000000002, "down_migration_1"} 73 | ] 74 | end 75 | 76 | expected_output = """ 77 | 78 | Repo: Mix.Tasks.Ecto.MigrationsTest.Repo 79 | 80 | Status Migration ID Migration Name 81 | -------------------------------------------------- 82 | up 20160000000001 up_migration_1 83 | down 20160000000002 down_migration_1 84 | """ 85 | 86 | run ["-r", to_string(Repo)], migrations, fn i -> assert(i == expected_output) end 87 | end 88 | 89 | test "does not run from _build" do 90 | Application.put_env(:ecto_sql, :ecto_repos, [Repo]) 91 | 92 | migrations = fn repo, [path], _opts -> 93 | assert repo == Repo 94 | refute path =~ ~r/_build/ 95 | [] 96 | end 97 | 98 | run [], migrations, fn _ -> :ok end 99 | end 100 | 101 | test "uses custom paths" do 102 | path1 = Path.join([unquote(tmp_path()), inspect(Ecto.Migrate), "migrations_1"]) 103 | path2 = Path.join([unquote(tmp_path()), inspect(Ecto.Migrate), "migrations_2"]) 104 | File.mkdir_p!(path1) 105 | File.mkdir_p!(path2) 106 | 107 | run ["-r", to_string(Repo), "--migrations-path", path1, "--migrations-path", path2], 108 | fn Repo, [^path1, ^path2], _opts -> [] end, 109 | fn _ -> :ok end 110 | end 111 | end 112 | -------------------------------------------------------------------------------- /integration_test/pg/migrations_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.MigrationsTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias Ecto.Integration.PoolRepo 5 | import ExUnit.CaptureLog 6 | 7 | @moduletag :capture_log 8 | @base_migration 3_000_000 9 | 10 | defmodule DuplicateTableMigration do 11 | use Ecto.Migration 12 | 13 | def change do 14 | create_if_not_exists table(:duplicate_table) 15 | create_if_not_exists table(:duplicate_table) 16 | end 17 | end 18 | 19 | defmodule NormalMigration do 20 | use Ecto.Migration 21 | 22 | def change do 23 | 
create_if_not_exists table(:log_mode_table) 24 | end 25 | end 26 | 27 | test "logs Postgres notice messages" do 28 | log = 29 | capture_log(fn -> 30 | num = @base_migration + System.unique_integer([:positive]) 31 | Ecto.Migrator.up(PoolRepo, num, DuplicateTableMigration, log: false) 32 | end) 33 | 34 | assert log =~ ~s(relation "duplicate_table" already exists, skipping) 35 | end 36 | 37 | describe "Migrator" do 38 | @get_lock_command ~s(LOCK TABLE "schema_migrations" IN SHARE UPDATE EXCLUSIVE MODE) 39 | @create_table_sql ~s(CREATE TABLE IF NOT EXISTS "log_mode_table") 40 | @create_table_log "create table if not exists log_mode_table" 41 | @drop_table_sql ~s(DROP TABLE IF EXISTS "log_mode_table") 42 | @drop_table_log "drop table if exists log_mode_table" 43 | @version_insert ~s(INSERT INTO "schema_migrations") 44 | @version_delete ~s(DELETE FROM "schema_migrations") 45 | 46 | test "logs locking and transaction commands" do 47 | num = @base_migration + System.unique_integer([:positive]) 48 | up_log = 49 | capture_log(fn -> 50 | Ecto.Migrator.up(PoolRepo, num, NormalMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 51 | end) 52 | 53 | assert Regex.scan(~r/(begin \[\])/, up_log) |> length() == 2 54 | assert up_log =~ @get_lock_command 55 | assert up_log =~ @create_table_sql 56 | assert up_log =~ @create_table_log 57 | assert up_log =~ @version_insert 58 | assert Regex.scan(~r/(commit \[\])/, up_log) |> length() == 2 59 | 60 | down_log = 61 | capture_log(fn -> 62 | Ecto.Migrator.down(PoolRepo, num, NormalMigration, log_migrator_sql: :info, log_migrations_sql: :info, log: :info) 63 | end) 64 | 65 | assert down_log =~ "begin []" 66 | assert down_log =~ @get_lock_command 67 | assert down_log =~ @drop_table_sql 68 | assert down_log =~ @drop_table_log 69 | assert down_log =~ @version_delete 70 | assert down_log =~ "commit []" 71 | end 72 | 73 | test "does not log sql when log is default" do 74 | num = @base_migration + 
System.unique_integer([:positive]) 75 | up_log = 76 | capture_log(fn -> 77 | Ecto.Migrator.up(PoolRepo, num, NormalMigration, log: :info) 78 | end) 79 | 80 | refute up_log =~ "begin []" 81 | refute up_log =~ @get_lock_command 82 | refute up_log =~ @create_table_sql 83 | assert up_log =~ @create_table_log 84 | refute up_log =~ @version_insert 85 | refute up_log =~ "commit []" 86 | 87 | down_log = 88 | capture_log(fn -> 89 | Ecto.Migrator.down(PoolRepo, num, NormalMigration, log: :info) 90 | end) 91 | 92 | refute down_log =~ "begin []" 93 | refute down_log =~ @get_lock_command 94 | refute down_log =~ @drop_table_sql 95 | assert down_log =~ @drop_table_log 96 | refute down_log =~ @version_delete 97 | refute down_log =~ "commit []" 98 | end 99 | end 100 | end 101 | -------------------------------------------------------------------------------- /integration_test/myxql/test_helper.exs: -------------------------------------------------------------------------------- 1 | Logger.configure(level: :info) 2 | 3 | # Configure Ecto for support and tests 4 | Application.put_env(:ecto, :primary_key_type, :id) 5 | Application.put_env(:ecto, :async_integration_tests, false) 6 | Application.put_env(:ecto_sql, :lock_for_update, "FOR UPDATE") 7 | 8 | Code.require_file "../support/repo.exs", __DIR__ 9 | 10 | # Configure MySQL connection 11 | Application.put_env(:ecto_sql, :mysql_test_url, 12 | "ecto://" <> (System.get_env("MYSQL_URL") || "root@127.0.0.1") 13 | ) 14 | 15 | # Pool repo for async, safe tests 16 | alias Ecto.Integration.TestRepo 17 | 18 | Application.put_env(:ecto_sql, TestRepo, 19 | url: Application.get_env(:ecto_sql, :mysql_test_url) <> "/ecto_test", 20 | pool: Ecto.Adapters.SQL.Sandbox, 21 | show_sensitive_data_on_connection_error: true 22 | ) 23 | 24 | defmodule Ecto.Integration.TestRepo do 25 | use Ecto.Integration.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.MyXQL 26 | 27 | def create_prefix(prefix) do 28 | "create database #{prefix}" 29 | end 30 | 31 | def 
drop_prefix(prefix) do 32 | "drop database #{prefix}" 33 | end 34 | 35 | def uuid do 36 | Ecto.UUID 37 | end 38 | end 39 | 40 | # Pool repo for non-async tests 41 | alias Ecto.Integration.PoolRepo 42 | 43 | Application.put_env(:ecto_sql, PoolRepo, 44 | adapter: Ecto.Adapters.MyXQL, 45 | url: Application.get_env(:ecto_sql, :mysql_test_url) <> "/ecto_test", 46 | pool_size: 10, 47 | show_sensitive_data_on_connection_error: true 48 | ) 49 | 50 | defmodule Ecto.Integration.PoolRepo do 51 | use Ecto.Integration.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.MyXQL 52 | end 53 | 54 | # Load support files 55 | ecto = Mix.Project.deps_paths()[:ecto] 56 | Code.require_file "#{ecto}/integration_test/support/schemas.exs", __DIR__ 57 | Code.require_file "../support/migration.exs", __DIR__ 58 | 59 | defmodule Ecto.Integration.Case do 60 | use ExUnit.CaseTemplate 61 | 62 | setup do 63 | :ok = Ecto.Adapters.SQL.Sandbox.checkout(TestRepo) 64 | end 65 | end 66 | 67 | {:ok, _} = Ecto.Adapters.MyXQL.ensure_all_started(TestRepo.config(), :temporary) 68 | 69 | # Load up the repository, start it, and run migrations 70 | _ = Ecto.Adapters.MyXQL.storage_down(TestRepo.config()) 71 | :ok = Ecto.Adapters.MyXQL.storage_up(TestRepo.config()) 72 | 73 | {:ok, _pid} = TestRepo.start_link() 74 | {:ok, _pid} = PoolRepo.start_link() 75 | 76 | %{rows: [[version]]} = TestRepo.query!("SELECT @@version", []) 77 | 78 | version = 79 | case Regex.named_captures(~r/(?[0-9]*)(\.(?[0-9]*))?.*/, version) do 80 | %{"major" => major, "minor" => minor} -> "#{major}.#{minor}.0" 81 | %{"major" => major} -> "#{major}.0.0" 82 | _other -> version 83 | end 84 | 85 | excludes = [ 86 | # MySQL does not have an array type 87 | :array_type, 88 | # The next two features rely on RETURNING, which MySQL does not support 89 | :read_after_writes, 90 | :returning, 91 | # Unsupported query features 92 | :aggregate_filters, 93 | :transaction_isolation, 94 | :with_conflict_target, 95 | # Unsupported migration features 96 | 
:create_index_if_not_exists, 97 | :add_column_if_not_exists, 98 | :remove_column_if_exists, 99 | # MySQL doesn't have a boolean type, so this ends up returning 0/1 100 | :map_boolean_in_expression, 101 | # MySQL doesn't support indexed parameters 102 | :placeholders 103 | ] 104 | 105 | if Version.match?(version, ">= 8.0.0") do 106 | ExUnit.configure(exclude: excludes) 107 | else 108 | ExUnit.configure(exclude: [:rename_column | excludes]) 109 | end 110 | 111 | :ok = Ecto.Migrator.up(TestRepo, 0, Ecto.Integration.Migration, log: false) 112 | Ecto.Adapters.SQL.Sandbox.mode(TestRepo, :manual) 113 | Process.flag(:trap_exit, true) 114 | 115 | ExUnit.start() 116 | -------------------------------------------------------------------------------- /integration_test/pg/transaction_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.PGTransactionTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.PoolRepo 5 | alias Ecto.Integration.TestRepo 6 | alias Ecto.Integration.Post 7 | 8 | require Logger 9 | @timeout 500 10 | 11 | describe "aborts on corrupted transactions" do 12 | test "outside sandbox" do 13 | PoolRepo.transaction fn -> 14 | {:error, _} = PoolRepo.query("INVALID") 15 | end 16 | 17 | PoolRepo.transaction fn -> 18 | # This will taint the whole inner transaction 19 | {:error, _} = PoolRepo.query("INVALID") 20 | 21 | assert_raise Postgrex.Error, ~r/current transaction is aborted/, fn -> 22 | PoolRepo.insert(%Post{}, skip_transaction: true) 23 | end 24 | end 25 | end 26 | 27 | test "inside sandbox" do 28 | TestRepo.transaction fn -> 29 | {:error, _} = TestRepo.query("INVALID") 30 | end 31 | 32 | TestRepo.transaction fn -> 33 | # This will taint the whole inner transaction 34 | {:error, _} = TestRepo.query("INVALID") 35 | 36 | assert_raise Postgrex.Error, ~r/current transaction is aborted/, fn -> 37 | TestRepo.insert(%Post{}, skip_transaction: true) 38 | end 39 | end 40 | end 
41 | end 42 | 43 | describe "deadlocks" do 44 | test "reset worker" do 45 | tx1 = self() 46 | 47 | %Task{pid: tx2} = tx2_task = Task.async fn -> 48 | PoolRepo.transaction fn -> 49 | acquire_deadlock(tx1, [2, 1]) 50 | end 51 | end 52 | 53 | tx1_result = PoolRepo.transaction fn -> 54 | acquire_deadlock(tx2, [1, 2]) 55 | end 56 | 57 | tx2_result = Task.await(tx2_task) 58 | assert Enum.sort([tx1_result, tx2_result]) == [{:error, :deadlocked}, {:ok, :acquired}] 59 | end 60 | end 61 | 62 | defp acquire_deadlock(other_tx, [key1, key2] = _locks) do 63 | pg_advisory_xact_lock(key1) # acquire first lock 64 | Logger.debug "#{inspect self()} acquired #{key1}" 65 | send other_tx, :acquired1 # signal other_tx that we acquired lock on key1 66 | assert_receive :acquired1, @timeout # wait for other_tx to signal us that it acquired lock on its key1 67 | Logger.debug "#{inspect self()} continuing" 68 | 69 | try do 70 | Logger.debug "#{inspect self()} acquiring #{key2}" 71 | pg_advisory_xact_lock(key2) # try to acquire lock on key2 (might deadlock) 72 | rescue 73 | err in [Postgrex.Error] -> 74 | Logger.debug "#{inspect self()} got killed by deadlock detection" 75 | assert %Postgrex.Error{postgres: %{code: :deadlock_detected}} = err 76 | 77 | assert_tx_aborted() 78 | 79 | # Trapping a transaction should still be fine. 80 | try do 81 | Process.flag(:trap_exit, true) 82 | PoolRepo.transaction fn -> :ok end 83 | catch 84 | class, msg -> 85 | Logger.debug inspect([class, msg]) 86 | after 87 | Process.flag(:trap_exit, false) 88 | end 89 | 90 | # Even aborted transactions can be rolled back. 
91 | PoolRepo.rollback(:deadlocked) 92 | else 93 | _ -> 94 | Logger.debug "#{inspect self()} acquired #{key2}" 95 | :acquired 96 | end 97 | end 98 | 99 | defp assert_tx_aborted do 100 | try do 101 | PoolRepo.query!("SELECT 1"); 102 | rescue 103 | err in [Postgrex.Error] -> 104 | # current transaction is aborted, commands ignored until end of transaction block 105 | assert %Postgrex.Error{postgres: %{code: :in_failed_sql_transaction}} = err 106 | else 107 | _ -> flunk "transaction should be aborted" 108 | end 109 | end 110 | 111 | defp pg_advisory_xact_lock(key) do 112 | %{rows: [[:void]]} = 113 | PoolRepo.query!("SELECT pg_advisory_xact_lock($1);", [key]) 114 | end 115 | end 116 | -------------------------------------------------------------------------------- /test/test_repo.exs: -------------------------------------------------------------------------------- 1 | defmodule MigrationsAgent do 2 | use Agent 3 | 4 | def start_link(versions) do 5 | Agent.start_link(fn -> versions end, name: __MODULE__) 6 | end 7 | 8 | def get do 9 | Agent.get(__MODULE__, & &1) 10 | end 11 | 12 | def up(version, opts) do 13 | Agent.update(__MODULE__, &[{version, opts[:prefix]} | &1]) 14 | end 15 | 16 | def down(version, opts) do 17 | Agent.update(__MODULE__, &List.delete(&1, {version, opts[:prefix]})) 18 | end 19 | end 20 | 21 | defmodule EctoSQL.TestAdapter do 22 | @behaviour Ecto.Adapter 23 | @behaviour Ecto.Adapter.Queryable 24 | @behaviour Ecto.Adapter.Schema 25 | @behaviour Ecto.Adapter.Transaction 26 | @behaviour Ecto.Adapter.Migration 27 | 28 | defmacro __before_compile__(_opts), do: :ok 29 | def ensure_all_started(_, _), do: {:ok, []} 30 | 31 | def init(_opts) do 32 | child_spec = Supervisor.child_spec {Task, fn -> :timer.sleep(:infinity) end}, [] 33 | {:ok, child_spec, %{meta: :meta}} 34 | end 35 | 36 | def checkout(_, _, _), do: raise "not implemented" 37 | def checked_out?(_), do: raise "not implemented" 38 | def delete(_, _, _, _), do: raise "not implemented" 39 | def 
insert_all(_, _, _, _, _, _, _, _), do: raise "not implemented" 40 | def rollback(_, _), do: raise "not implemented" 41 | def stream(_, _, _, _, _), do: raise "not implemented" 42 | def update(_, _, _, _, _, _), do: raise "not implemented" 43 | 44 | ## Types 45 | 46 | def loaders(_primitive, type), do: [type] 47 | def dumpers(_primitive, type), do: [type] 48 | def autogenerate(_), do: nil 49 | 50 | ## Queryable 51 | 52 | def prepare(operation, query), do: {:nocache, {operation, query}} 53 | 54 | # Migration emulation 55 | 56 | def execute(_, _, {:nocache, {:all, %{from: %{source: {"schema_migrations", _}}}}}, _, opts) do 57 | true = opts[:schema_migration] 58 | versions = MigrationsAgent.get() 59 | {length(versions), Enum.map(versions, &[elem(&1, 0)])} 60 | end 61 | 62 | def execute(_, _meta, {:nocache, {:delete_all, %{from: %{source: {"schema_migrations", _}}}}}, [version], opts) do 63 | true = opts[:schema_migration] 64 | MigrationsAgent.down(version, opts) 65 | {1, nil} 66 | end 67 | 68 | def insert(_, %{source: "schema_migrations"}, val, _, _, opts) do 69 | true = opts[:schema_migration] 70 | version = Keyword.fetch!(val, :version) 71 | MigrationsAgent.up(version, opts) 72 | {:ok, []} 73 | end 74 | 75 | def in_transaction?(_), do: Process.get(:in_transaction?) || false 76 | 77 | def transaction(mod, _opts, fun) do 78 | Process.put(:in_transaction?, true) 79 | send test_process(), {:transaction, mod, fun} 80 | {:ok, fun.()} 81 | after 82 | Process.put(:in_transaction?, false) 83 | end 84 | 85 | ## Migrations 86 | 87 | def lock_for_migrations(mod, opts, fun) do 88 | send test_process(), {:lock_for_migrations, mod, fun, opts} 89 | fun.() 90 | end 91 | 92 | def execute_ddl(_, command, _) do 93 | Process.put(:last_command, command) 94 | {:ok, []} 95 | end 96 | 97 | def supports_ddl_transaction? 
do 98 | get_config(:supports_ddl_transaction?, false) 99 | end 100 | 101 | defp test_process do 102 | get_config(:test_process, self()) 103 | end 104 | 105 | defp get_config(name, default) do 106 | :ecto_sql 107 | |> Application.get_env(__MODULE__, []) 108 | |> Keyword.get(name, default) 109 | end 110 | end 111 | 112 | defmodule EctoSQL.TestRepo do 113 | use Ecto.Repo, otp_app: :ecto_sql, adapter: EctoSQL.TestAdapter 114 | 115 | def default_options(_operation) do 116 | Process.get(:repo_default_options, []) 117 | end 118 | end 119 | 120 | defmodule EctoSQL.MigrationTestRepo do 121 | use Ecto.Repo, otp_app: :ecto_sql, adapter: EctoSQL.TestAdapter 122 | end 123 | 124 | EctoSQL.TestRepo.start_link() 125 | EctoSQL.TestRepo.start_link(name: :tenant_db) 126 | EctoSQL.MigrationTestRepo.start_link() 127 | -------------------------------------------------------------------------------- /integration_test/support/migration.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.Migration do 2 | use Ecto.Migration 3 | 4 | def change do 5 | # IO.puts "TESTING MIGRATION LOCK" 6 | # Process.sleep(10000) 7 | 8 | create table(:users, comment: "users table") do 9 | add :name, :string, comment: "name column" 10 | add :custom_id, :uuid 11 | timestamps() 12 | end 13 | 14 | create table(:posts) do 15 | add :title, :string, size: 100 16 | add :counter, :integer 17 | add :blob, :binary 18 | add :bid, :binary_id 19 | add :uuid, :uuid 20 | add :meta, :map 21 | add :links, {:map, :string} 22 | add :intensities, {:map, :float} 23 | add :public, :boolean 24 | add :cost, :decimal, precision: 2, scale: 1 25 | add :visits, :integer 26 | add :wrapped_visits, :integer 27 | add :intensity, :float 28 | add :author_id, :integer 29 | add :posted, :date 30 | timestamps(null: true) 31 | end 32 | 33 | create table(:posts_users, primary_key: false) do 34 | add :post_id, references(:posts) 35 | add :user_id, references(:users) 36 | end 37 | 38 | 
create table(:posts_users_pk) do 39 | add :post_id, references(:posts) 40 | add :user_id, references(:users) 41 | timestamps() 42 | end 43 | 44 | # Add a unique index on uuid. We use this 45 | # to verify the behaviour that the index 46 | # only matters if the UUID column is not NULL. 47 | create unique_index(:posts, [:uuid], comment: "posts index") 48 | 49 | create table(:permalinks) do 50 | add :uniform_resource_locator, :string 51 | add :title, :string 52 | add :post_id, references(:posts) 53 | add :user_id, references(:users) 54 | end 55 | 56 | create unique_index(:permalinks, [:post_id]) 57 | create unique_index(:permalinks, [:uniform_resource_locator]) 58 | 59 | create table(:comments) do 60 | add :text, :string, size: 100 61 | add :lock_version, :integer, default: 1 62 | add :post_id, references(:posts) 63 | add :author_id, references(:users) 64 | end 65 | 66 | create table(:customs, primary_key: false) do 67 | add :bid, :binary_id, primary_key: true 68 | add :uuid, :uuid 69 | end 70 | 71 | create unique_index(:customs, [:uuid]) 72 | 73 | create table(:customs_customs, primary_key: false) do 74 | add :custom_id1, references(:customs, column: :bid, type: :binary_id) 75 | add :custom_id2, references(:customs, column: :bid, type: :binary_id) 76 | end 77 | 78 | create table(:barebones) do 79 | add :num, :integer 80 | end 81 | 82 | create table(:transactions) do 83 | add :num, :integer 84 | end 85 | 86 | create table(:lock_counters) do 87 | add :count, :integer 88 | end 89 | 90 | create table(:orders) do 91 | add :item, :map 92 | add :items, :map 93 | add :meta, :map 94 | add :permalink_id, references(:permalinks) 95 | end 96 | 97 | unless :array_type in ExUnit.configuration()[:exclude] do 98 | create table(:tags) do 99 | add :ints, {:array, :integer} 100 | add :uuids, {:array, :uuid}, default: [] 101 | add :items, {:array, :map} 102 | end 103 | end 104 | 105 | create table(:composite_pk, primary_key: false) do 106 | add :a, :integer, primary_key: true 107 | add 
:b, :integer, primary_key: true 108 | add :name, :string 109 | end 110 | 111 | create table(:corrupted_pk, primary_key: false) do 112 | add :a, :string 113 | end 114 | 115 | create table(:posts_users_composite_pk) do 116 | add :post_id, references(:posts), primary_key: true 117 | add :user_id, references(:users), primary_key: true 118 | timestamps() 119 | end 120 | 121 | create unique_index(:posts_users_composite_pk, [:post_id, :user_id]) 122 | 123 | create table(:usecs) do 124 | add :naive_datetime_usec, :naive_datetime_usec 125 | add :utc_datetime_usec, :utc_datetime_usec 126 | end 127 | 128 | create table(:bits) do 129 | add :bit, :bit 130 | end 131 | end 132 | end 133 | -------------------------------------------------------------------------------- /lib/mix/tasks/ecto.load.ex: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.Load do 2 | use Mix.Task 3 | import Mix.Ecto 4 | import Mix.EctoSQL 5 | 6 | @shortdoc "Loads previously dumped database structure" 7 | @default_opts [force: false, quiet: false] 8 | 9 | @aliases [ 10 | d: :dump_path, 11 | f: :force, 12 | q: :quiet, 13 | r: :repo 14 | ] 15 | 16 | @switches [ 17 | dump_path: :string, 18 | force: :boolean, 19 | quiet: :boolean, 20 | repo: [:string, :keep], 21 | no_compile: :boolean, 22 | no_deps_check: :boolean, 23 | skip_if_loaded: :boolean 24 | ] 25 | 26 | @moduledoc """ 27 | Loads the current environment's database structure for the 28 | given repository from a previously dumped structure file. 29 | 30 | The repository must be set under `:ecto_repos` in the 31 | current app configuration or given via the `-r` option. 32 | 33 | This task needs some shell utility to be present on the machine 34 | running the task. 
35 | 36 | Database | Utility needed 37 | :--------- | :------------- 38 | PostgreSQL | psql 39 | MySQL | mysql 40 | 41 | ## Example 42 | 43 | $ mix ecto.load 44 | 45 | ## Command line options 46 | 47 | * `-r`, `--repo` - the repo to load the structure info into 48 | * `-d`, `--dump-path` - the path of the dump file to load from 49 | * `-q`, `--quiet` - run the command quietly 50 | * `-f`, `--force` - do not ask for confirmation when loading data. 51 | Configuration is asked only when `:start_permanent` is set to true 52 | (typically in production) 53 | * `--no-compile` - does not compile applications before loading 54 | * `--no-deps-check` - does not check dependencies before loading 55 | * `--skip-if-loaded` - does not load the dump file if the repo has the migrations table up 56 | """ 57 | 58 | @impl true 59 | def run(args, table_exists? \\ &Ecto.Adapters.SQL.table_exists?/2) do 60 | {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) 61 | opts = Keyword.merge(@default_opts, opts) 62 | 63 | Enum.each(parse_repo(args), fn repo -> 64 | ensure_repo(repo, args) 65 | 66 | ensure_implements( 67 | repo.__adapter__(), 68 | Ecto.Adapter.Structure, 69 | "load structure for #{inspect(repo)}" 70 | ) 71 | 72 | {migration_repo, source} = Ecto.Migration.SchemaMigration.get_repo_and_source(repo, repo.config()) 73 | {:ok, loaded?, _} = Ecto.Migrator.with_repo(migration_repo, &table_exists?.(&1, source)) 74 | 75 | for repo <- Enum.uniq([repo, migration_repo]) do 76 | cond do 77 | loaded? and opts[:skip_if_loaded] -> 78 | :ok 79 | 80 | (skip_safety_warnings?() and not loaded?) or opts[:force] or confirm_load(repo, loaded?) -> 81 | load_structure(repo, opts) 82 | 83 | true -> 84 | :ok 85 | end 86 | end 87 | end) 88 | end 89 | 90 | defp skip_safety_warnings? do 91 | Mix.Project.config()[:start_permanent] != true 92 | end 93 | 94 | defp confirm_load(repo, false) do 95 | Mix.shell().yes?( 96 | "Are you sure you want to load a new structure for #{inspect(repo)}? 
Any existing data in this repo may be lost." 97 | ) 98 | end 99 | 100 | defp confirm_load(repo, true) do 101 | Mix.shell().yes?(""" 102 | It looks like a structure was already loaded for #{inspect(repo)}. Any attempt to load it again might fail. 103 | Are you sure you want to proceed? 104 | """) 105 | end 106 | 107 | defp load_structure(repo, opts) do 108 | config = Keyword.merge(repo.config(), opts) 109 | 110 | case repo.__adapter__().structure_load(source_repo_priv(repo), config) do 111 | {:ok, location} -> 112 | unless opts[:quiet] do 113 | Mix.shell().info "The structure for #{inspect repo} has been loaded from #{location}" 114 | end 115 | {:error, term} when is_binary(term) -> 116 | Mix.raise "The structure for #{inspect repo} couldn't be loaded: #{term}" 117 | {:error, term} -> 118 | Mix.raise "The structure for #{inspect repo} couldn't be loaded: #{inspect term}" 119 | end 120 | end 121 | end 122 | -------------------------------------------------------------------------------- /test/mix/tasks/ecto.migrate_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.MigrateTest do 2 | use ExUnit.Case 3 | 4 | import Mix.Tasks.Ecto.Migrate, only: [run: 2] 5 | import Support.FileHelpers 6 | 7 | @migrations_path Path.join([tmp_path(), inspect(Ecto.Migrate), "migrations"]) 8 | 9 | setup do 10 | File.mkdir_p!(@migrations_path) 11 | :ok 12 | end 13 | 14 | defmodule Repo do 15 | def start_link(_) do 16 | Process.put(:started, true) 17 | Task.start_link fn -> 18 | Process.flag(:trap_exit, true) 19 | receive do 20 | {:EXIT, _, :normal} -> :ok 21 | end 22 | end 23 | end 24 | 25 | def stop do 26 | :ok 27 | end 28 | 29 | def __adapter__ do 30 | EctoSQL.TestAdapter 31 | end 32 | 33 | def config do 34 | [priv: "tmp/#{inspect(Ecto.Migrate)}", otp_app: :ecto_sql] 35 | end 36 | end 37 | 38 | defmodule StartedRepo do 39 | def start_link(_) do 40 | Process.put(:already_started, true) 41 | {:error, {:already_started, 
:whatever}} 42 | end 43 | 44 | def stop do 45 | raise "should not be called" 46 | end 47 | 48 | def __adapter__ do 49 | EctoSQL.TestAdapter 50 | end 51 | 52 | def config do 53 | [priv: "tmp/#{inspect(Ecto.Migrate)}", otp_app: :ecto_sql] 54 | end 55 | end 56 | 57 | test "runs the migrator with app_repo config" do 58 | Application.put_env(:ecto_sql, :ecto_repos, [Repo]) 59 | run [], fn _, _, _, _ -> 60 | Process.put(:migrated, true) 61 | [] 62 | end 63 | assert Process.get(:migrated) 64 | assert Process.get(:started) 65 | after 66 | Application.delete_env(:ecto, :ecto_repos) 67 | end 68 | 69 | test "runs the migrator after starting repo" do 70 | run ["-r", to_string(Repo)], fn _, _, _, _ -> 71 | Process.put(:migrated, true) 72 | [] 73 | end 74 | assert Process.get(:migrated) 75 | assert Process.get(:started) 76 | end 77 | 78 | test "runs the migrator with the already started repo" do 79 | run ["-r", to_string(StartedRepo)], fn _, _, _, _ -> 80 | Process.put(:migrated, true) 81 | [] 82 | end 83 | assert Process.get(:migrated) 84 | assert Process.get(:already_started) 85 | end 86 | 87 | test "runs the migrator with two repos" do 88 | run ["-r", to_string(Repo), "-r", to_string(StartedRepo)], fn _, _, _, _ -> 89 | Process.put(:migrated, true) 90 | [] 91 | end 92 | assert Process.get(:migrated) 93 | assert Process.get(:started) 94 | assert Process.get(:already_started) 95 | end 96 | 97 | test "runs the migrator yielding the repository and migrations path" do 98 | run ["-r", to_string(Repo), "--quiet", "--prefix", "foo"], fn repo, [path], direction, opts -> 99 | assert repo == Repo 100 | refute path =~ ~r/_build/ 101 | assert direction == :up 102 | assert opts[:all] == true 103 | assert opts[:log] == false 104 | assert opts[:prefix] == "foo" 105 | [] 106 | end 107 | assert Process.get(:started) 108 | end 109 | 110 | test "runs the migrator with --step" do 111 | run ["-r", to_string(Repo), "-n", "1"], fn repo, [path], direction, opts -> 112 | assert repo == Repo 113 | 
refute path =~ ~r/_build/ 114 | assert direction == :up 115 | assert opts == [repo: "Elixir.Mix.Tasks.Ecto.MigrateTest.Repo", step: 1] 116 | [] 117 | end 118 | assert Process.get(:started) 119 | end 120 | 121 | test "raises when migrations path does not exist" do 122 | File.rm_rf!(@migrations_path) 123 | assert_raise Mix.Error, fn -> 124 | run ["-r", to_string(Repo)], fn _, _, _, _ -> [] end 125 | end 126 | assert !Process.get(:started) 127 | end 128 | 129 | test "uses custom paths" do 130 | path1 = Path.join([unquote(tmp_path()), inspect(Ecto.Migrate), "migrations_1"]) 131 | path2 = Path.join([unquote(tmp_path()), inspect(Ecto.Migrate), "migrations_2"]) 132 | File.mkdir_p!(path1) 133 | File.mkdir_p!(path2) 134 | 135 | run ["-r", to_string(Repo), "--migrations-path", path1, "--migrations-path", path2], 136 | fn Repo, [^path1, ^path2], _, _ -> [] end 137 | end 138 | end 139 | -------------------------------------------------------------------------------- /lib/mix/tasks/ecto.gen.migration.ex: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.Gen.Migration do 2 | use Mix.Task 3 | 4 | import Macro, only: [camelize: 1, underscore: 1] 5 | import Mix.Generator 6 | import Mix.Ecto 7 | import Mix.EctoSQL 8 | 9 | @shortdoc "Generates a new migration for the repo" 10 | 11 | @aliases [ 12 | r: :repo 13 | ] 14 | 15 | @switches [ 16 | change: :string, 17 | repo: [:string, :keep], 18 | no_compile: :boolean, 19 | no_deps_check: :boolean, 20 | migrations_path: :string 21 | ] 22 | 23 | @moduledoc """ 24 | Generates a migration. 25 | 26 | The repository must be set under `:ecto_repos` in the 27 | current app configuration or given via the `-r` option. 28 | 29 | ## Examples 30 | 31 | $ mix ecto.gen.migration add_posts_table 32 | $ mix ecto.gen.migration add_posts_table -r Custom.Repo 33 | 34 | The generated migration filename will be prefixed with the current 35 | timestamp in UTC which is used for versioning and ordering. 
36 | 37 | By default, the migration will be generated to the 38 | "priv/YOUR_REPO/migrations" directory of the current application 39 | but it can be configured to be any subdirectory of `priv` by 40 | specifying the `:priv` key under the repository configuration. 41 | 42 | This generator will automatically open the generated file if 43 | you have `ECTO_EDITOR` set in your environment variable. 44 | 45 | ## Command line options 46 | 47 | * `-r`, `--repo` - the repo to generate migration for 48 | * `--no-compile` - does not compile applications before running 49 | * `--no-deps-check` - does not check dependencies before running 50 | * `--migrations-path` - the path to run the migrations from, defaults to `priv/repo/migrations` 51 | 52 | ## Configuration 53 | 54 | If the current app configuration specifies a custom migration module 55 | the generated migration code will use that rather than the default 56 | `Ecto.Migration`: 57 | 58 | config :ecto_sql, migration_module: MyApplication.CustomMigrationModule 59 | 60 | """ 61 | 62 | @impl true 63 | def run(args) do 64 | repos = parse_repo(args) 65 | 66 | Enum.map repos, fn repo -> 67 | case OptionParser.parse!(args, strict: @switches, aliases: @aliases) do 68 | {opts, [name]} -> 69 | ensure_repo(repo, args) 70 | path = opts[:migrations_path] || Path.join(source_repo_priv(repo), "migrations") 71 | base_name = "#{underscore(name)}.exs" 72 | file = Path.join(path, "#{timestamp()}_#{base_name}") 73 | unless File.dir?(path), do: create_directory path 74 | 75 | fuzzy_path = Path.join(path, "*_#{base_name}") 76 | if Path.wildcard(fuzzy_path) != [] do 77 | Mix.raise "migration can't be created, there is already a migration file with name #{name}." 
78 | end 79 | 80 | # The :change option may be used by other tasks but not the CLI 81 | assigns = [mod: Module.concat([repo, Migrations, camelize(name)]), change: opts[:change]] 82 | create_file file, migration_template(assigns) 83 | 84 | if open?(file) and Mix.shell().yes?("Do you want to run this migration?") do 85 | Mix.Task.run "ecto.migrate", ["-r", inspect(repo), "--migrations-path", path] 86 | end 87 | 88 | file 89 | 90 | {_, _} -> 91 | Mix.raise "expected ecto.gen.migration to receive the migration file name, " <> 92 | "got: #{inspect Enum.join(args, " ")}" 93 | end 94 | end 95 | end 96 | 97 | defp timestamp do 98 | {{y, m, d}, {hh, mm, ss}} = :calendar.universal_time() 99 | "#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}" 100 | end 101 | 102 | defp pad(i) when i < 10, do: <> 103 | defp pad(i), do: to_string(i) 104 | 105 | defp migration_module do 106 | case Application.get_env(:ecto_sql, :migration_module, Ecto.Migration) do 107 | migration_module when is_atom(migration_module) -> migration_module 108 | other -> Mix.raise "Expected :migration_module to be a module, got: #{inspect(other)}" 109 | end 110 | end 111 | 112 | embed_template :migration, """ 113 | defmodule <%= inspect @mod %> do 114 | use <%= inspect migration_module() %> 115 | 116 | def change do 117 | <%= @change %> 118 | end 119 | end 120 | """ 121 | end 122 | -------------------------------------------------------------------------------- /lib/ecto/adapters/sql/connection.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.SQL.Connection do 2 | @moduledoc """ 3 | Specifies the behaviour to be implemented by all SQL connections. 
4 | """ 5 | 6 | @typedoc "The query name" 7 | @type name :: String.t 8 | 9 | @typedoc "The SQL statement" 10 | @type statement :: String.t 11 | 12 | @typedoc "The cached query which is a DBConnection Query" 13 | @type cached :: map 14 | 15 | @type connection :: DBConnection.conn() 16 | @type params :: [term] 17 | 18 | @doc """ 19 | Receives options and returns `DBConnection` supervisor child 20 | specification. 21 | """ 22 | @callback child_spec(options :: Keyword.t) :: {module, Keyword.t} 23 | 24 | @doc """ 25 | Prepares and executes the given query with `DBConnection`. 26 | """ 27 | @callback prepare_execute(connection, name, statement, params, options :: Keyword.t) :: 28 | {:ok, cached, term} | {:error, Exception.t} 29 | 30 | @doc """ 31 | Executes a cached query. 32 | """ 33 | @callback execute(connection, cached, params, options :: Keyword.t) :: 34 | {:ok, cached, term} | {:ok, term} | {:error | :reset, Exception.t} 35 | 36 | @doc """ 37 | Runs the given statement as query. 38 | """ 39 | @callback query(connection, statement, params, options :: Keyword.t) :: 40 | {:ok, term} | {:error, Exception.t} 41 | 42 | @doc """ 43 | Returns a stream that prepares and executes the given query with 44 | `DBConnection`. 45 | """ 46 | @callback stream(connection, statement, params, options :: Keyword.t) :: 47 | Enum.t 48 | 49 | @doc """ 50 | Receives the exception returned by `c:query/4`. 51 | 52 | The constraints are in the keyword list and must return the 53 | constraint type, like `:unique`, and the constraint name as 54 | a string, for example: 55 | 56 | [unique: "posts_title_index"] 57 | 58 | Must return an empty list if the error does not come 59 | from any constraint. 60 | """ 61 | @callback to_constraints(exception :: Exception.t, options :: Keyword.t) :: Keyword.t 62 | 63 | ## Queries 64 | 65 | @doc """ 66 | Receives a query and must return a SELECT query. 
67 | """ 68 | @callback all(query :: Ecto.Query.t) :: iodata 69 | 70 | @doc """ 71 | Receives a query and values to update and must return an UPDATE query. 72 | """ 73 | @callback update_all(query :: Ecto.Query.t) :: iodata 74 | 75 | @doc """ 76 | Receives a query and must return a DELETE query. 77 | """ 78 | @callback delete_all(query :: Ecto.Query.t) :: iodata 79 | 80 | @doc """ 81 | Returns an INSERT for the given `rows` in `table` returning 82 | the given `returning`. 83 | """ 84 | @callback insert(prefix ::String.t, table :: String.t, 85 | header :: [atom], rows :: [[atom | nil]], 86 | on_conflict :: Ecto.Adapter.Schema.on_conflict, returning :: [atom], 87 | placeholders :: [term]) :: iodata 88 | 89 | @doc """ 90 | Returns an UPDATE for the given `fields` in `table` filtered by 91 | `filters` returning the given `returning`. 92 | """ 93 | @callback update(prefix :: String.t, table :: String.t, fields :: [atom], 94 | filters :: [atom], returning :: [atom]) :: iodata 95 | 96 | @doc """ 97 | Returns a DELETE for the `filters` returning the given `returning`. 98 | """ 99 | @callback delete(prefix :: String.t, table :: String.t, 100 | filters :: [atom], returning :: [atom]) :: iodata 101 | 102 | @doc """ 103 | Executes an EXPLAIN query or similar depending on the adapter to obtains statistics of the given query. 104 | 105 | Receives the `connection`, `query`, `params` for the query, 106 | and all `opts` including those related to the EXPLAIN statement and shared opts. 107 | 108 | Must execute the explain query and return the result. 109 | """ 110 | @callback explain_query(connection, query :: String.t, params :: Keyword.t, opts :: Keyword.t) :: 111 | {:ok, term} | {:error, Exception.t} 112 | 113 | ## DDL 114 | 115 | @doc """ 116 | Receives a DDL command and returns a query that executes it. 
117 | """ 118 | @callback execute_ddl(command :: Ecto.Adapter.Migration.command) :: String.t | [iodata] 119 | 120 | @doc """ 121 | Receives a query result and returns a list of logs. 122 | """ 123 | @callback ddl_logs(result :: term) :: [{Logger.level, Logger.message, Logger.metadata}] 124 | 125 | @doc """ 126 | Returns a queryable to check if the given `table` exists. 127 | """ 128 | @callback table_exists_query(table :: String.t) :: {iodata, [term]} 129 | end 130 | -------------------------------------------------------------------------------- /integration_test/pg/constraints_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ConstraintsTest do 2 | use ExUnit.Case, async: true 3 | 4 | import Ecto.Migrator, only: [up: 4] 5 | alias Ecto.Integration.PoolRepo 6 | 7 | defmodule ConstraintMigration do 8 | use Ecto.Migration 9 | 10 | @table table(:constraints_test) 11 | 12 | def change do 13 | create @table do 14 | add :price, :integer 15 | add :from, :integer 16 | add :to, :integer 17 | end 18 | create constraint(@table.name, :cannot_overlap, exclude: ~s|gist (int4range("from", "to", '[]') WITH &&)|) 19 | create constraint(@table.name, "positive_price", check: "price > 0") 20 | end 21 | end 22 | 23 | defmodule Constraint do 24 | use Ecto.Integration.Schema 25 | 26 | schema "constraints_test" do 27 | field :price, :integer 28 | field :from, :integer 29 | field :to, :integer 30 | end 31 | end 32 | 33 | @base_migration 2_000_000 34 | 35 | setup_all do 36 | ExUnit.CaptureLog.capture_log(fn -> 37 | num = @base_migration + System.unique_integer([:positive]) 38 | up(PoolRepo, num, ConstraintMigration, log: false) 39 | end) 40 | 41 | :ok 42 | end 43 | 44 | test "exclusion constraint" do 45 | changeset = Ecto.Changeset.change(%Constraint{}, from: 0, to: 10) 46 | {:ok, _} = PoolRepo.insert(changeset) 47 | 48 | non_overlapping_changeset = Ecto.Changeset.change(%Constraint{}, from: 11, to: 12) 49 | {:ok, _} = 
PoolRepo.insert(non_overlapping_changeset) 50 | 51 | overlapping_changeset = Ecto.Changeset.change(%Constraint{}, from: 9, to: 12) 52 | 53 | exception = 54 | assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to insert struct/, fn -> 55 | PoolRepo.insert(overlapping_changeset) 56 | end 57 | assert exception.message =~ "cannot_overlap (exclusion_constraint)" 58 | assert exception.message =~ "The changeset has not defined any constraint." 59 | assert exception.message =~ "call `exclusion_constraint/3`" 60 | 61 | message = ~r/constraint error when attempting to insert struct/ 62 | exception = 63 | assert_raise Ecto.ConstraintError, message, fn -> 64 | overlapping_changeset 65 | |> Ecto.Changeset.exclusion_constraint(:from) 66 | |> PoolRepo.insert() 67 | end 68 | assert exception.message =~ "cannot_overlap (exclusion_constraint)" 69 | 70 | {:error, changeset} = 71 | overlapping_changeset 72 | |> Ecto.Changeset.exclusion_constraint(:from, name: :cannot_overlap) 73 | |> PoolRepo.insert() 74 | assert changeset.errors == [from: {"violates an exclusion constraint", [constraint: :exclusion, constraint_name: "cannot_overlap"]}] 75 | assert changeset.data.__meta__.state == :built 76 | end 77 | 78 | test "check constraint" do 79 | # When the changeset doesn't expect the db error 80 | changeset = Ecto.Changeset.change(%Constraint{}, price: -10) 81 | exception = 82 | assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to insert struct/, fn -> 83 | PoolRepo.insert(changeset) 84 | end 85 | 86 | assert exception.message =~ "positive_price (check_constraint)" 87 | assert exception.message =~ "The changeset has not defined any constraint." 
88 | assert exception.message =~ "call `check_constraint/3`" 89 | 90 | # When the changeset does expect the db error, but doesn't give a custom message 91 | {:error, changeset} = 92 | changeset 93 | |> Ecto.Changeset.check_constraint(:price, name: :positive_price) 94 | |> PoolRepo.insert() 95 | assert changeset.errors == [price: {"is invalid", [constraint: :check, constraint_name: "positive_price"]}] 96 | assert changeset.data.__meta__.state == :built 97 | 98 | # When the changeset does expect the db error and gives a custom message 99 | changeset = Ecto.Changeset.change(%Constraint{}, price: -10) 100 | {:error, changeset} = 101 | changeset 102 | |> Ecto.Changeset.check_constraint(:price, name: :positive_price, message: "price must be greater than 0") 103 | |> PoolRepo.insert() 104 | assert changeset.errors == [price: {"price must be greater than 0", [constraint: :check, constraint_name: "positive_price"]}] 105 | assert changeset.data.__meta__.state == :built 106 | 107 | # When the change does not violate the check constraint 108 | changeset = Ecto.Changeset.change(%Constraint{}, price: 10, from: 100, to: 200) 109 | {:ok, changeset} = 110 | changeset 111 | |> Ecto.Changeset.check_constraint(:price, name: :positive_price, message: "price must be greater than 0") 112 | |> PoolRepo.insert() 113 | assert is_integer(changeset.id) 114 | end 115 | end 116 | -------------------------------------------------------------------------------- /lib/mix/tasks/ecto.rollback.ex: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.Rollback do 2 | use Mix.Task 3 | import Mix.Ecto 4 | import Mix.EctoSQL 5 | 6 | @shortdoc "Rolls back the repository migrations" 7 | 8 | @aliases [ 9 | r: :repo, 10 | n: :step 11 | ] 12 | 13 | @switches [ 14 | all: :boolean, 15 | step: :integer, 16 | to: :integer, 17 | quiet: :boolean, 18 | prefix: :string, 19 | pool_size: :integer, 20 | log_sql: :boolean, 21 | log_migrations_sql: :boolean, 22 | 
log_migrator_sql: :boolean, 23 | repo: [:keep, :string], 24 | no_compile: :boolean, 25 | no_deps_check: :boolean, 26 | migrations_path: :keep 27 | ] 28 | 29 | @moduledoc """ 30 | Reverts applied migrations in the given repository. 31 | 32 | Migrations are expected at "priv/YOUR_REPO/migrations" directory 33 | of the current application, where "YOUR_REPO" is the last segment 34 | in your repository name. For example, the repository `MyApp.Repo` 35 | will use "priv/repo/migrations". The repository `Whatever.MyRepo` 36 | will use "priv/my_repo/migrations". 37 | 38 | You can configure a repository to use another directory by specifying 39 | the `:priv` key under the repository configuration. The "migrations" 40 | part will be automatically appended to it. For instance, to use 41 | "priv/custom_repo/migrations": 42 | 43 | config :my_app, MyApp.Repo, priv: "priv/custom_repo" 44 | 45 | This task rolls back the last applied migration by default. To roll 46 | back to a version number, supply `--to version_number`. To roll 47 | back a specific number of times, use `--step n`. To undo all applied 48 | migrations, provide `--all`. 49 | 50 | The repositories to rollback are the ones specified under the 51 | `:ecto_repos` option in the current app configuration. However, 52 | if the `-r` option is given, it replaces the `:ecto_repos` config. 53 | 54 | If a repository has not yet been started, one will be started outside 55 | your application supervision tree and shutdown afterwards. 
56 | 57 | ## Examples 58 | 59 | $ mix ecto.rollback 60 | $ mix ecto.rollback -r Custom.Repo 61 | 62 | $ mix ecto.rollback -n 3 63 | $ mix ecto.rollback --step 3 64 | 65 | $ mix ecto.rollback --to 20080906120000 66 | 67 | ## Command line options 68 | 69 | * `--all` - revert all applied migrations 70 | 71 | * `--log-migrations-sql` - log SQL generated by migration commands 72 | 73 | * `--log-migrator-sql` - log SQL generated by the migrator, such as 74 | transactions, table locks, etc 75 | 76 | * `--migrations-path` - the path to load the migrations from, defaults to 77 | `"priv/repo/migrations"`. This option may be given multiple times in which 78 | case the migrations are loaded from all the given directories and sorted 79 | as if they were in the same one 80 | 81 | * `--no-compile` - does not compile applications before migrating 82 | 83 | * `--no-deps-check` - does not check dependencies before migrating 84 | 85 | * `--pool-size` - the pool size if the repository is started 86 | only for the task (defaults to 2) 87 | 88 | * `--prefix` - the prefix to run migrations on 89 | 90 | * `--quiet` - do not log migration commands 91 | 92 | * `-r`, `--repo` - the repo to migrate 93 | 94 | * `--step`, `-n` - revert n migrations 95 | 96 | 99 | * `--to` - revert all migrations down to and including version 100 | 101 | """ 102 | 103 | @impl true 104 | def run(args, migrator \\ &Ecto.Migrator.run/4) do 105 | repos = parse_repo(args) 106 | {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) 107 | 108 | opts = 109 | if opts[:to] || opts[:step] || opts[:all], 110 | do: opts, 111 | else: Keyword.put(opts, :step, 1) 112 | 113 | opts = 114 | if opts[:quiet], 115 | do: Keyword.merge(opts, [log: false, log_migrations_sql: false, log_migrator_sql: false]), 116 | else: opts 117 | 118 | # Start ecto_sql explicitly before as we don't need 119 | # to
restart those apps if migrated. 120 | {:ok, _} = Application.ensure_all_started(:ecto_sql) 121 | 122 | for repo <- repos do 123 | ensure_repo(repo, args) 124 | paths = ensure_migrations_paths(repo, opts) 125 | pool = repo.config[:pool] 126 | 127 | fun = 128 | if Code.ensure_loaded?(pool) and function_exported?(pool, :unboxed_run, 2) do 129 | &pool.unboxed_run(&1, fn -> migrator.(&1, paths, :down, opts) end) 130 | else 131 | &migrator.(&1, paths, :down, opts) 132 | end 133 | 134 | case Ecto.Migrator.with_repo(repo, fun, [mode: :temporary] ++ opts) do 135 | {:ok, _migrated, _apps} -> :ok 136 | {:error, error} -> Mix.raise "Could not start repo #{inspect repo}, error: #{inspect error}" 137 | end 138 | end 139 | 140 | :ok 141 | end 142 | end 143 | -------------------------------------------------------------------------------- /lib/mix/tasks/ecto.migrate.ex: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.Migrate do 2 | use Mix.Task 3 | import Mix.Ecto 4 | import Mix.EctoSQL 5 | 6 | @shortdoc "Runs the repository migrations" 7 | 8 | @aliases [ 9 | n: :step, 10 | r: :repo 11 | ] 12 | 13 | @switches [ 14 | all: :boolean, 15 | step: :integer, 16 | to: :integer, 17 | quiet: :boolean, 18 | prefix: :string, 19 | pool_size: :integer, 20 | log_sql: :boolean, 21 | log_migrations_sql: :boolean, 22 | log_migrator_sql: :boolean, 23 | strict_version_order: :boolean, 24 | repo: [:keep, :string], 25 | no_compile: :boolean, 26 | no_deps_check: :boolean, 27 | migrations_path: :keep 28 | ] 29 | 30 | @moduledoc """ 31 | Runs the pending migrations for the given repository. 32 | 33 | Migrations are expected at "priv/YOUR_REPO/migrations" directory 34 | of the current application, where "YOUR_REPO" is the last segment 35 | in your repository name. For example, the repository `MyApp.Repo` 36 | will use "priv/repo/migrations". The repository `Whatever.MyRepo` 37 | will use "priv/my_repo/migrations". 
38 | 39 | You can configure a repository to use another directory by specifying 40 | the `:priv` key under the repository configuration. The "migrations" 41 | part will be automatically appended to it. For instance, to use 42 | "priv/custom_repo/migrations": 43 | 44 | config :my_app, MyApp.Repo, priv: "priv/custom_repo" 45 | 46 | This task runs all pending migrations by default. To migrate up to a 47 | specific version number, supply `--to version_number`. To migrate a 48 | specific number of times, use `--step n`. 49 | 50 | The repositories to migrate are the ones specified under the 51 | `:ecto_repos` option in the current app configuration. However, 52 | if the `-r` option is given, it replaces the `:ecto_repos` config. 53 | 54 | Since Ecto tasks can only be executed once, if you need to migrate 55 | multiple repositories, set `:ecto_repos` accordingly or pass the `-r` 56 | flag multiple times. 57 | 58 | If a repository has not yet been started, one will be started outside 59 | your application supervision tree and shutdown afterwards. 60 | 61 | ## Examples 62 | 63 | $ mix ecto.migrate 64 | $ mix ecto.migrate -r Custom.Repo 65 | 66 | $ mix ecto.migrate -n 3 67 | $ mix ecto.migrate --step 3 68 | 69 | $ mix ecto.migrate --to 20080906120000 70 | 71 | ## Command line options 72 | 73 | * `--all` - run all pending migrations 74 | 75 | * `--log-migrations-sql` - log SQL generated by migration commands 76 | 77 | * `--log-migrator-sql` - log SQL generated by the migrator, such as 78 | transactions, table locks, etc 79 | 80 | * `--migrations-path` - the path to load the migrations from, defaults to 81 | `"priv/repo/migrations"`. 
This option may be given multiple times in which 82 | case the migrations are loaded from all the given directories and sorted 83 | as if they were in the same one 84 | 85 | * `--no-compile` - does not compile applications before migrating 86 | 87 | * `--no-deps-check` - does not check dependencies before migrating 88 | 89 | * `--pool-size` - the pool size if the repository is started 90 | only for the task (defaults to 2) 91 | 92 | * `--prefix` - the prefix to run migrations on 93 | 94 | * `--quiet` - do not log migration commands 95 | 96 | * `-r`, `--repo` - the repo to migrate 97 | 98 | * `--step`, `-n` - run n number of pending migrations 99 | 100 | * `--strict-version-order` - abort when applying a migration with old 101 | timestamp (otherwise it emits a warning) 102 | 103 | * `--to` - run all migrations up to and including version 104 | 105 | """ 106 | 107 | @impl true 108 | def run(args, migrator \\ &Ecto.Migrator.run/4) do 109 | repos = parse_repo(args) 110 | {opts, _} = OptionParser.parse! args, strict: @switches, aliases: @aliases 111 | 112 | opts = 113 | if opts[:to] || opts[:step] || opts[:all], 114 | do: opts, 115 | else: Keyword.put(opts, :all, true) 116 | 117 | opts = 118 | if opts[:quiet], 119 | do: Keyword.merge(opts, [log: false, log_migrations_sql: false, log_migrator_sql: false]), 120 | else: opts 121 | 122 | # Start ecto_sql explicitly before as we don't need 123 | # to restart those apps if migrated. 
124 | {:ok, _} = Application.ensure_all_started(:ecto_sql) 125 | 126 | for repo <- repos do 127 | ensure_repo(repo, args) 128 | paths = ensure_migrations_paths(repo, opts) 129 | pool = repo.config[:pool] 130 | 131 | fun = 132 | if Code.ensure_loaded?(pool) and function_exported?(pool, :unboxed_run, 2) do 133 | &pool.unboxed_run(&1, fn -> migrator.(&1, paths, :up, opts) end) 134 | else 135 | &migrator.(&1, paths, :up, opts) 136 | end 137 | 138 | case Ecto.Migrator.with_repo(repo, fun, [mode: :temporary] ++ opts) do 139 | {:ok, _migrated, _apps} -> :ok 140 | {:error, error} -> Mix.raise "Could not start repo #{inspect repo}, error: #{inspect error}" 141 | end 142 | end 143 | 144 | :ok 145 | end 146 | end 147 | -------------------------------------------------------------------------------- /integration_test/sql/subquery.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.SubQueryTest do 2 | use Ecto.Integration.Case, async: Application.get_env(:ecto, :async_integration_tests, true) 3 | 4 | alias Ecto.Integration.TestRepo 5 | import Ecto.Query 6 | alias Ecto.Integration.Post 7 | alias Ecto.Integration.Comment 8 | 9 | test "from: subqueries with select source" do 10 | TestRepo.insert!(%Post{title: "hello", public: true}) 11 | 12 | query = from p in Post, select: p 13 | assert ["hello"] = 14 | TestRepo.all(from p in subquery(query), select: p.title) 15 | assert [post] = 16 | TestRepo.all(from p in subquery(query), select: p) 17 | 18 | assert %NaiveDateTime{} = post.inserted_at 19 | assert post.__meta__.state == :loaded 20 | end 21 | 22 | @tag :map_boolean_in_expression 23 | test "from: subqueries with map and select expression" do 24 | TestRepo.insert!(%Post{title: "hello", public: true}) 25 | 26 | query = from p in Post, select: %{title: p.title, pub: not p.public} 27 | assert ["hello"] = 28 | TestRepo.all(from p in subquery(query), select: p.title) 29 | assert [%{title: "hello", pub: false}] = 30 | 
TestRepo.all(from p in subquery(query), select: p) 31 | assert [{"hello", %{title: "hello", pub: false}}] = 32 | TestRepo.all(from p in subquery(query), select: {p.title, p}) 33 | assert [{%{title: "hello", pub: false}, false}] = 34 | TestRepo.all(from p in subquery(query), select: {p, p.pub}) 35 | end 36 | 37 | @tag :map_boolean_in_expression 38 | test "from: subqueries with map update and select expression" do 39 | TestRepo.insert!(%Post{title: "hello", public: true}) 40 | 41 | query = from p in Post, select: %{p | public: not p.public} 42 | assert ["hello"] = 43 | TestRepo.all(from p in subquery(query), select: p.title) 44 | assert [%Post{title: "hello", public: false}] = 45 | TestRepo.all(from p in subquery(query), select: p) 46 | assert [{"hello", %Post{title: "hello", public: false}}] = 47 | TestRepo.all(from p in subquery(query), select: {p.title, p}) 48 | assert [{%Post{title: "hello", public: false}, false}] = 49 | TestRepo.all(from p in subquery(query), select: {p, p.public}) 50 | end 51 | 52 | test "from: subqueries with map update on virtual field and select expression" do 53 | TestRepo.insert!(%Post{title: "hello"}) 54 | 55 | query = from p in Post, select: %{p | temp: p.title} 56 | assert ["hello"] = 57 | TestRepo.all(from p in subquery(query), select: p.temp) 58 | assert [%Post{title: "hello", temp: "hello"}] = 59 | TestRepo.all(from p in subquery(query), select: p) 60 | end 61 | 62 | @tag :subquery_aggregates 63 | test "from: subqueries with aggregates" do 64 | TestRepo.insert!(%Post{visits: 10}) 65 | TestRepo.insert!(%Post{visits: 11}) 66 | TestRepo.insert!(%Post{visits: 13}) 67 | 68 | query = from p in Post, select: [:visits], order_by: [asc: :visits] 69 | assert [13] = TestRepo.all(from p in subquery(query), select: max(p.visits)) 70 | query = from p in Post, select: [:visits], order_by: [asc: :visits], limit: 2 71 | assert [11] = TestRepo.all(from p in subquery(query), select: max(p.visits)) 72 | 73 | query = from p in Post, order_by: [asc: 
:visits] 74 | assert [13] = TestRepo.all(from p in subquery(query), select: max(p.visits)) 75 | query = from p in Post, order_by: [asc: :visits], limit: 2 76 | assert [11] = TestRepo.all(from p in subquery(query), select: max(p.visits)) 77 | end 78 | 79 | test "from: subqueries with parameters" do 80 | TestRepo.insert!(%Post{visits: 10, title: "hello"}) 81 | TestRepo.insert!(%Post{visits: 11, title: "hello"}) 82 | TestRepo.insert!(%Post{visits: 13, title: "world"}) 83 | 84 | query = from p in Post, where: p.visits >= ^11 and p.visits <= ^13 85 | query = from p in subquery(query), where: p.title == ^"hello", select: fragment("? + ?", p.visits, ^1) 86 | assert [12] = TestRepo.all(query) 87 | end 88 | 89 | test "join: subqueries with select source" do 90 | %{id: id} = TestRepo.insert!(%Post{title: "hello", public: true}) 91 | TestRepo.insert!(%Comment{post_id: id}) 92 | 93 | query = from p in Post, select: p 94 | assert ["hello"] = 95 | TestRepo.all(from c in Comment, join: p in subquery(query), on: c.post_id == p.id, select: p.title) 96 | assert [%Post{inserted_at: %NaiveDateTime{}}] = 97 | TestRepo.all(from c in Comment, join: p in subquery(query), on: c.post_id == p.id, select: p) 98 | end 99 | 100 | test "join: subqueries with parameters" do 101 | TestRepo.insert!(%Post{visits: 10, title: "hello"}) 102 | TestRepo.insert!(%Post{visits: 11, title: "hello"}) 103 | TestRepo.insert!(%Post{visits: 13, title: "world"}) 104 | TestRepo.insert!(%Comment{}) 105 | TestRepo.insert!(%Comment{}) 106 | 107 | query = from p in Post, where: p.visits >= ^11 and p.visits <= ^13 108 | query = from c in Comment, 109 | join: p in subquery(query), 110 | where: p.title == ^"hello", 111 | select: fragment("? 
+ ?", p.visits, ^1) 112 | assert [12, 12] = TestRepo.all(query) 113 | end 114 | end 115 | -------------------------------------------------------------------------------- /integration_test/pg/storage_test.exs: -------------------------------------------------------------------------------- 1 | Code.require_file("../support/file_helpers.exs", __DIR__) 2 | 3 | defmodule Ecto.Integration.StorageTest do 4 | use ExUnit.Case 5 | 6 | @moduletag :capture_log 7 | @base_migration 5_000_000 8 | 9 | import Support.FileHelpers 10 | alias Ecto.Adapters.Postgres 11 | alias Ecto.Integration.{PoolRepo, TestRepo} 12 | 13 | def params do 14 | # Pass log false to ensure we can still create/drop. 15 | url = Application.get_env(:ecto_sql, :pg_test_url) <> "/storage_mgt" 16 | [log: false] ++ Ecto.Repo.Supervisor.parse_url(url) 17 | end 18 | 19 | def wrong_params do 20 | Keyword.merge(params(), 21 | username: "randomuser", 22 | password: "password1234" 23 | ) 24 | end 25 | 26 | def drop_database do 27 | run_psql("DROP DATABASE #{params()[:database]};") 28 | end 29 | 30 | def create_database(owner \\ nil) do 31 | query = "CREATE DATABASE #{params()[:database]}" 32 | query = if owner do 33 | query <> " OWNER #{owner};" 34 | else 35 | query <> ";" 36 | end 37 | run_psql(query) 38 | end 39 | 40 | def create_posts do 41 | run_psql("CREATE TABLE posts (title varchar(20));", [params()[:database]]) 42 | end 43 | 44 | def run_psql(sql, args \\ []) do 45 | params = params() 46 | env = if password = params[:password], do: [{"PGPASSWORD", password}], else: [] 47 | 48 | args = [ 49 | "-U", 50 | params[:username], 51 | "--host", 52 | params[:hostname], 53 | "-p", 54 | to_string(params[:port] || 5432), 55 | "-c", 56 | sql | args 57 | ] 58 | 59 | System.cmd("psql", args, env: env) 60 | end 61 | 62 | test "storage up (twice in a row)" do 63 | assert Postgres.storage_up(params()) == :ok 64 | assert Postgres.storage_up(params()) == {:error, :already_up} 65 | after 66 | drop_database() 67 | end 68 | 69 | 
test "storage down (twice in a row)" do 70 | create_database() 71 | assert Postgres.storage_down(params()) == :ok 72 | assert Postgres.storage_down(params()) == {:error, :already_down} 73 | end 74 | 75 | test "storage up and down (wrong credentials)" do 76 | refute Postgres.storage_up(wrong_params()) == :ok 77 | create_database() 78 | refute Postgres.storage_down(wrong_params()) == :ok 79 | after 80 | drop_database() 81 | end 82 | 83 | test "storage up with unprivileged user with access to the database" do 84 | unprivileged_params = Keyword.merge(params(), 85 | username: "unprivileged", 86 | password: "pass" 87 | ) 88 | run_psql("CREATE USER unprivileged WITH NOCREATEDB PASSWORD 'pass'") 89 | refute Postgres.storage_up(unprivileged_params) == :ok 90 | create_database("unprivileged") 91 | assert Postgres.storage_up(unprivileged_params) == {:error, :already_up} 92 | after 93 | run_psql("DROP USER unprivileged") 94 | drop_database() 95 | end 96 | 97 | test "structure dump and load" do 98 | create_database() 99 | create_posts() 100 | 101 | # Default path 102 | {:ok, _} = Postgres.structure_dump(tmp_path(), params()) 103 | dump = File.read!(Path.join(tmp_path(), "structure.sql")) 104 | 105 | drop_database() 106 | create_database() 107 | 108 | # Load custom 109 | dump_path = Path.join(tmp_path(), "custom.sql") 110 | File.rm(dump_path) 111 | {:error, _} = Postgres.structure_load(tmp_path(), [dump_path: dump_path] ++ params()) 112 | 113 | # Dump custom 114 | {:ok, _} = Postgres.structure_dump(tmp_path(), [dump_path: dump_path] ++ params()) 115 | assert dump != File.read!(dump_path) 116 | 117 | # Load original 118 | {:ok, _} = Postgres.structure_load(tmp_path(), params()) 119 | 120 | {:ok, _} = Postgres.structure_dump(tmp_path(), [dump_path: dump_path] ++ params()) 121 | assert dump == File.read!(dump_path) 122 | after 123 | drop_database() 124 | end 125 | 126 | test "structure load will fail on SQL errors" do 127 | File.mkdir_p!(tmp_path()) 128 | error_path = 
Path.join(tmp_path(), "error.sql") 129 | File.write!(error_path, "DO $$ BEGIN RAISE EXCEPTION 'failing SQL'; END $$;") 130 | 131 | {:error, message} = 132 | Postgres.structure_load(tmp_path(), [dump_path: error_path] ++ TestRepo.config()) 133 | 134 | assert message =~ ~r/ERROR.*failing SQL/ 135 | end 136 | 137 | defmodule Migration do 138 | use Ecto.Migration 139 | def change, do: :ok 140 | end 141 | 142 | test "structure dump and load with migrations table" do 143 | num = @base_migration + System.unique_integer([:positive]) 144 | :ok = Ecto.Migrator.up(PoolRepo, num, Migration, log: false) 145 | {:ok, path} = Postgres.structure_dump(tmp_path(), TestRepo.config()) 146 | contents = File.read!(path) 147 | assert contents =~ ~s[INSERT INTO public."schema_migrations" (version) VALUES] 148 | end 149 | 150 | test "storage status is up when database is created" do 151 | create_database() 152 | assert :up == Postgres.storage_status(params()) 153 | after 154 | drop_database() 155 | end 156 | 157 | test "storage status is down when database is not created" do 158 | create_database() 159 | drop_database() 160 | assert :down == Postgres.storage_status(params()) 161 | end 162 | 163 | test "storage status is an error when wrong credentials are passed" do 164 | assert ExUnit.CaptureLog.capture_log(fn -> 165 | assert {:error, _} = Postgres.storage_status(wrong_params()) 166 | end) =~ ~r"FATAL (28000|28P01)" 167 | end 168 | end 169 | -------------------------------------------------------------------------------- /integration_test/myxql/storage_test.exs: -------------------------------------------------------------------------------- 1 | Code.require_file("../support/file_helpers.exs", __DIR__) 2 | 3 | defmodule Ecto.Integration.StorageTest do 4 | use ExUnit.Case 5 | 6 | @moduletag :capture_log 7 | @base_migration 5_000_000 8 | 9 | import Support.FileHelpers 10 | alias Ecto.Integration.{PoolRepo, TestRepo} 11 | 12 | def params do 13 | # Pass log false to ensure we can still 
create/drop. 14 | url = Application.get_env(:ecto_sql, :mysql_test_url) <> "/storage_mgt" 15 | [log: false] ++ Ecto.Repo.Supervisor.parse_url(url) 16 | end 17 | 18 | def wrong_params do 19 | Keyword.merge(params(), 20 | username: "randomuser", 21 | password: "password1234" 22 | ) 23 | end 24 | 25 | def drop_database do 26 | run_mysql("DROP DATABASE #{params()[:database]};") 27 | end 28 | 29 | def create_database(grant_privileges_to \\ nil) do 30 | run_mysql("CREATE DATABASE #{params()[:database]};") 31 | if grant_privileges_to do 32 | run_mysql("GRANT ALL PRIVILEGES ON #{params()[:database]}.* to #{grant_privileges_to}") 33 | end 34 | end 35 | 36 | def create_posts do 37 | run_mysql("CREATE TABLE posts (title varchar(20));", ["-D", params()[:database]]) 38 | end 39 | 40 | def run_mysql(sql, args \\ []) do 41 | params = params() 42 | env = if password = params[:password], do: [{"MYSQL_PWD", password}], else: [] 43 | 44 | args = [ 45 | "-u", 46 | params[:username], 47 | "--host", 48 | params[:hostname], 49 | "--port", 50 | to_string(params[:port] || 3306), 51 | "-e", 52 | sql | args 53 | ] 54 | 55 | System.cmd("mysql", args, env: env) 56 | end 57 | 58 | test "storage up (twice in a row)" do 59 | assert Ecto.Adapters.MyXQL.storage_up(params()) == :ok 60 | assert Ecto.Adapters.MyXQL.storage_up(params()) == {:error, :already_up} 61 | after 62 | drop_database() 63 | end 64 | 65 | test "storage down (twice in a row)" do 66 | create_database() 67 | assert Ecto.Adapters.MyXQL.storage_down(params()) == :ok 68 | assert Ecto.Adapters.MyXQL.storage_down(params()) == {:error, :already_down} 69 | end 70 | 71 | test "storage up and down (wrong credentials)" do 72 | refute Ecto.Adapters.MyXQL.storage_up(wrong_params()) == :ok 73 | create_database() 74 | refute Ecto.Adapters.MyXQL.storage_down(wrong_params()) == :ok 75 | after 76 | drop_database() 77 | end 78 | 79 | test "storage up with unprivileged user with access to the database" do 80 | unprivileged_params = 
Keyword.merge(params(), 81 | username: "unprivileged", 82 | password: "pass" 83 | ) 84 | run_mysql("CREATE USER unprivileged IDENTIFIED BY 'pass'") 85 | refute Ecto.Adapters.MyXQL.storage_up(unprivileged_params) == :ok 86 | create_database("unprivileged") 87 | assert Ecto.Adapters.MyXQL.storage_up(unprivileged_params) == {:error, :already_up} 88 | after 89 | run_mysql("DROP USER unprivileged") 90 | drop_database() 91 | end 92 | 93 | test "structure dump and load" do 94 | create_database() 95 | create_posts() 96 | 97 | # Default path 98 | {:ok, _} = Ecto.Adapters.MyXQL.structure_dump(tmp_path(), params()) 99 | dump = File.read!(Path.join(tmp_path(), "structure.sql")) 100 | 101 | drop_database() 102 | create_database() 103 | 104 | # Load custom 105 | dump_path = Path.join(tmp_path(), "custom.sql") 106 | File.rm(dump_path) 107 | 108 | {:error, _} = 109 | Ecto.Adapters.MyXQL.structure_load(tmp_path(), [dump_path: dump_path] ++ params()) 110 | 111 | # Dump custom 112 | {:ok, _} = Ecto.Adapters.MyXQL.structure_dump(tmp_path(), [dump_path: dump_path] ++ params()) 113 | assert strip_timestamp(dump) != strip_timestamp(File.read!(dump_path)) 114 | 115 | # Load original 116 | {:ok, _} = Ecto.Adapters.MyXQL.structure_load(tmp_path(), params()) 117 | 118 | {:ok, _} = Ecto.Adapters.MyXQL.structure_dump(tmp_path(), [dump_path: dump_path] ++ params()) 119 | assert strip_timestamp(dump) == strip_timestamp(File.read!(dump_path)) 120 | after 121 | drop_database() 122 | end 123 | 124 | test "storage status is up when database is created" do 125 | create_database() 126 | assert :up == Ecto.Adapters.MyXQL.storage_status(params()) 127 | after 128 | drop_database() 129 | end 130 | 131 | test "storage status is down when database is not created" do 132 | create_database() 133 | drop_database() 134 | assert :down == Ecto.Adapters.MyXQL.storage_status(params()) 135 | end 136 | 137 | test "storage status is an error when wrong credentials are passed" do 138 | assert 
ExUnit.CaptureLog.capture_log(fn -> 139 | assert {:error, _} = Ecto.Adapters.MyXQL.storage_status(wrong_params()) 140 | end) =~ "(1045) (ER_ACCESS_DENIED_ERROR)" 141 | end 142 | 143 | defmodule Migration do 144 | use Ecto.Migration 145 | def change, do: :ok 146 | end 147 | 148 | test "structure dump and load with migrations table" do 149 | num = @base_migration + System.unique_integer([:positive]) 150 | :ok = Ecto.Migrator.up(PoolRepo, num, Migration, log: false) 151 | {:ok, path} = Ecto.Adapters.MyXQL.structure_dump(tmp_path(), TestRepo.config()) 152 | contents = File.read!(path) 153 | assert contents =~ "INSERT INTO `schema_migrations` (version) VALUES (" 154 | end 155 | 156 | defp strip_timestamp(dump) do 157 | dump 158 | |> String.split("\n") 159 | |> Enum.reject(&String.contains?(&1, "completed on")) 160 | |> Enum.join("\n") 161 | end 162 | end 163 | -------------------------------------------------------------------------------- /integration_test/tds/test_helper.exs: -------------------------------------------------------------------------------- 1 | Logger.configure(level: :info) 2 | 3 | ExUnit.start( 4 | exclude: [ 5 | # not sure how to support this yet 6 | :aggregate_filters, 7 | # subquery contains ORDER BY and that is not supported 8 | :subquery_aggregates, 9 | # sql don't have array type 10 | :array_type, 11 | # upserts can only be supported with MERGE statement and it is tricky to make it fast 12 | :upsert, 13 | :upsert_all, 14 | # mssql rounds differently than ecto/integration_test/cases/interval.exs 15 | :uses_msec, 16 | # unique index compares even NULL values for post_id, so below fails inserting permalinks without setting valid post_id 17 | :insert_cell_wise_defaults, 18 | # MSSQL does not support strings on text fields 19 | :text_type_as_string, 20 | # IDENTITY_INSERT ON/OFF must be manually executed 21 | :assigns_id_type, 22 | # without schema we don't know anything about :map and :embeds, where value is kept in nvarchar(max) column 23 | 
:map_type_schemaless, 24 | # SELECT NOT(t.bool_fields) is not supported by sql server 25 | :map_boolean_in_expression, 26 | # Decimal casting can not be precise in MSSQL adapter since precision is kept in migration file :( 27 | # or in case of schema-less queries we don't know at all about precision 28 | :decimal_precision, 29 | # this fails because schema-less queries in select uses Decimal casting, 30 | # see below comment about :decimal_type_cast exclusion or :decimal_type_cast 31 | :union_with_literals, 32 | # inline queries can't use order by 33 | :inline_order_by, 34 | # running destruction of PK columns requires that PK constraint is dropped first 35 | :alter_primary_key, 36 | # below 2 exclusions (in theory) requires filtered unique index on permalinks table post_id column e.g. 37 | # CREATE UNIQUE NONCLUSTERED INDEX idx_tbl_TestUnique_ID 38 | # ON [permalinks] ([post_id]) 39 | # WHERE [post_id] IS NOT NULL 40 | # But I couldn't make it work :( 41 | :on_replace_nilify, 42 | :on_replace_update, 43 | # Tds allows nested transactions so this will never raise and SQL query should be "BEGIN TRAN" 44 | :transaction_checkout_raises, 45 | # JSON_VALUE always returns strings (even for e.g. 
integers) and returns null for 46 | # arrays/objects (JSON_QUERY must be used for these) 47 | :json_extract_path 48 | ] 49 | ) 50 | 51 | Application.put_env(:tds, :json_library, Jason) 52 | Application.put_env(:ecto, :primary_key_type, :id) 53 | Application.put_env(:ecto, :async_integration_tests, false) 54 | Application.put_env(:ecto_sql, :lock_for_update, "(UPDLOCK)") 55 | 56 | Application.put_env( 57 | :ecto_sql, 58 | :tds_test_url, 59 | "ecto://" <> (System.get_env("MSSQL_URL") || "sa:some!Password@localhost") 60 | ) 61 | 62 | alias Ecto.Integration.TestRepo 63 | 64 | # Load support files 65 | ecto = Mix.Project.deps_paths()[:ecto] 66 | Code.require_file("../support/repo.exs", __DIR__) 67 | 68 | Application.put_env( 69 | :ecto_sql, 70 | TestRepo, 71 | url: Application.get_env(:ecto_sql, :tds_test_url) <> "/ecto_test", 72 | pool: Ecto.Adapters.SQL.Sandbox, 73 | set_allow_snapshot_isolation: :on, 74 | show_sensitive_data_on_connection_error: true 75 | ) 76 | 77 | defmodule Ecto.Integration.TestRepo do 78 | use Ecto.Integration.Repo, 79 | otp_app: :ecto_sql, 80 | adapter: Ecto.Adapters.Tds 81 | 82 | def uuid, do: Tds.Ecto.UUID 83 | 84 | def create_prefix(prefix) do 85 | """ 86 | CREATE SCHEMA #{prefix}; 87 | """ 88 | end 89 | 90 | def drop_prefix(prefix) do 91 | """ 92 | DROP SCHEMA #{prefix}; 93 | """ 94 | end 95 | end 96 | 97 | Code.require_file("#{ecto}/integration_test/support/schemas.exs", __DIR__) 98 | Code.require_file("../support/migration.exs", __DIR__) 99 | 100 | alias Ecto.Integration.PoolRepo 101 | 102 | Application.put_env( 103 | :ecto_sql, 104 | PoolRepo, 105 | url: "#{Application.get_env(:ecto_sql, :tds_test_url)}/ecto_test", 106 | pool_size: 10, 107 | set_allow_snapshot_isolation: :on 108 | ) 109 | 110 | defmodule Ecto.Integration.PoolRepo do 111 | use Ecto.Integration.Repo, 112 | otp_app: :ecto_sql, 113 | adapter: Ecto.Adapters.Tds 114 | 115 | def create_prefix(prefix) do 116 | "create schema #{prefix}" 117 | end 118 | 119 | def 
drop_prefix(prefix) do 120 | "drop schema #{prefix}" 121 | end 122 | end 123 | 124 | defmodule Ecto.Integration.Case do 125 | use ExUnit.CaseTemplate 126 | 127 | setup context do 128 | level = Map.get(context, :isolation_level, :read_committed) 129 | :ok = Ecto.Adapters.SQL.Sandbox.checkout(TestRepo, isolation_level: level) 130 | end 131 | end 132 | 133 | # :dbg.start() 134 | # :dbg.tracer() 135 | # :dbg.p(:all,:c) 136 | # :dbg.tpl(Ecto.Adapters.Tds.Connection, :column_change, :x) 137 | # :dbg.tpl(Ecto.Adapters.Tds.Connection, :execute_ddl, :x) 138 | # :dbg.tpl(Ecto.Adapters.Tds.Connection, :all, :x) 139 | # :dbg.tpl(Tds.Parameter, :prepare_params, :x) 140 | # :dbg.tpl(Tds.Parameter, :prepared_params, :x) 141 | 142 | {:ok, _} = Ecto.Adapters.Tds.ensure_all_started(TestRepo.config(), :temporary) 143 | 144 | # Load up the repository, start it, and run migrations 145 | _ = Ecto.Adapters.Tds.storage_down(TestRepo.config()) 146 | :ok = Ecto.Adapters.Tds.storage_up(TestRepo.config()) 147 | 148 | {:ok, _pid} = TestRepo.start_link() 149 | {:ok, _pid} = PoolRepo.start_link() 150 | :ok = Ecto.Migrator.up(TestRepo, 0, Ecto.Integration.Migration, log: :debug) 151 | Ecto.Adapters.SQL.Sandbox.mode(TestRepo, :manual) 152 | Process.flag(:trap_exit, true) 153 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "benchee": {:hex, :benchee, "0.11.0", "cf96e328ff5d69838dd89c21a9db22716bfcc6ef772e9d9dddf7ba622102722d", [:mix], [{:deep_merge, "~> 0.1", [hex: :deep_merge, repo: "hexpm", optional: false]}], "hexpm", "c345e090e0a61bf33e0385aa3ad394fcb7d863e313bc3fca522e390c7f39166e"}, 3 | "benchee_json": {:hex, :benchee_json, "0.4.0", "59d3277829bd1dca8373cdb20b916cb435c2647be235d09963fc0959db908c36", [:mix], [{:benchee, "~> 0.10", [hex: :benchee, repo: "hexpm", optional: false]}, {:poison, ">= 1.4.0", [hex: :poison, repo: "hexpm", optional: false]}], 
"hexpm", "71a3edb6a30708de2a01368aa8f288e1c0ed7897b125adc396ce7c2c7245b1e7"}, 4 | "connection": {:hex, :connection, "1.1.0", "ff2a49c4b75b6fb3e674bfc5536451607270aac754ffd1bdfe175abe4a6d7a68", [:mix], [], "hexpm", "722c1eb0a418fbe91ba7bd59a47e28008a189d47e37e0e7bb85585a016b2869c"}, 5 | "db_connection": {:hex, :db_connection, "2.3.1", "4c9f3ed1ef37471cbdd2762d6655be11e38193904d9c5c1c9389f1b891a3088e", [:mix], [{:connection, "~> 1.0", [hex: :connection, repo: "hexpm", optional: false]}], "hexpm", "abaab61780dde30301d840417890bd9f74131041afd02174cf4e10635b3a63f5"}, 6 | "decimal": {:hex, :decimal, "1.9.0", "83e8daf59631d632b171faabafb4a9f4242c514b0a06ba3df493951c08f64d07", [:mix], [], "hexpm", "b1f2343568eed6928f3e751cf2dffde95bfaa19dd95d09e8a9ea92ccfd6f7d85"}, 7 | "deep_merge": {:hex, :deep_merge, "0.2.0", "c1050fa2edf4848b9f556fba1b75afc66608a4219659e3311d9c9427b5b680b3", [:mix], [], "hexpm", "e3bf435a54ed27b0ba3a01eb117ae017988804e136edcbe8a6a14c310daa966e"}, 8 | "earmark_parser": {:hex, :earmark_parser, "1.4.16", "607709303e1d4e3e02f1444df0c821529af1c03b8578dfc81bb9cf64553d02b9", [:mix], [], "hexpm", "69fcf696168f5a274dd012e3e305027010658b2d1630cef68421d6baaeaccead"}, 9 | "ecto": {:hex, :ecto, "3.7.0", "0b250b4aa5a9cdb80252802bd535c54c963e2d83f5bd179a57c093ed0779994b", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "3a212cecd544a6f3d00921bc3e7545070eb50b9a1454525323027bf07eba1165"}, 10 | "ex_doc": {:hex, :ex_doc, "0.25.3", "3edf6a0d70a39d2eafde030b8895501b1c93692effcbd21347296c18e47618ce", [:mix], [{:earmark_parser, "~> 1.4.0", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", 
"9ebebc2169ec732a38e9e779fd0418c9189b3ca93f4a676c961be6c1527913f5"}, 11 | "jason": {:hex, :jason, "1.2.2", "ba43e3f2709fd1aa1dce90aaabfd039d000469c05c56f0b8e31978e03fa39052", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "18a228f5f0058ee183f29f9eae0805c6e59d61c3b006760668d8d18ff0d12179"}, 12 | "makeup": {:hex, :makeup, "1.0.5", "d5a830bc42c9800ce07dd97fa94669dfb93d3bf5fcf6ea7a0c67b2e0e4a7f26c", [:mix], [{:nimble_parsec, "~> 0.5 or ~> 1.0", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "cfa158c02d3f5c0c665d0af11512fed3fba0144cf1aadee0f2ce17747fba2ca9"}, 13 | "makeup_elixir": {:hex, :makeup_elixir, "0.15.1", "b5888c880d17d1cc3e598f05cdb5b5a91b7b17ac4eaf5f297cb697663a1094dd", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.1", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "db68c173234b07ab2a07f645a5acdc117b9f99d69ebf521821d89690ae6c6ec8"}, 14 | "makeup_erlang": {:hex, :makeup_erlang, "0.1.1", "3fcb7f09eb9d98dc4d208f49cc955a34218fc41ff6b84df7c75b3e6e533cc65f", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "174d0809e98a4ef0b3309256cbf97101c6ec01c4ab0b23e926a9e17df2077cbb"}, 15 | "myxql": {:hex, :myxql, "0.5.1", "42cc502f9f373eeebfe6753266c0b601c01a6a96e4d861d429a4952ffb396689", [:mix], [{:db_connection, "~> 2.0", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:geo, "~> 3.3", [hex: :geo, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "73c6b684ae119ef9707a755f185f1410ec611ee748e54b9b1b1ff4aab4bc48d7"}, 16 | "nimble_parsec": {:hex, :nimble_parsec, "1.1.0", "3a6fca1550363552e54c216debb6a9e95bd8d32348938e13de5eda962c0d7f89", [:mix], [], "hexpm", "08eb32d66b706e913ff748f11694b17981c0b04a33ef470e33e11b3d3ac8f54b"}, 17 | "poison": {:hex, 
:poison, "4.0.1", "bcb755a16fac91cad79bfe9fc3585bb07b9331e50cfe3420a24bcc2d735709ae", [:mix], [], "hexpm", "ba8836feea4b394bb718a161fc59a288fe0109b5006d6bdf97b6badfcf6f0f25"}, 18 | "postgrex": {:hex, :postgrex, "0.15.5", "aec40306a622d459b01bff890fa42f1430dac61593b122754144ad9033a2152f", [:mix], [{:connection, "~> 1.0", [hex: :connection, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "ed90c81e1525f65a2ba2279dbcebf030d6d13328daa2f8088b9661eb9143af7f"}, 19 | "tds": {:hex, :tds, "2.1.1", "b6163ea716d74ed90a3a83668db2e7c74c1e722fd3538ef5758e0a084fde8d60", [:mix], [{:db_connection, "~> 2.0", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.6", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "6b28e1f06a57867eb6b1a957ae5d872b09214ba771ef08cf5ca9d52d6d372876"}, 20 | "telemetry": {:hex, :telemetry, "0.4.3", "a06428a514bdbc63293cd9a6263aad00ddeb66f608163bdec7c8995784080818", [:rebar3], [], "hexpm", "eb72b8365ffda5bed68a620d1da88525e326cb82a75ee61354fc24b844768041"}, 21 | } 22 | -------------------------------------------------------------------------------- /test/mix/tasks/ecto.dump_load_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Mix.Tasks.Ecto.DumpLoadTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias Mix.Tasks.Ecto.{Load, Dump} 5 | 6 | # Mocked adapters 7 | 8 | defmodule Adapter do 9 | @behaviour Ecto.Adapter 10 | @behaviour Ecto.Adapter.Structure 11 | 12 | defmacro __before_compile__(_), do: :ok 13 | def dumpers(_, _), do: raise "not implemented" 14 | def loaders(_, _), do: raise "not implemented" 15 | def checkout(_, _, _), do: raise "not implemented" 16 | def checked_out?(_), do: raise "not 
implemented" 17 | def ensure_all_started(_, _), do: {:ok, []} 18 | 19 | def init(_opts) do 20 | child_spec = Supervisor.child_spec({Task, fn -> :timer.sleep(:infinity) end}, []) 21 | {:ok, child_spec, %{}} 22 | end 23 | 24 | def structure_dump(_, _), do: Process.get(:structure_dump) || raise "no structure_dump" 25 | def structure_load(_, _), do: Process.get(:structure_load) || raise "no structure_load" 26 | end 27 | 28 | defmodule NoStructureAdapter do 29 | @behaviour Ecto.Adapter 30 | defmacro __before_compile__(_), do: :ok 31 | def dumpers(_, _), do: raise "not implemented" 32 | def loaders(_, _), do: raise "not implemented" 33 | def init(_), do: raise "not implemented" 34 | def checkout(_, _, _), do: raise "not implemented" 35 | def checked_out?(_), do: raise "not implemented" 36 | def ensure_all_started(_, _), do: raise "not implemented" 37 | end 38 | 39 | # Mocked repos 40 | 41 | defmodule Repo do 42 | use Ecto.Repo, otp_app: :ecto_sql, adapter: Adapter 43 | end 44 | 45 | defmodule MigrationRepo do 46 | use Ecto.Repo, otp_app: :ecto_sql, adapter: Adapter 47 | end 48 | 49 | defmodule NoStructureRepo do 50 | use Ecto.Repo, otp_app: :ecto_sql, adapter: NoStructureAdapter 51 | end 52 | 53 | setup do 54 | Application.put_env(:ecto_sql, __MODULE__.Repo, []) 55 | Application.put_env(:ecto_sql, __MODULE__.NoStructureRepo, []) 56 | end 57 | 58 | ## Dump 59 | 60 | test "runs the adapter structure_dump" do 61 | Process.put(:structure_dump, {:ok, "foo"}) 62 | Dump.run ["-r", to_string(Repo)] 63 | assert_received {:mix_shell, :info, ["The structure for Mix.Tasks.Ecto.DumpLoadTest.Repo has been dumped to foo"]} 64 | end 65 | 66 | test "runs the adapter structure_dump for migration_repo" do 67 | Application.put_env(:ecto_sql, Repo, [migration_repo: MigrationRepo]) 68 | 69 | Process.put(:structure_dump, {:ok, "foo"}) 70 | Dump.run ["-r", to_string(Repo)] 71 | 72 | repo_msg = "The structure for Mix.Tasks.Ecto.DumpLoadTest.Repo has been dumped to foo" 73 | assert_received 
{:mix_shell, :info, [^repo_msg]} 74 | 75 | migration_repo_msg = "The structure for Mix.Tasks.Ecto.DumpLoadTest.MigrationRepo has been dumped to foo" 76 | assert_received {:mix_shell, :info, [^migration_repo_msg]} 77 | end 78 | 79 | test "runs the adapter structure_dump with --quiet" do 80 | Process.put(:structure_dump, {:ok, "foo"}) 81 | Dump.run ["-r", to_string(Repo), "--quiet"] 82 | refute_received {:mix_shell, :info, [_]} 83 | end 84 | 85 | test "raises an error when structure_dump gives an unknown feedback" do 86 | Process.put(:structure_dump, {:error, :confused}) 87 | assert_raise Mix.Error, fn -> 88 | Dump.run ["-r", to_string(Repo)] 89 | end 90 | end 91 | 92 | test "raises an error on structure_dump when the adapter doesn't define a storage" do 93 | assert_raise Mix.Error, ~r/to implement Ecto.Adapter.Structure/, fn -> 94 | Dump.run ["-r", to_string(NoStructureRepo)] 95 | end 96 | end 97 | 98 | ## Load 99 | 100 | test "runs the adapter structure_load" do 101 | table_exists? = fn _, _ -> false end 102 | 103 | Process.put(:structure_load, {:ok, "foo"}) 104 | Load.run ["-r", to_string(Repo)], table_exists? 105 | 106 | msg = "The structure for Mix.Tasks.Ecto.DumpLoadTest.Repo has been loaded from foo" 107 | assert_received {:mix_shell, :info, [^msg]} 108 | end 109 | 110 | test "runs the adapter structure_load for migration_repo" do 111 | Application.put_env(:ecto_sql, Repo, [migration_repo: MigrationRepo]) 112 | 113 | table_exists? = fn _, _ -> false end 114 | 115 | Process.put(:structure_load, {:ok, "foo"}) 116 | Load.run ["-r", to_string(Repo)], table_exists? 
117 | 118 | repo_msg = "The structure for Mix.Tasks.Ecto.DumpLoadTest.Repo has been loaded from foo" 119 | assert_received {:mix_shell, :info, [^repo_msg]} 120 | 121 | migration_repo_msg = "The structure for Mix.Tasks.Ecto.DumpLoadTest.MigrationRepo has been loaded from foo" 122 | assert_received {:mix_shell, :info, [^migration_repo_msg]} 123 | end 124 | 125 | test "runs the adapter structure_load with --quiet" do 126 | table_exists? = fn _, _ -> false end 127 | Process.put(:structure_load, {:ok, "foo"}) 128 | Load.run ["-r", to_string(Repo), "--quiet"], table_exists? 129 | refute_received {:mix_shell, :info, [_]} 130 | end 131 | 132 | test "skips when the database is loaded with --skip-if-loaded" do 133 | table_exists? = fn _, _ -> true end 134 | assert :ok == Load.run ["-r", to_string(Repo), "--skip-if-loaded"], table_exists? 135 | end 136 | 137 | test "raises an error when structure_load gives an unknown feedback" do 138 | table_exists? = fn _, _ -> false end 139 | 140 | Process.put(:structure_load, {:error, :confused}) 141 | assert_raise Mix.Error, fn -> 142 | Load.run ["-r", to_string(Repo)], table_exists? 
143 | end 144 | end 145 | 146 | test "raises an error on structure_load when the adapter doesn't define a storage" do 147 | assert_raise Mix.Error, ~r/to implement Ecto.Adapter.Structure/, fn -> 148 | Load.run ["-r", to_string(NoStructureRepo)] 149 | end 150 | end 151 | end 152 | -------------------------------------------------------------------------------- /integration_test/sql/sql.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.SQLTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias Ecto.Integration.Barebone 6 | alias Ecto.Integration.Post 7 | alias Ecto.Integration.CorruptedPk 8 | import Ecto.Query, only: [from: 2] 9 | 10 | test "fragmented types" do 11 | datetime = ~N[2014-01-16 20:26:51] 12 | TestRepo.insert!(%Post{inserted_at: datetime}) 13 | query = from p in Post, where: fragment("? >= ?", p.inserted_at, ^datetime), select: p.inserted_at 14 | assert [^datetime] = TestRepo.all(query) 15 | end 16 | 17 | test "fragmented schemaless types" do 18 | TestRepo.insert!(%Post{visits: 123}) 19 | assert [123] = TestRepo.all(from p in "posts", select: type(fragment("visits"), :integer)) 20 | end 21 | 22 | @tag :array_type 23 | test "fragment array types" do 24 | text1 = "foo" 25 | text2 = "bar" 26 | result = TestRepo.query!("SELECT $1::text[]", [[text1, text2]]) 27 | assert result.rows == [[[text1, text2]]] 28 | end 29 | 30 | test "query!/4 with dynamic repo" do 31 | TestRepo.put_dynamic_repo(:unknown) 32 | assert_raise RuntimeError, ~r/:unknown/, fn -> TestRepo.query!("SELECT 1") end 33 | end 34 | 35 | test "query!/4" do 36 | result = TestRepo.query!("SELECT 1") 37 | assert result.rows == [[1]] 38 | end 39 | 40 | test "query!/4 with iodata" do 41 | result = TestRepo.query!(["SELECT", ?\s, ?1]) 42 | assert result.rows == [[1]] 43 | end 44 | 45 | test "to_sql/3" do 46 | {sql, []} = TestRepo.to_sql(:all, Barebone) 47 | assert sql =~ "SELECT" 48 | assert 
sql =~ "barebones" 49 | 50 | {sql, [0]} = TestRepo.to_sql(:update_all, from(b in Barebone, update: [set: [num: ^0]])) 51 | assert sql =~ "UPDATE" 52 | assert sql =~ "barebones" 53 | assert sql =~ "SET" 54 | 55 | {sql, []} = TestRepo.to_sql(:delete_all, Barebone) 56 | assert sql =~ "DELETE" 57 | assert sql =~ "barebones" 58 | end 59 | 60 | test "raises when primary key is not unique on struct operation" do 61 | schema = %CorruptedPk{a: "abc"} 62 | TestRepo.insert!(schema) 63 | TestRepo.insert!(schema) 64 | TestRepo.insert!(schema) 65 | 66 | assert_raise Ecto.MultiplePrimaryKeyError, 67 | ~r|expected delete on corrupted_pk to return at most one entry but got 3 entries|, 68 | fn -> TestRepo.delete!(schema) end 69 | end 70 | 71 | test "Repo.insert! escape" do 72 | TestRepo.insert!(%Post{title: "'"}) 73 | 74 | query = from(p in Post, select: p.title) 75 | assert ["'"] == TestRepo.all(query) 76 | end 77 | 78 | test "Repo.update! escape" do 79 | p = TestRepo.insert!(%Post{title: "hello"}) 80 | TestRepo.update!(Ecto.Changeset.change(p, title: "'")) 81 | 82 | query = from(p in Post, select: p.title) 83 | assert ["'"] == TestRepo.all(query) 84 | end 85 | 86 | @tag :insert_cell_wise_defaults 87 | test "Repo.insert_all escape" do 88 | TestRepo.insert_all(Post, [%{title: "'"}]) 89 | 90 | query = from(p in Post, select: p.title) 91 | assert ["'"] == TestRepo.all(query) 92 | end 93 | 94 | test "Repo.update_all escape" do 95 | TestRepo.insert!(%Post{title: "hello"}) 96 | 97 | TestRepo.update_all(Post, set: [title: "'"]) 98 | reader = from(p in Post, select: p.title) 99 | assert ["'"] == TestRepo.all(reader) 100 | 101 | query = from(Post, where: "'" != "") 102 | TestRepo.update_all(query, set: [title: "''"]) 103 | assert ["''"] == TestRepo.all(reader) 104 | end 105 | 106 | test "Repo.delete_all escape" do 107 | TestRepo.insert!(%Post{title: "hello"}) 108 | assert [_] = TestRepo.all(Post) 109 | 110 | TestRepo.delete_all(from(Post, where: "'" == "'")) 111 | assert [] == 
TestRepo.all(Post) 112 | end 113 | 114 | test "load" do 115 | inserted_at = ~N[2016-01-01 09:00:00] 116 | TestRepo.insert!(%Post{title: "title1", inserted_at: inserted_at, public: false}) 117 | 118 | result = Ecto.Adapters.SQL.query!(TestRepo, "SELECT * FROM posts", []) 119 | posts = Enum.map(result.rows, &TestRepo.load(Post, {result.columns, &1})) 120 | assert [%Post{title: "title1", inserted_at: ^inserted_at, public: false}] = posts 121 | end 122 | 123 | test "returns true when table exists" do 124 | assert Ecto.Adapters.SQL.table_exists?(TestRepo, "posts") 125 | end 126 | 127 | test "returns false table doesn't exists" do 128 | refute Ecto.Adapters.SQL.table_exists?(TestRepo, "unknown") 129 | end 130 | 131 | test "returns result as a formatted table" do 132 | TestRepo.insert_all(Post, [%{title: "my post title", counter: 1, public: nil}]) 133 | 134 | # resolve correct query for each adapter 135 | query = from(p in Post, select: [p.title, p.counter, p.public]) 136 | {query, _} = Ecto.Adapters.SQL.to_sql(:all, TestRepo, query) 137 | 138 | table = 139 | query 140 | |> TestRepo.query!() 141 | |> Ecto.Adapters.SQL.format_table() 142 | 143 | assert table == "+---------------+---------+--------+\n| title | counter | public |\n+---------------+---------+--------+\n| my post title | 1 | NULL |\n+---------------+---------+--------+" 144 | end 145 | 146 | test "format_table edge cases" do 147 | assert Ecto.Adapters.SQL.format_table(nil) == "" 148 | assert Ecto.Adapters.SQL.format_table(%{columns: nil, rows: nil}) == "" 149 | assert Ecto.Adapters.SQL.format_table(%{columns: [], rows: []}) == "" 150 | assert Ecto.Adapters.SQL.format_table(%{columns: [], rows: [["test"]]}) == "" 151 | assert Ecto.Adapters.SQL.format_table(%{columns: ["test"], rows: []}) == "+------+\n| test |\n+------+\n+------+" 152 | assert Ecto.Adapters.SQL.format_table(%{columns: ["test"], rows: nil}) == "+------+\n| test |\n+------+\n+------+" 153 | end 154 | end 155 | 
-------------------------------------------------------------------------------- /Earthfile: -------------------------------------------------------------------------------- 1 | all: 2 | BUILD +test-all 3 | BUILD +integration-test-all 4 | 5 | 6 | test-all: 7 | BUILD \ 8 | --build-arg ELIXIR_BASE=1.11.0-erlang-23.1.1-alpine-3.13.1 \ 9 | --build-arg ELIXIR_BASE=1.11.0-erlang-21.3.8.21-alpine-3.13.1 \ 10 | +test 11 | 12 | 13 | test: 14 | FROM +test-setup 15 | RUN MIX_ENV=test mix deps.compile 16 | COPY --dir bench integration_test lib test ./ 17 | 18 | RUN mix deps.get && mix deps.unlock --check-unused 19 | RUN mix deps.compile 20 | RUN mix compile #--warnings-as-errors 21 | RUN mix test 22 | 23 | 24 | integration-test-all: 25 | ARG ELIXIR_BASE=1.11.0-erlang-23.1.1-alpine-3.13.1 26 | BUILD \ 27 | --build-arg POSTGRES=11.11 \ 28 | --build-arg POSTGRES=9.6 \ 29 | --build-arg POSTGRES=9.5 \ 30 | +integration-test-postgres 31 | 32 | BUILD \ 33 | --build-arg MYSQL=5.7 \ 34 | +integration-test-mysql 35 | 36 | BUILD \ 37 | --build-arg MSSQL=2017 \ 38 | --build-arg MSSQL=2019 \ 39 | +integration-test-mssql 40 | 41 | 42 | integration-test-base: 43 | FROM +setup-base 44 | RUN apk add --no-progress --update docker docker-compose 45 | 46 | RUN mix local.rebar --force 47 | RUN mix local.hex --force 48 | 49 | 50 | COMMON_INTEGRATION_SETUP_AND_MIX: 51 | COMMAND 52 | COPY mix.exs mix.lock .formatter.exs . 
53 | COPY --dir bench integration_test lib test ./ 54 | RUN mix deps.get 55 | RUN mix deps.compile 56 | RUN mix compile #--warnings-as-errors 57 | 58 | 59 | integration-test-postgres: 60 | FROM +integration-test-base 61 | ARG POSTGRES="11.11" 62 | 63 | IF [ "$POSTGRES" = "9.5" ] 64 | # for 9.5 we require a downgraded version of pg_dump; 65 | # and in the 3.4 version, it is not included in postgresql-client but rather in postgresql 66 | RUN echo 'http://dl-cdn.alpinelinux.org/alpine/v3.4/main' >> /etc/apk/repositories 67 | RUN apk add postgresql=9.5.13-r0 68 | ELSE 69 | RUN apk add postgresql-client 70 | END 71 | 72 | DO +COMMON_INTEGRATION_SETUP_AND_MIX 73 | 74 | # then run the tests 75 | WITH DOCKER \ 76 | --pull "postgres:$POSTGRES" 77 | RUN set -e; \ 78 | timeout=$(expr $(date +%s) + 30); \ 79 | docker run --name pg --network=host -d -e POSTGRES_USER=postgres -e POSTGRES_PASSWORD=postgres -e POSTGRES_DB=postgres "postgres:$POSTGRES"; \ 80 | # wait for postgres to start 81 | while ! pg_isready --host=127.0.0.1 --port=5432 --quiet; do \ 82 | test "$(date +%s)" -le "$timeout" || (echo "timed out waiting for postgres"; exit 1); \ 83 | echo "waiting for postgres"; \ 84 | sleep 1; \ 85 | done; \ 86 | # run tests 87 | PG_URL=postgres:postgres@127.0.0.1 ECTO_ADAPTER=pg mix test; 88 | END 89 | 90 | 91 | integration-test-mysql: 92 | FROM +integration-test-base 93 | RUN apk add mysql-client 94 | 95 | DO +COMMON_INTEGRATION_SETUP_AND_MIX 96 | 97 | ARG MYSQL="5.7" 98 | WITH DOCKER \ 99 | --pull "mysql:$MYSQL" 100 | RUN set -e; \ 101 | timeout=$(expr $(date +%s) + 30); \ 102 | docker run --name mysql --network=host -d -e MYSQL_ROOT_PASSWORD=root "mysql:$MYSQL"; \ 103 | # wait for mysql to start 104 | while ! 
mysqladmin ping --host=127.0.0.1 --port=3306 --protocol=TCP --silent; do \ 105 | test "$(date +%s)" -le "$timeout" || (echo "timed out waiting for mysql"; exit 1); \ 106 | echo "waiting for mysql"; \ 107 | sleep 1; \ 108 | done; \ 109 | # run tests 110 | MYSQL_URL=root:root@127.0.0.1 ECTO_ADAPTER=myxql mix test; 111 | END 112 | 113 | 114 | integration-test-mssql: 115 | FROM +integration-test-base 116 | 117 | RUN apk add --no-cache curl gnupg --virtual .build-dependencies -- && \ 118 | curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_17.5.2.1-1_amd64.apk && \ 119 | curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_17.5.2.1-1_amd64.apk && \ 120 | echo y | apk add --allow-untrusted msodbcsql17_17.5.2.1-1_amd64.apk mssql-tools_17.5.2.1-1_amd64.apk && \ 121 | apk del .build-dependencies && rm -f msodbcsql*.sig mssql-tools*.apk 122 | ENV PATH="/opt/mssql-tools/bin:${PATH}" 123 | 124 | DO +COMMON_INTEGRATION_SETUP_AND_MIX 125 | 126 | ARG MSSQL="2017" 127 | WITH DOCKER \ 128 | --pull "mcr.microsoft.com/mssql/server:$MSSQL-latest" 129 | RUN set -e; \ 130 | timeout=$(expr $(date +%s) + 30); \ 131 | docker run -d -p 1433:1433 --name mssql -e 'ACCEPT_EULA=Y' -e 'MSSQL_SA_PASSWORD=some!Password' "mcr.microsoft.com/mssql/server:$MSSQL-latest"; \ 132 | # wait for mssql to start 133 | while ! 
sqlcmd -S tcp:127.0.0.1,1433 -U sa -P 'some!Password' -Q "SELECT 1" >/dev/null 2>&1; do \ 134 | test "$(date +%s)" -le "$timeout" || (echo "timed out waiting for mssql"; exit 1); \ 135 | echo "waiting for mssql"; \ 136 | sleep 1; \ 137 | done; \ 138 | # run tests 139 | ECTO_ADAPTER=tds mix test; 140 | END 141 | 142 | 143 | setup-base: 144 | ARG ELIXIR_BASE=1.11.0-erlang-23.1.1-alpine-3.13.1 145 | FROM hexpm/elixir:$ELIXIR_BASE 146 | RUN apk add --no-progress --update git build-base 147 | ENV ELIXIR_ASSERT_TIMEOUT=10000 148 | WORKDIR /src/ecto_sql 149 | 150 | 151 | test-setup: 152 | FROM +setup-base 153 | COPY mix.exs . 154 | COPY mix.lock . 155 | COPY .formatter.exs . 156 | RUN mix local.rebar --force 157 | RUN mix local.hex --force 158 | RUN mix deps.get 159 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoSQL.MixProject do 2 | use Mix.Project 3 | 4 | @source_url "https://github.com/elixir-ecto/ecto_sql" 5 | @version "3.7.1" 6 | @adapters ~w(pg myxql tds) 7 | 8 | def project do 9 | [ 10 | app: :ecto_sql, 11 | version: @version, 12 | elixir: "~> 1.8", 13 | deps: deps(), 14 | test_paths: test_paths(System.get_env("ECTO_ADAPTER")), 15 | xref: [ 16 | exclude: [ 17 | MyXQL, 18 | Ecto.Adapters.MyXQL.Connection, 19 | Postgrex, 20 | Ecto.Adapters.Postgres.Connection, 21 | Tds, 22 | Tds.Ecto.UUID, 23 | Ecto.Adapters.Tds.Connection 24 | ] 25 | ], 26 | 27 | # Custom testing 28 | aliases: [ 29 | "test.all": ["test", "test.adapters", "test.as_a_dep"], 30 | "test.adapters": &test_adapters/1, 31 | "test.as_a_dep": &test_as_a_dep/1 32 | ], 33 | preferred_cli_env: ["test.all": :test, "test.adapters": :test], 34 | 35 | # Hex 36 | description: "SQL-based adapters for Ecto and database migrations", 37 | package: package(), 38 | 39 | # Docs 40 | name: "Ecto SQL", 41 | docs: docs() 42 | ] 43 | end 44 | 45 | def application do 46 | [ 47 | 
extra_applications: [:logger, :eex], 48 | env: [postgres_map_type: "jsonb"], 49 | mod: {Ecto.Adapters.SQL.Application, []} 50 | ] 51 | end 52 | 53 | defp deps do 54 | [ 55 | ecto_dep(), 56 | {:telemetry, "~> 0.4.0 or ~> 1.0"}, 57 | 58 | # Drivers 59 | {:db_connection, "~> 2.2"}, 60 | postgrex_dep(), 61 | myxql_dep(), 62 | tds_dep(), 63 | 64 | # Bring something in for JSON during tests 65 | {:jason, ">= 0.0.0", only: [:test, :docs]}, 66 | 67 | # Docs 68 | {:ex_doc, "~> 0.21", only: :docs}, 69 | 70 | # Benchmarks 71 | {:benchee, "~> 0.11.0", only: :bench}, 72 | {:benchee_json, "~> 0.4.0", only: :bench} 73 | ] 74 | end 75 | 76 | defp ecto_dep do 77 | if path = System.get_env("ECTO_PATH") do 78 | {:ecto, path: path} 79 | else 80 | {:ecto, "~> 3.7.0"} 81 | end 82 | end 83 | 84 | defp postgrex_dep do 85 | if path = System.get_env("POSTGREX_PATH") do 86 | {:postgrex, path: path} 87 | else 88 | {:postgrex, "~> 0.15.0 or ~> 1.0", optional: true} 89 | end 90 | end 91 | 92 | defp myxql_dep do 93 | if path = System.get_env("MYXQL_PATH") do 94 | {:myxql, path: path} 95 | else 96 | {:myxql, "~> 0.4.0 or ~> 0.5.0", optional: true} 97 | end 98 | end 99 | 100 | defp tds_dep do 101 | if path = System.get_env("TDS_PATH") do 102 | {:tds, path: path} 103 | else 104 | {:tds, "~> 2.1.1 or ~> 2.2", optional: true} 105 | end 106 | end 107 | 108 | defp test_paths(adapter) when adapter in @adapters, do: ["integration_test/#{adapter}"] 109 | defp test_paths(nil), do: ["test"] 110 | defp test_paths(other), do: raise("unknown adapter #{inspect(other)}") 111 | 112 | defp package do 113 | [ 114 | maintainers: ["Eric Meadows-Jönsson", "José Valim", "James Fish", "Michał Muskała"], 115 | licenses: ["Apache-2.0"], 116 | links: %{"GitHub" => @source_url}, 117 | files: 118 | ~w(.formatter.exs mix.exs README.md CHANGELOG.md lib) ++ 119 | ~w(integration_test/sql integration_test/support) 120 | ] 121 | end 122 | 123 | defp test_as_a_dep(args) do 124 | IO.puts("==> Compiling ecto_sql from a dependency") 
125 | File.rm_rf!("tmp/as_a_dep") 126 | File.mkdir_p!("tmp/as_a_dep") 127 | 128 | File.cd!("tmp/as_a_dep", fn -> 129 | File.write!("mix.exs", """ 130 | defmodule DepsOnEctoSQL.MixProject do 131 | use Mix.Project 132 | 133 | def project do 134 | [ 135 | app: :deps_on_ecto_sql, 136 | version: "0.0.1", 137 | deps: [{:ecto_sql, path: "../.."}] 138 | ] 139 | end 140 | end 141 | """) 142 | 143 | mix_cmd_with_status_check(["do", "deps.get,", "compile", "--force" | args]) 144 | end) 145 | end 146 | 147 | defp test_adapters(args) do 148 | for adapter <- @adapters, do: env_run(adapter, args) 149 | end 150 | 151 | defp env_run(adapter, args) do 152 | IO.puts("==> Running tests for ECTO_ADAPTER=#{adapter} mix test") 153 | 154 | mix_cmd_with_status_check( 155 | ["test", ansi_option() | args], 156 | env: [{"ECTO_ADAPTER", adapter}] 157 | ) 158 | end 159 | 160 | defp ansi_option do 161 | if IO.ANSI.enabled?(), do: "--color", else: "--no-color" 162 | end 163 | 164 | defp mix_cmd_with_status_check(args, opts \\ []) do 165 | {_, res} = System.cmd("mix", args, [into: IO.binstream(:stdio, :line)] ++ opts) 166 | 167 | if res > 0 do 168 | System.at_exit(fn _ -> exit({:shutdown, 1}) end) 169 | end 170 | end 171 | 172 | defp docs do 173 | [ 174 | main: "Ecto.Adapters.SQL", 175 | source_ref: "v#{@version}", 176 | canonical: "http://hexdocs.pm/ecto_sql", 177 | source_url: @source_url, 178 | extras: ["CHANGELOG.md"], 179 | skip_undefined_reference_warnings_on: ["CHANGELOG.md"], 180 | groups_for_modules: [ 181 | # Ecto.Adapters.SQL, 182 | # Ecto.Adapters.SQL.Sandbox, 183 | # Ecto.Migration, 184 | # Ecto.Migrator, 185 | 186 | "Built-in adapters": [ 187 | Ecto.Adapters.MyXQL, 188 | Ecto.Adapters.Tds, 189 | Ecto.Adapters.Postgres 190 | ], 191 | "Adapter specification": [ 192 | Ecto.Adapter.Migration, 193 | Ecto.Adapter.Structure, 194 | Ecto.Adapters.SQL.Connection, 195 | Ecto.Migration.Command, 196 | Ecto.Migration.Constraint, 197 | Ecto.Migration.Index, 198 | Ecto.Migration.Reference, 199 | 
Ecto.Migration.Table 200 | ] 201 | ] 202 | ] 203 | end 204 | end 205 | -------------------------------------------------------------------------------- /integration_test/sql/migrator.exs: -------------------------------------------------------------------------------- 1 | Code.require_file "../support/file_helpers.exs", __DIR__ 2 | 3 | defmodule Ecto.Integration.MigratorTest do 4 | use Ecto.Integration.Case 5 | 6 | import Support.FileHelpers 7 | import ExUnit.CaptureLog 8 | import Ecto.Migrator 9 | 10 | alias Ecto.Integration.{TestRepo, PoolRepo} 11 | alias Ecto.Migration.SchemaMigration 12 | 13 | setup config do 14 | Process.register(self(), config.test) 15 | PoolRepo.delete_all(SchemaMigration) 16 | :ok 17 | end 18 | 19 | defmodule AnotherSchemaMigration do 20 | use Ecto.Migration 21 | 22 | def change do 23 | execute TestRepo.create_prefix("bad_schema_migrations"), 24 | TestRepo.drop_prefix("bad_schema_migrations") 25 | 26 | create table(:schema_migrations, prefix: "bad_schema_migrations") do 27 | add :version, :string 28 | add :inserted_at, :integer 29 | end 30 | end 31 | end 32 | 33 | defmodule BrokenLinkMigration do 34 | use Ecto.Migration 35 | 36 | def change do 37 | Task.start_link(fn -> raise "oops" end) 38 | Process.sleep(:infinity) 39 | end 40 | end 41 | 42 | defmodule GoodMigration do 43 | use Ecto.Migration 44 | 45 | def up do 46 | create table(:good_migration) 47 | end 48 | 49 | def down do 50 | drop table(:good_migration) 51 | end 52 | end 53 | 54 | defmodule BadMigration do 55 | use Ecto.Migration 56 | 57 | def change do 58 | execute "CREATE WHAT" 59 | end 60 | end 61 | 62 | test "migrations up and down" do 63 | assert migrated_versions(PoolRepo) == [] 64 | assert up(PoolRepo, 31, GoodMigration, log: false) == :ok 65 | 66 | [migration] = PoolRepo.all(SchemaMigration) 67 | assert migration.version == 31 68 | assert migration.inserted_at 69 | 70 | assert migrated_versions(PoolRepo) == [31] 71 | assert up(PoolRepo, 31, GoodMigration, log: false) == 
:already_up 72 | assert migrated_versions(PoolRepo) == [31] 73 | assert down(PoolRepo, 32, GoodMigration, log: false) == :already_down 74 | assert migrated_versions(PoolRepo) == [31] 75 | assert down(PoolRepo, 31, GoodMigration, log: false) == :ok 76 | assert migrated_versions(PoolRepo) == [] 77 | end 78 | 79 | @tag :prefix 80 | test "does not commit migration if insert into schema migration fails" do 81 | # First we create a new schema migration table in another prefix 82 | assert up(PoolRepo, 33, AnotherSchemaMigration, log: false) == :ok 83 | assert migrated_versions(PoolRepo) == [33] 84 | 85 | catch_error(up(PoolRepo, 34, GoodMigration, log: false, prefix: "bad_schema_migrations")) 86 | catch_error(PoolRepo.all("good_migration")) 87 | catch_error(PoolRepo.all("good_migration", prefix: "bad_schema_migrations")) 88 | 89 | assert down(PoolRepo, 33, AnotherSchemaMigration, log: false) == :ok 90 | end 91 | 92 | test "bad execute migration" do 93 | assert catch_error(up(PoolRepo, 31, BadMigration, log: false)) 94 | end 95 | 96 | test "broken link migration" do 97 | Process.flag(:trap_exit, true) 98 | 99 | assert capture_log(fn -> 100 | {:ok, pid} = Task.start_link(fn -> up(PoolRepo, 31, BrokenLinkMigration, log: false) end) 101 | assert_receive {:EXIT, ^pid, _} 102 | end) =~ "oops" 103 | 104 | assert capture_log(fn -> 105 | catch_exit(up(PoolRepo, 31, BrokenLinkMigration, log: false)) 106 | end) =~ "oops" 107 | end 108 | 109 | test "run up to/step migration", config do 110 | in_tmp fn path -> 111 | create_migration(47, config) 112 | create_migration(48, config) 113 | 114 | assert [47] = run(PoolRepo, path, :up, step: 1, log: false) 115 | assert count_entries() == 1 116 | 117 | assert [48] = run(PoolRepo, path, :up, to: 48, log: false) 118 | end 119 | end 120 | 121 | test "run down to/step migration", config do 122 | in_tmp fn path -> 123 | migrations = [ 124 | create_migration(49, config), 125 | create_migration(50, config), 126 | ] 127 | 128 | assert [49, 50] = 
run(PoolRepo, path, :up, all: true, log: false) 129 | purge migrations 130 | 131 | assert [50] = run(PoolRepo, path, :down, step: 1, log: false) 132 | purge migrations 133 | 134 | assert count_entries() == 1 135 | assert [50] = run(PoolRepo, path, :up, to: 50, log: false) 136 | end 137 | end 138 | 139 | test "runs all migrations", config do 140 | in_tmp fn path -> 141 | migrations = [ 142 | create_migration(53, config), 143 | create_migration(54, config), 144 | ] 145 | 146 | assert [53, 54] = run(PoolRepo, path, :up, all: true, log: false) 147 | assert [] = run(PoolRepo, path, :up, all: true, log: false) 148 | purge migrations 149 | 150 | assert [54, 53] = run(PoolRepo, path, :down, all: true, log: false) 151 | purge migrations 152 | 153 | assert count_entries() == 0 154 | assert [53, 54] = run(PoolRepo, path, :up, all: true, log: false) 155 | end 156 | end 157 | 158 | test "does not commit half transactions on bad syntax", config do 159 | in_tmp fn path -> 160 | migrations = [ 161 | create_migration(64, config), 162 | create_migration("65_+", config) 163 | ] 164 | 165 | assert_raise SyntaxError, fn -> 166 | run(PoolRepo, path, :up, all: true, log: false) 167 | end 168 | 169 | refute_received {:up, _} 170 | assert count_entries() == 0 171 | purge migrations 172 | end 173 | end 174 | 175 | @tag :lock_for_migrations 176 | test "raises when connection pool is too small" do 177 | config = Application.fetch_env!(:ecto_sql, PoolRepo) 178 | config = Keyword.merge(config, pool_size: 1) 179 | Application.put_env(:ecto_sql, __MODULE__.SingleConnectionRepo, config) 180 | 181 | defmodule SingleConnectionRepo do 182 | use Ecto.Repo, otp_app: :ecto_sql, adapter: PoolRepo.__adapter__() 183 | end 184 | 185 | {:ok, _pid} = SingleConnectionRepo.start_link() 186 | 187 | in_tmp fn path -> 188 | exception_message = ~r/Migrations failed to run because the connection pool size is less than 2/ 189 | 190 | assert_raise Ecto.MigrationError, exception_message, fn -> 191 | 
run(SingleConnectionRepo, path, :up, all: true, log: false) 192 | end 193 | end 194 | end 195 | 196 | test "does not raise when connection pool is too small but there is no lock" do 197 | config = Application.fetch_env!(:ecto_sql, PoolRepo) 198 | config = Keyword.merge(config, pool_size: 1, migration_lock: nil) 199 | Application.put_env(:ecto_sql, __MODULE__.SingleConnectionNoLockRepo, config) 200 | 201 | defmodule SingleConnectionNoLockRepo do 202 | use Ecto.Repo, otp_app: :ecto_sql, adapter: PoolRepo.__adapter__() 203 | end 204 | 205 | {:ok, _pid} = SingleConnectionNoLockRepo.start_link() 206 | 207 | in_tmp fn path -> 208 | run(SingleConnectionNoLockRepo, path, :up, all: true, log: false) 209 | end 210 | end 211 | 212 | defp count_entries() do 213 | PoolRepo.aggregate(SchemaMigration, :count, :version) 214 | end 215 | 216 | defp create_migration(num, config) do 217 | module = Module.concat(__MODULE__, "Migration#{num}") 218 | 219 | File.write! "#{num}_migration_#{num}.exs", """ 220 | defmodule #{module} do 221 | use Ecto.Migration 222 | 223 | def up do 224 | send #{inspect config.test}, {:up, #{inspect num}} 225 | end 226 | 227 | def down do 228 | send #{inspect config.test}, {:down, #{inspect num}} 229 | end 230 | end 231 | """ 232 | 233 | module 234 | end 235 | 236 | defp purge(modules) do 237 | Enum.each(List.wrap(modules), fn m -> 238 | :code.delete m 239 | :code.purge m 240 | end) 241 | end 242 | end 243 | -------------------------------------------------------------------------------- /integration_test/sql/transaction.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.TransactionTest do 2 | # We can keep this test async as long as it 3 | # is the only one access the transactions table 4 | use Ecto.Integration.Case, async: true 5 | 6 | import Ecto.Query 7 | alias Ecto.Integration.PoolRepo # Used for writes 8 | alias Ecto.Integration.TestRepo # Used for reads 9 | 10 | @moduletag :capture_log 11 
| 12 | defmodule UniqueError do 13 | defexception message: "unique error" 14 | end 15 | 16 | setup do 17 | PoolRepo.delete_all "transactions" 18 | :ok 19 | end 20 | 21 | defmodule Trans do 22 | use Ecto.Schema 23 | 24 | schema "transactions" do 25 | field :num, :integer 26 | end 27 | end 28 | 29 | test "transaction returns value" do 30 | refute PoolRepo.in_transaction?() 31 | {:ok, val} = PoolRepo.transaction(fn -> 32 | assert PoolRepo.in_transaction?() 33 | {:ok, val} = 34 | PoolRepo.transaction(fn -> 35 | assert PoolRepo.in_transaction?() 36 | 42 37 | end) 38 | assert PoolRepo.in_transaction?() 39 | val 40 | end) 41 | refute PoolRepo.in_transaction?() 42 | assert val == 42 43 | end 44 | 45 | test "transaction re-raises" do 46 | assert_raise UniqueError, fn -> 47 | PoolRepo.transaction(fn -> 48 | PoolRepo.transaction(fn -> 49 | raise UniqueError 50 | end) 51 | end) 52 | end 53 | end 54 | 55 | # tag is required for TestRepo, since it is checkout in 56 | # Ecto.Integration.Case setup 57 | @tag isolation_level: :snapshot 58 | test "transaction commits" do 59 | # mssql requires that all transactions that use same shared lock are set 60 | # to :snapshot isolation level 61 | opts = [isolation_level: :snapshot] 62 | 63 | PoolRepo.transaction(fn -> 64 | e = PoolRepo.insert!(%Trans{num: 1}) 65 | assert [^e] = PoolRepo.all(Trans) 66 | assert [] = TestRepo.all(Trans) 67 | end, opts) 68 | 69 | assert [%Trans{num: 1}] = PoolRepo.all(Trans) 70 | end 71 | 72 | @tag isolation_level: :snapshot 73 | test "transaction rolls back" do 74 | opts = [isolation_level: :snapshot] 75 | try do 76 | PoolRepo.transaction(fn -> 77 | e = PoolRepo.insert!(%Trans{num: 2}) 78 | assert [^e] = PoolRepo.all(Trans) 79 | assert [] = TestRepo.all(Trans) 80 | raise UniqueError 81 | end, opts) 82 | rescue 83 | UniqueError -> :ok 84 | end 85 | 86 | assert [] = TestRepo.all(Trans) 87 | end 88 | 89 | test "transaction rolls back per repository" do 90 | message = "cannot call rollback outside of transaction" 
91 | 92 | assert_raise RuntimeError, message, fn -> 93 | PoolRepo.rollback(:done) 94 | end 95 | 96 | assert_raise RuntimeError, message, fn -> 97 | TestRepo.transaction fn -> 98 | PoolRepo.rollback(:done) 99 | end 100 | end 101 | end 102 | 103 | @tag :assigns_id_type 104 | test "transaction rolls back with reason on aborted transaction" do 105 | e1 = PoolRepo.insert!(%Trans{num: 13}) 106 | 107 | assert_raise Ecto.ConstraintError, fn -> 108 | TestRepo.transaction fn -> 109 | PoolRepo.insert!(%Trans{id: e1.id, num: 14}) 110 | end 111 | end 112 | end 113 | 114 | test "nested transaction partial rollback" do 115 | assert PoolRepo.transaction(fn -> 116 | e1 = PoolRepo.insert!(%Trans{num: 3}) 117 | assert [^e1] = PoolRepo.all(Trans) 118 | 119 | try do 120 | PoolRepo.transaction(fn -> 121 | e2 = PoolRepo.insert!(%Trans{num: 4}) 122 | assert [^e1, ^e2] = PoolRepo.all(from(t in Trans, order_by: t.num)) 123 | raise UniqueError 124 | end) 125 | rescue 126 | UniqueError -> :ok 127 | end 128 | 129 | assert_raise DBConnection.ConnectionError, "transaction rolling back", 130 | fn() -> PoolRepo.insert!(%Trans{num: 5}) end 131 | end) == {:error, :rollback} 132 | 133 | assert TestRepo.all(Trans) == [] 134 | end 135 | 136 | test "manual rollback doesn't bubble up" do 137 | x = PoolRepo.transaction(fn -> 138 | e = PoolRepo.insert!(%Trans{num: 6}) 139 | assert [^e] = PoolRepo.all(Trans) 140 | PoolRepo.rollback(:oops) 141 | end) 142 | 143 | assert x == {:error, :oops} 144 | assert [] = TestRepo.all(Trans) 145 | end 146 | 147 | test "manual rollback bubbles up on nested transaction" do 148 | assert PoolRepo.transaction(fn -> 149 | e = PoolRepo.insert!(%Trans{num: 7}) 150 | assert [^e] = PoolRepo.all(Trans) 151 | assert {:error, :oops} = PoolRepo.transaction(fn -> 152 | PoolRepo.rollback(:oops) 153 | end) 154 | assert_raise DBConnection.ConnectionError, "transaction rolling back", 155 | fn() -> PoolRepo.insert!(%Trans{num: 8}) end 156 | end) == {:error, :rollback} 157 | 158 | assert [] = 
TestRepo.all(Trans) 159 | end 160 | 161 | test "transactions are not shared in repo" do 162 | pid = self() 163 | opts = [isolation_level: :snapshot] 164 | 165 | new_pid = spawn_link fn -> 166 | PoolRepo.transaction(fn -> 167 | e = PoolRepo.insert!(%Trans{num: 9}) 168 | assert [^e] = PoolRepo.all(Trans) 169 | send(pid, :in_transaction) 170 | receive do 171 | :commit -> :ok 172 | after 173 | 5000 -> raise "timeout" 174 | end 175 | end, opts) 176 | send(pid, :committed) 177 | end 178 | 179 | receive do 180 | :in_transaction -> :ok 181 | after 182 | 5000 -> raise "timeout" 183 | end 184 | 185 | # mssql requires that all transactions that use same shared lock 186 | # set transaction isolation level to "snapshot" so this must be wrapped into 187 | # explicit transaction 188 | PoolRepo.transaction(fn -> 189 | assert [] = PoolRepo.all(Trans) 190 | end, opts) 191 | 192 | send(new_pid, :commit) 193 | receive do 194 | :committed -> :ok 195 | after 196 | 5000 -> raise "timeout" 197 | end 198 | 199 | assert [%Trans{num: 9}] = PoolRepo.all(Trans) 200 | end 201 | 202 | ## Checkout 203 | 204 | describe "with checkouts" do 205 | test "transaction inside checkout" do 206 | PoolRepo.checkout(fn -> 207 | refute PoolRepo.in_transaction?() 208 | PoolRepo.transaction(fn -> 209 | assert PoolRepo.in_transaction?() 210 | end) 211 | refute PoolRepo.in_transaction?() 212 | end) 213 | end 214 | 215 | test "checkout inside transaction" do 216 | PoolRepo.transaction(fn -> 217 | assert PoolRepo.in_transaction?() 218 | PoolRepo.checkout(fn -> 219 | assert PoolRepo.in_transaction?() 220 | end) 221 | assert PoolRepo.in_transaction?() 222 | end) 223 | end 224 | 225 | @tag :transaction_checkout_raises 226 | test "checkout raises on transaction attempt" do 227 | assert_raise DBConnection.ConnectionError, ~r"connection was checked out with status", fn -> 228 | PoolRepo.checkout(fn -> PoolRepo.query!("BEGIN") end) 229 | end 230 | end 231 | end 232 | 233 | ## Logging 234 | 235 | defp register_telemetry() 
do 236 | Process.put(:telemetry, fn _, measurements, event -> send(self(), {measurements, event}) end) 237 | end 238 | 239 | test "log begin, commit and rollback" do 240 | register_telemetry() 241 | 242 | PoolRepo.transaction(fn -> 243 | assert_received {measurements, %{params: [], result: {:ok, _res}}} 244 | assert is_integer(measurements.query_time) and measurements.query_time >= 0 245 | assert is_integer(measurements.queue_time) and measurements.queue_time >= 0 246 | 247 | refute_received %{} 248 | register_telemetry() 249 | end) 250 | 251 | assert_received {measurements, %{params: [], result: {:ok, _res}}} 252 | assert is_integer(measurements.query_time) and measurements.query_time >= 0 253 | refute Map.has_key?(measurements, :queue_time) 254 | 255 | assert PoolRepo.transaction(fn -> 256 | refute_received %{} 257 | register_telemetry() 258 | PoolRepo.rollback(:log_rollback) 259 | end) == {:error, :log_rollback} 260 | 261 | assert_received {measurements, %{params: [], result: {:ok, _res}}} 262 | assert is_integer(measurements.query_time) and measurements.query_time >= 0 263 | refute Map.has_key?(measurements, :queue_time) 264 | end 265 | 266 | test "log queries inside transactions" do 267 | PoolRepo.transaction(fn -> 268 | register_telemetry() 269 | assert [] = PoolRepo.all(Trans) 270 | 271 | assert_received {measurements, %{params: [], result: {:ok, _res}}} 272 | assert is_integer(measurements.query_time) and measurements.query_time >= 0 273 | assert is_integer(measurements.decode_time) and measurements.query_time >= 0 274 | refute Map.has_key?(measurements, :queue_time) 275 | end) 276 | end 277 | end 278 | -------------------------------------------------------------------------------- /lib/ecto/adapters/tds/types.ex: -------------------------------------------------------------------------------- 1 | if Code.ensure_loaded?(Tds) do 2 | defmodule Tds.Ecto.UUID do 3 | @moduledoc """ 4 | An TDS adapter type for UUIDs strings. 
5 | 6 | If you are using Tds adapter and UUIDs in your project, instead of `Ecto.UUID` 7 | you should use Tds.Ecto.UUID to generate correct bytes that should be stored 8 | in database. 9 | """ 10 | 11 | use Ecto.Type 12 | 13 | @typedoc """ 14 | A hex-encoded UUID string. 15 | """ 16 | @type t :: <<_::288>> 17 | 18 | @typedoc """ 19 | A raw binary representation of a UUID. 20 | """ 21 | @type raw :: <<_::128>> 22 | 23 | @doc false 24 | @impl true 25 | def type(), do: :uuid 26 | 27 | @doc """ 28 | Casts to UUID. 29 | """ 30 | @impl true 31 | @spec cast(t | raw | any) :: {:ok, t} | :error 32 | def cast(<< a1, a2, a3, a4, a5, a6, a7, a8, ?-, 33 | b1, b2, b3, b4, ?-, 34 | c1, c2, c3, c4, ?-, 35 | d1, d2, d3, d4, ?-, 36 | e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12 >>) do 37 | << c(a1), c(a2), c(a3), c(a4), c(a5), c(a6), c(a7), c(a8), ?-, 38 | c(b1), c(b2), c(b3), c(b4), ?-, 39 | c(c1), c(c2), c(c3), c(c4), ?-, 40 | c(d1), c(d2), c(d3), c(d4), ?-, 41 | c(e1), c(e2), c(e3), c(e4), c(e5), c(e6), c(e7), c(e8), c(e9), c(e10), c(e11), c(e12) >> 42 | catch 43 | :error -> :error 44 | else 45 | casted -> {:ok, casted} 46 | end 47 | 48 | def cast(<<bin::binary-size(16)>>), do: encode(bin) 49 | def cast(_), do: :error 50 | 51 | @doc """ 52 | Same as `cast/1` but raises `Ecto.CastError` on invalid arguments.
53 | """ 54 | def cast!(value) do 55 | case cast(value) do 56 | {:ok, uuid} -> uuid 57 | :error -> raise Ecto.CastError, type: __MODULE__, value: value 58 | end 59 | end 60 | 61 | @compile {:inline, c: 1} 62 | 63 | defp c(?0), do: ?0 64 | defp c(?1), do: ?1 65 | defp c(?2), do: ?2 66 | defp c(?3), do: ?3 67 | defp c(?4), do: ?4 68 | defp c(?5), do: ?5 69 | defp c(?6), do: ?6 70 | defp c(?7), do: ?7 71 | defp c(?8), do: ?8 72 | defp c(?9), do: ?9 73 | defp c(?A), do: ?a 74 | defp c(?B), do: ?b 75 | defp c(?C), do: ?c 76 | defp c(?D), do: ?d 77 | defp c(?E), do: ?e 78 | defp c(?F), do: ?f 79 | defp c(?a), do: ?a 80 | defp c(?b), do: ?b 81 | defp c(?c), do: ?c 82 | defp c(?d), do: ?d 83 | defp c(?e), do: ?e 84 | defp c(?f), do: ?f 85 | defp c(_), do: throw(:error) 86 | 87 | @doc """ 88 | Converts a string representing a UUID into a binary. 89 | """ 90 | @impl true 91 | @spec dump(t | any) :: {:ok, raw} | :error 92 | def dump(<>) do 97 | try do 98 | << d(a7)::4, d(a8)::4, d(a5)::4, d(a6)::4, 99 | d(a3)::4, d(a4)::4, d(a1)::4, d(a2)::4, 100 | d(b3)::4, d(b4)::4, d(b1)::4, d(b2)::4, 101 | d(c3)::4, d(c4)::4, d(c1)::4, d(c2)::4, 102 | d(d1)::4, d(d2)::4, d(d3)::4, d(d4)::4, 103 | d(e1)::4, d(e2)::4, d(e3)::4, d(e4)::4, 104 | d(e5)::4, d(e6)::4, d(e7)::4, d(e8)::4, 105 | d(e9)::4, d(e10)::4, d(e11)::4, d(e12)::4 >> 106 | catch 107 | :error -> :error 108 | else 109 | binary -> 110 | {:ok, binary} 111 | end 112 | end 113 | 114 | def dump(_), do: :error 115 | 116 | def dump!(value) do 117 | case dump(value) do 118 | {:ok, binary} -> binary 119 | :error -> raise ArgumentError, "Invalid uuid value #{inspect(value)}" 120 | end 121 | end 122 | 123 | @compile {:inline, d: 1} 124 | 125 | defp d(?0), do: 0 126 | defp d(?1), do: 1 127 | defp d(?2), do: 2 128 | defp d(?3), do: 3 129 | defp d(?4), do: 4 130 | defp d(?5), do: 5 131 | defp d(?6), do: 6 132 | defp d(?7), do: 7 133 | defp d(?8), do: 8 134 | defp d(?9), do: 9 135 | defp d(?A), do: 10 136 | defp d(?B), do: 11 137 | defp 
d(?C), do: 12 138 | defp d(?D), do: 13 139 | defp d(?E), do: 14 140 | defp d(?F), do: 15 141 | defp d(?a), do: 10 142 | defp d(?b), do: 11 143 | defp d(?c), do: 12 144 | defp d(?d), do: 13 145 | defp d(?e), do: 14 146 | defp d(?f), do: 15 147 | defp d(_), do: throw(:error) 148 | 149 | @doc """ 150 | Converts a binary UUID into a string. 151 | """ 152 | @impl true 153 | @spec load(raw | any) :: {:ok, t} | :error 154 | def load(<<_::128>> = uuid) do 155 | encode(uuid) 156 | end 157 | 158 | def load(<<_::64, ?-, _::32, ?-, _::32, ?-, _::32, ?-, _::96>> = string) do 159 | raise ArgumentError, "trying to load string UUID as Tds.Ecto.UUID: #{inspect string}. " <> 160 | "Maybe you wanted to declare :uuid as your database field?" 161 | end 162 | 163 | def load(_), do: :error 164 | 165 | @doc """ 166 | Generates a version 4 (random) UUID. 167 | """ 168 | @spec generate() :: t 169 | def generate do 170 | {:ok, uuid} = encode(bingenerate()) 171 | uuid 172 | end 173 | 174 | @doc """ 175 | Generates a version 4 (random) UUID in the binary format. 176 | """ 177 | @spec bingenerate() :: raw 178 | def bingenerate do 179 | <<u0::48, _::4, u1::12, _::2, u2::62>> = :crypto.strong_rand_bytes(16) 180 | <<u0::48, 4::4, u1::12, 2::2, u2::62>> 181 | end 182 | 183 | # Callback invoked by autogenerate fields.
184 | @impl true 185 | def autogenerate, do: generate() 186 | 187 | defp encode(<<a1::4, a2::4, a3::4, a4::4, 188 | a5::4, a6::4, a7::4, a8::4, 189 | b1::4, b2::4, b3::4, b4::4, 190 | c1::4, c2::4, c3::4, c4::4, 191 | d1::4, d2::4, d3::4, d4::4, 192 | e1::4, e2::4, e3::4, e4::4, 193 | e5::4, e6::4, e7::4, e8::4, 194 | e9::4, e10::4, e11::4, e12::4>>) do 195 | << e(a7), e(a8), e(a5), e(a6), e(a3), e(a4), e(a1), e(a2), ?-, 196 | e(b3), e(b4), e(b1), e(b2), ?-, 197 | e(c3), e(c4), e(c1), e(c2), ?-, 198 | e(d1), e(d2), e(d3), e(d4), ?-, 199 | e(e1), e(e2), e(e3), e(e4), e(e5), e(e6), e(e7), e(e8), e(e9), e(e10), e(e11), e(e12) >> 200 | catch 201 | :error -> :error 202 | else 203 | encoded -> {:ok, encoded} 204 | end 205 | 206 | @compile {:inline, e: 1} 207 | 208 | defp e(0), do: ?0 209 | defp e(1), do: ?1 210 | defp e(2), do: ?2 211 | defp e(3), do: ?3 212 | defp e(4), do: ?4 213 | defp e(5), do: ?5 214 | defp e(6), do: ?6 215 | defp e(7), do: ?7 216 | defp e(8), do: ?8 217 | defp e(9), do: ?9 218 | defp e(10), do: ?a 219 | defp e(11), do: ?b 220 | defp e(12), do: ?c 221 | defp e(13), do: ?d 222 | defp e(14), do: ?e 223 | defp e(15), do: ?f 224 | end 225 | 226 | defmodule Tds.Ecto.VarChar do 227 | @moduledoc """ 228 | A Tds adapter Ecto Type that wraps erlang string into tuple so TDS driver 229 | can understand if erlang string should be encoded as NVarChar or Varchar. 230 | 231 | Due to some limitations in Ecto and Tds driver, it is not possible to 232 | support collations other than the one that is set on connection during login. 233 | Please be aware of this limitation if you plan to store varchar values in 234 | your database using Ecto since you will probably lose some codepoints in 235 | the value during encoding. Instead use `tds_encoding` library and first 236 | encode value and then annotate it as `:binary` by calling `Ecto.Query.API.type/2` 237 | in your query. This way all codepoints will be properly preserved during 238 | insert to database. 239 | """ 240 | use Ecto.Type 241 | 242 | @typedoc """ 243 | An Erlang string 244 | """ 245 | @type t :: String.t 246 | 247 | @typedoc """ 248 | A value annotated as varchar.
249 | """ 250 | @type varchar :: {String.t, :varchar} 251 | 252 | @doc false 253 | @impl true 254 | def type(), do: :varchar 255 | 256 | @doc """ 257 | Casts to string. 258 | """ 259 | @spec cast(t | varchar | any) :: {:ok, t} | :error 260 | @impl true 261 | def cast({value, :varchar}) do 262 | # In case we get already dumped value 263 | {:ok, value} 264 | end 265 | 266 | def cast(value) when is_binary(value) do 267 | {:ok, value} 268 | end 269 | 270 | def cast(_), do: :error 271 | 272 | @doc """ 273 | Same as `cast/1` but raises `Ecto.CastError` on invalid arguments. 274 | """ 275 | @spec cast!(t | varchar | any) :: t 276 | def cast!(value) do 277 | case cast(value) do 278 | {:ok, uuid} -> uuid 279 | :error -> raise Ecto.CastError, type: __MODULE__, value: value 280 | end 281 | end 282 | 283 | @doc """ 284 | Loads the DB type as is. 285 | """ 286 | @impl true 287 | @spec load(t | any) :: {:ok, t} | :error 288 | def load(value) do 289 | {:ok, value} 290 | end 291 | 292 | @doc """ 293 | Converts a string representing a VarChar into a tuple `{value, :varchar}`. 294 | 295 | Returns `:error` if value is not binary. 296 | """ 297 | @impl true 298 | @spec dump(t | any) :: {:ok, varchar} | :error 299 | def dump(value) when is_binary(value) do 300 | {:ok, {value, :varchar}} 301 | end 302 | 303 | def dump(_), do: :error 304 | end 305 | end 306 | --------------------------------------------------------------------------------