├── bench ├── config.yml ├── bench_helper.exs ├── support │ ├── setup.exs │ ├── repo.exs │ └── schemas.exs ├── README.md └── scripts │ └── macro │ ├── insert_bench.exs │ └── all_bench.exs ├── .formatter.exs ├── lib ├── ecto_foundationdb │ ├── index.ex │ ├── layer │ │ ├── decoded_kv.ex │ │ ├── internal_metadata.ex │ │ ├── metadata │ │ │ └── cache.ex │ │ ├── fields.ex │ │ ├── metadata_version.ex │ │ ├── ordering.ex │ │ ├── primary_kv_codec.ex │ │ ├── tx_insert.ex │ │ └── primary_kv_codec │ │ │ └── stream_decoder.ex │ ├── exception │ │ ├── incorrect_tenancy.ex │ │ └── unsupported.ex │ ├── database.ex │ ├── schema.ex │ ├── cli │ │ └── internal.ex │ ├── schema_migration.ex │ ├── sandbox │ │ └── sandboxer.ex │ ├── sandbox.ex │ ├── migrator.ex │ ├── indexer │ │ └── mdv_app_version.ex │ ├── options.ex │ ├── versionstamp.ex │ ├── tenant │ │ ├── managed_tenant.ex │ │ ├── backend.ex │ │ └── directory_tenant.ex │ ├── migration.ex │ ├── layer.ex │ ├── indexer.ex │ ├── future.ex │ └── query_plan.ex └── ecto │ └── adapters │ └── foundationdb │ ├── supervisor.ex │ ├── ecto_adapter.ex │ ├── ecto_adapter_transaction.ex │ ├── ecto_adapter_async.ex │ ├── ecto_adapter_assigns.ex │ └── ecto_adapter_schema.ex ├── test ├── ecto │ ├── adapters │ │ └── foundationdb_test.exs │ └── integration │ │ ├── migrations_test.exs │ │ ├── managed_tenant_test.exs │ │ ├── max_value_size_test.exs │ │ ├── watch_test.exs │ │ ├── large_migration_test.exs │ │ ├── timeseries_test.exs │ │ ├── index_test.exs │ │ ├── indexer_test.exs │ │ ├── progressive_job_test.exs │ │ ├── schema_metadata_test.exs │ │ ├── versionstamp_test.exs │ │ ├── upsert_test.exs │ │ └── pipeline_test.exs ├── ecto_foundationdb │ ├── layer │ │ ├── pack_test.exs │ │ ├── fields_test.exs │ │ ├── kv_zipper_test.exs │ │ └── metadata_test.exs │ └── indexer │ │ └── default_test.exs ├── support │ ├── repos │ │ ├── test_managed_tenant_repo.ex │ │ └── test_repo.ex │ ├── cli │ │ ├── field_a_index.ex │ │ ├── field_b_index.ex │ │ ├── drop_field_a_index.ex 
│ │ └── cli_repo.ex │ ├── schemas │ │ ├── queue_item.ex │ │ ├── user.ex │ │ ├── user2.ex │ │ ├── account.ex │ │ ├── event.ex │ │ └── product.ex │ ├── cases │ │ ├── case.ex │ │ ├── migrations_case.ex │ │ └── tenant_for_case.ex │ ├── migrator.ex │ ├── util.ex │ ├── migration.ex │ └── module_to_module_tracer.ex └── test_helper.exs ├── .dialyzer_ignore.exs ├── .credo.exs ├── .gitignore ├── docs ├── developer_guides │ ├── testing.md │ └── operators_manual.md ├── design │ └── metadata.md └── getting_started │ └── introduction.livemd ├── README.md ├── .github └── workflows │ └── ci.yml ├── mix.exs ├── mix.lock └── CHANGELOG.md /bench/config.yml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.formatter.exs: -------------------------------------------------------------------------------- 1 | # Used by "mix format" 2 | [ 3 | inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] 4 | ] 5 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/index.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Index do 2 | @moduledoc "See `Ecto.Adapters.FoundationDB`" 3 | @type t() :: Keyword.t() 4 | end 5 | -------------------------------------------------------------------------------- /test/ecto/adapters/foundationdb_test.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoAdaptersFoundationDBTest do 2 | use ExUnit.Case 3 | # doctest Ecto.Adapters.FoundationDB 4 | end 5 | -------------------------------------------------------------------------------- /test/ecto_foundationdb/layer/pack_test.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDBLayerPackTest do 2 | use ExUnit.Case, async: true 3 | doctest 
EctoFoundationDB.Layer.Pack 4 | end 5 | -------------------------------------------------------------------------------- /test/ecto_foundationdb/layer/fields_test.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDBLayerFieldsTest do 2 | use ExUnit.Case, async: true 3 | doctest EctoFoundationDB.Layer.Fields 4 | end 5 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/layer/decoded_kv.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Layer.DecodedKV do 2 | @moduledoc false 3 | defstruct codec: nil, data_object: nil, multikey?: false 4 | end 5 | -------------------------------------------------------------------------------- /test/ecto_foundationdb/indexer/default_test.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDBIndexerDefaultTest do 2 | use ExUnit.Case, async: true 3 | doctest EctoFoundationDB.Indexer.Default 4 | end 5 | -------------------------------------------------------------------------------- /test/support/repos/test_managed_tenant_repo.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.TestManagedTenantRepo do 2 | @moduledoc false 3 | 4 | use Ecto.Repo, otp_app: :ecto_foundationdb, adapter: Ecto.Adapters.FoundationDB 5 | end 6 | -------------------------------------------------------------------------------- /bench/bench_helper.exs: -------------------------------------------------------------------------------- 1 | # Micro benchmarks 2 | 3 | ## Macro benchmarks need foundationdb and postgresql up and running 4 | Code.require_file("scripts/macro/insert_bench.exs", __DIR__) 5 | Code.require_file("scripts/macro/all_bench.exs", __DIR__) 6 | -------------------------------------------------------------------------------- 
/lib/ecto_foundationdb/exception/incorrect_tenancy.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Exception.IncorrectTenancy do 2 | @moduledoc """ 3 | This exception is raised when there is a conflict in the tenant used. 4 | """ 5 | defexception [:message] 6 | end 7 | -------------------------------------------------------------------------------- /test/support/cli/field_a_index.ex: -------------------------------------------------------------------------------- 1 | defmodule CliTest.FieldAIndex do 2 | @moduledoc false 3 | use EctoFoundationDB.Migration 4 | 5 | @impl true 6 | def change() do 7 | [create(index(CliTest.Schema, [:field_a]))] 8 | end 9 | end 10 | -------------------------------------------------------------------------------- /test/support/cli/field_b_index.ex: -------------------------------------------------------------------------------- 1 | defmodule CliTest.FieldBIndex do 2 | @moduledoc false 3 | use EctoFoundationDB.Migration 4 | 5 | @impl true 6 | def change() do 7 | [create(index(CliTest.Schema, [:field_b]))] 8 | end 9 | end 10 | -------------------------------------------------------------------------------- /test/support/cli/drop_field_a_index.ex: -------------------------------------------------------------------------------- 1 | defmodule CliTest.DropFieldAIndex do 2 | @moduledoc false 3 | use EctoFoundationDB.Migration 4 | 5 | @impl true 6 | def change() do 7 | [drop(index(CliTest.Schema, [:field_a]))] 8 | end 9 | end 10 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/exception/unsupported.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Exception.Unsupported do 2 | @moduledoc """ 3 | This exception is raised when the application uses an Ecto feature that is not 4 | supported by the FoundationDB Adapter. 
5 | """ 6 | defexception [:message] 7 | end 8 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/database.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Database do 2 | @moduledoc "See `Ecto.Adapters.FoundationDB`" 3 | @type t() :: :erlfdb.database() 4 | 5 | alias EctoFoundationDB.Options 6 | 7 | def open(repo) do 8 | config = repo.config() 9 | :erlfdb.open(Options.get(config, :cluster_file)) 10 | end 11 | end 12 | -------------------------------------------------------------------------------- /test/support/schemas/queue_item.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Schemas.QueueItem do 2 | @moduledoc false 3 | 4 | use Ecto.Schema 5 | 6 | alias EctoFoundationDB.Versionstamp 7 | 8 | @primary_key {:id, Versionstamp, autogenerate: false} 9 | 10 | schema "queue" do 11 | field(:author, :string) 12 | field(:data, :binary) 13 | end 14 | end 15 | -------------------------------------------------------------------------------- /bench/support/setup.exs: -------------------------------------------------------------------------------- 1 | Code.require_file("repo.exs", __DIR__) 2 | Code.require_file("schemas.exs", __DIR__) 3 | 4 | alias Ecto.Bench.FdbRepo 5 | alias EctoFoundationDB.Tenant 6 | 7 | {:ok, _} = Ecto.Adapters.FoundationDB.ensure_all_started(FdbRepo.config(), :temporary) 8 | 9 | {:ok, _pid} = FdbRepo.start_link(log: false) 10 | 11 | Tenant.open!(FdbRepo, "bench") 12 | -------------------------------------------------------------------------------- /test/support/cases/case.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.Case do 2 | @moduledoc false 3 | use ExUnit.CaseTemplate 4 | alias Ecto.Integration.TestRepo 5 | 6 | setup do 7 | context = TenantForCase.setup(TestRepo, log: false) 8 | 9 | on_exit(fn -> 
10 | TenantForCase.exit(TestRepo, context[:tenant_id]) 11 | end) 12 | 13 | {:ok, context} 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /test/support/repos/test_repo.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.TestRepo do 2 | @moduledoc """ 3 | This is the Ecto Repo module under which all integration tests run. 4 | 5 | The integration tests use `EctoFoundationDB.Sandbox' for managing 6 | a standalone FoundationDB cluster. 7 | """ 8 | 9 | use Ecto.Repo, otp_app: :ecto_foundationdb, adapter: Ecto.Adapters.FoundationDB 10 | end 11 | -------------------------------------------------------------------------------- /test/support/cli/cli_repo.ex: -------------------------------------------------------------------------------- 1 | defmodule CliTest.Repo do 2 | @moduledoc """ 3 | This is the Ecto Repo module under which all integration tests run. 4 | 5 | The integration tests use `EctoFoundationDB.Sandbox' for managing 6 | a standalone FoundationDB cluster. 
7 | """ 8 | 9 | use Ecto.Repo, 10 | otp_app: :ecto_foundationdb, 11 | adapter: Ecto.Adapters.FoundationDB 12 | end 13 | -------------------------------------------------------------------------------- /.dialyzer_ignore.exs: -------------------------------------------------------------------------------- 1 | [ 2 | # Hint: mix dialyzer --format ignore_file_strict 3 | 4 | # We don't implement all of the Migration behaviour's possible DDLs, so ignore these errors 5 | {"lib/ecto/adapters/foundationdb.ex", "Type mismatch with behaviour callback to execute_ddl/3."}, 6 | {"lib/ecto/adapters/foundationdb/ecto_adapter_migration.ex", "Type mismatch with behaviour callback to execute_ddl/3."} 7 | ] 8 | -------------------------------------------------------------------------------- /test/support/cases/migrations_case.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.MigrationsCase do 2 | @moduledoc false 3 | use ExUnit.CaseTemplate 4 | alias Ecto.Integration.TestRepo 5 | 6 | setup do 7 | context = TenantForCase.setup(TestRepo, migrator: nil, log: false) 8 | 9 | on_exit(fn -> 10 | TenantForCase.exit(TestRepo, context[:tenant_id]) 11 | end) 12 | 13 | {:ok, context} 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /.credo.exs: -------------------------------------------------------------------------------- 1 | %{ 2 | configs: [ 3 | %{ 4 | name: "default", 5 | checks: %{ 6 | disabled: [ 7 | {Credo.Check.Readability.ParenthesesOnZeroArityDefs, []}, 8 | {Credo.Check.Refactor.CyclomaticComplexity, []}, 9 | {Credo.Check.Refactor.Apply, []}, 10 | {Credo.Check.Design.AliasUsage, excluded_lastnames: [FoundationDB]} 11 | ] 12 | } 13 | } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /bench/support/repo.exs: -------------------------------------------------------------------------------- 1 | fdb_bench_cluster_file = 
System.get_env("FDB_CLUSTER_FILE") || "/usr/local/etc/foundationdb/fdb.cluster" 2 | 3 | Application.put_env(:ecto_foundationdb, Ecto.Bench.FdbRepo, 4 | cluster_file: fdb_bench_cluster_file, 5 | storage_id: EctoFoundationDB.Bench 6 | ) 7 | 8 | defmodule Ecto.Bench.FdbRepo do 9 | use Ecto.Repo, otp_app: :ecto_foundationdb, adapter: Ecto.Adapters.FoundationDB, log: false 10 | end 11 | -------------------------------------------------------------------------------- /test/support/cases/tenant_for_case.ex: -------------------------------------------------------------------------------- 1 | defmodule TenantForCase do 2 | @moduledoc false 3 | alias EctoFoundationDB.Sandbox 4 | 5 | def setup(repo, options) do 6 | tenant_id = Ecto.UUID.autogenerate() 7 | 8 | tenant = Sandbox.checkout(repo, tenant_id, options) 9 | 10 | [tenant: tenant, tenant_id: tenant_id] 11 | end 12 | 13 | def exit(repo, tenant_id) do 14 | Sandbox.checkin(repo, tenant_id) 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/layer/internal_metadata.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Layer.InternalMetadata do 2 | @moduledoc false 3 | 4 | # InternalMetadata is metadata stored alongside data to guide retrieval. 
5 | 6 | def new(metadata, data) do 7 | {metadata, data} 8 | end 9 | 10 | def fetch({metadata, data}) do 11 | {:ok, {metadata, data}} 12 | end 13 | 14 | def fetch(obj) when is_list(obj) do 15 | :error 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /lib/ecto/adapters/foundationdb/supervisor.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.FoundationDB.Supervisor do 2 | @moduledoc false 3 | use Supervisor 4 | 5 | alias EctoFoundationDB.Sandbox.Sandboxer 6 | 7 | def start_link(init_arg) do 8 | Supervisor.start_link(__MODULE__, init_arg) 9 | end 10 | 11 | @impl true 12 | def init(_init_arg) do 13 | children = [Sandboxer] 14 | 15 | Supervisor.init(children, strategy: :one_for_one) 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /test/support/migrator.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Integration.TestMigrator do 2 | @moduledoc false 3 | 4 | use EctoFoundationDB.Migrator 5 | 6 | @impl true 7 | def migrations() do 8 | [ 9 | {0, EctoFoundationDB.Integration.Migration.UserIndex}, 10 | {1, EctoFoundationDB.Integration.Migration.UserSchemaMetadata}, 11 | {2, EctoFoundationDB.Integration.Migration.EventIndex}, 12 | {3, EctoFoundationDB.Integration.Migration.QueueItemIndex} 13 | ] 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /test/support/schemas/user.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Schemas.User do 2 | @moduledoc false 3 | 4 | use Ecto.Schema 5 | 6 | import Ecto.Changeset 7 | 8 | @primary_key {:id, :binary_id, autogenerate: true} 9 | 10 | schema "users" do 11 | field(:name, :string) 12 | field(:notes, :string) 13 | 14 | timestamps() 15 | end 16 | 17 | def changeset(struct, attrs) do 18 
| struct 19 | |> cast(attrs, [:name, :notes]) 20 | |> validate_required([:name]) 21 | end 22 | end 23 | -------------------------------------------------------------------------------- /test/support/schemas/user2.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Schemas.User2 do 2 | @moduledoc false 3 | # User2 is a copy of User, but only used for the large_migration_test.exs so 4 | # we can precisely control the indexes during migration 5 | 6 | use Ecto.Schema 7 | 8 | import Ecto.Changeset 9 | 10 | @primary_key {:id, :binary_id, autogenerate: true} 11 | 12 | schema "users2" do 13 | field(:name, :string) 14 | field(:notes, :string) 15 | 16 | timestamps() 17 | end 18 | 19 | def changeset(struct, attrs) do 20 | struct 21 | |> cast(attrs, [:name, :notes]) 22 | |> validate_required([:name]) 23 | end 24 | end 25 | -------------------------------------------------------------------------------- /test/support/schemas/account.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Schemas.Account do 2 | @moduledoc false 3 | 4 | use Ecto.Schema 5 | 6 | import Ecto.Changeset 7 | 8 | alias EctoFoundationDB.Schemas.Product 9 | alias EctoFoundationDB.Schemas.User 10 | 11 | @primary_key {:id, :binary_id, autogenerate: true} 12 | 13 | schema "accounts" do 14 | field(:name, :string) 15 | field(:email, :string) 16 | 17 | timestamps() 18 | 19 | many_to_many(:users, User, join_through: "account_users") 20 | has_many(:products, Product) 21 | end 22 | 23 | def changeset(struct, attrs) do 24 | struct 25 | |> cast(attrs, [:name]) 26 | |> validate_required([:name]) 27 | end 28 | end 29 | -------------------------------------------------------------------------------- /test/support/util.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Test.Util do 2 | @moduledoc false 3 | def get_random_bytes(size) 
do 4 | case :persistent_term.get({__MODULE__, :random_bytes, size}, nil) do 5 | nil -> 6 | bytes = weak_rand_bytes(size) 7 | :persistent_term.put({__MODULE__, :random_bytes, size}, bytes) 8 | bytes 9 | 10 | bytes -> 11 | bytes 12 | end 13 | end 14 | 15 | defp weak_rand_bytes(n) do 16 | s = :rand.seed_s(:exsss, {0, 0, 0}) 17 | 18 | {nums, _} = 19 | Enum.reduce(1..n, {[], s}, fn _, {acc, s} -> 20 | {r, s} = :rand.uniform_s(256, s) 21 | {[r - 1 | acc], s} 22 | end) 23 | 24 | nums 25 | |> Enum.reverse() 26 | |> :erlang.list_to_binary() 27 | end 28 | end 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover/ 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps/ 9 | 10 | # Where third-party dependencies like ExDoc output generated docs. 11 | /doc/ 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Ignore package tarball (built via "mix hex.build"). 23 | ecto_foundationdb-*.tar 24 | 25 | # Temporary files, for example, from tests. 26 | /tmp/ 27 | 28 | .erlfdb_sandbox 29 | -------------------------------------------------------------------------------- /test/support/schemas/event.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Schemas.Event do 2 | @moduledoc false 3 | 4 | use Ecto.Schema 5 | 6 | alias EctoFoundationDB.Versionstamp 7 | 8 | import Ecto.Changeset 9 | 10 | # Using write_primary: false means that the index will be the 11 | # only way to access the Event. 
There will be no entry existing 12 | # on only the primary key 13 | @schema_context write_primary: false 14 | 15 | @primary_key {:id, Versionstamp, autogenerate: false} 16 | 17 | schema "events" do 18 | field(:date, :date) 19 | field(:user_id, :string) 20 | field(:time, :time_usec) 21 | field(:data, :string) 22 | 23 | timestamps() 24 | end 25 | 26 | def changeset(struct, attrs) do 27 | struct 28 | |> cast(attrs, [:date, :user_id, :time]) 29 | |> validate_required([:date, :user_id, :time]) 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/schema.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Schema do 2 | @moduledoc false 3 | 4 | def get_context!(_source, schema) when is_atom(schema) and not is_nil(schema) do 5 | %{__meta__: _meta = %{context: context}} = Kernel.struct!(schema) 6 | context 7 | end 8 | 9 | def get_context!(_source, _schema), do: [] 10 | 11 | def get_source(schema) do 12 | schema.__schema__(:source) 13 | end 14 | 15 | def field_types(schema) do 16 | field_types(schema, schema.__schema__(:fields)) 17 | end 18 | 19 | def field_types(schema, fields) do 20 | for field <- fields, 21 | do: {field, schema.__schema__(:type, field)} 22 | end 23 | 24 | def get_option(context, :write_primary), do: get_option(context, :write_primary, true) 25 | 26 | def get_option(nil, _key, default), do: default 27 | def get_option(context, key, default), do: Keyword.get(context, key, default) 28 | end 29 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/cli/internal.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.CLI.Internal do 2 | @moduledoc false 3 | 4 | alias EctoFoundationDB.Layer.DecodedKV 5 | alias EctoFoundationDB.Layer.Pack 6 | alias EctoFoundationDB.Layer.PrimaryKVCodec 7 | alias EctoFoundationDB.Schema 
8 | 9 | alias Ecto.Adapters.FoundationDB 10 | 11 | def read_raw_primary_obj(tenant, schema, pk) do 12 | objs = 13 | FoundationDB.transactional(tenant, fn tx -> 14 | kv_codec = Pack.primary_codec(tenant, Schema.get_source(schema), pk) 15 | 16 | {start_key, end_key} = PrimaryKVCodec.range(kv_codec) 17 | 18 | tx 19 | |> :erlfdb.get_range(start_key, end_key, wait: true) 20 | |> PrimaryKVCodec.stream_decode(tenant) 21 | |> Enum.map(fn %DecodedKV{data_object: obj} -> obj end) 22 | end) 23 | 24 | case objs do 25 | [obj] -> 26 | obj 27 | 28 | [] -> 29 | nil 30 | end 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | Logger.configure(level: :info) 2 | 3 | alias Ecto.Integration.TestRepo 4 | alias Ecto.Integration.TestManagedTenantRepo 5 | 6 | Application.put_env(:ecto_foundationdb, TestRepo, 7 | open_db: &EctoFoundationDB.Sandbox.open_db/1, 8 | storage_id: EctoFoundationDB.Sandbox, 9 | migrator: EctoFoundationDB.Integration.TestMigrator 10 | ) 11 | 12 | Application.put_env(:ecto_foundationdb, TestManagedTenantRepo, 13 | open_db: &EctoFoundationDB.Sandbox.open_db/1, 14 | storage_id: EctoFoundationDB.Sandbox, 15 | migrator: EctoFoundationDB.Integration.TestMigrator, 16 | tenant_backend: EctoFoundationDB.Tenant.ManagedTenant 17 | ) 18 | 19 | Application.put_env(:ecto_foundationdb, CliTest.Repo, 20 | open_db: &EctoFoundationDB.Sandbox.open_db/1, 21 | storage_id: EctoFoundationDB.Sandbox, 22 | migrator: CliTest.Migrator 23 | ) 24 | 25 | {:ok, _} = TestRepo.start_link() 26 | {:ok, _} = CliTest.Repo.start_link() 27 | 28 | ExUnit.start() 29 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/schema_migration.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Migration.SchemaMigration do 2 | # Defines a 
schema that works with a table that tracks schema migrations. 3 | @moduledoc false 4 | alias EctoFoundationDB.Migration.SchemaMigration 5 | use Ecto.Schema 6 | 7 | import Ecto.Query, only: [from: 2] 8 | 9 | @schema_migrations_source "schema_migrations" 10 | 11 | def source(), do: @schema_migrations_source 12 | 13 | @primary_key false 14 | schema @schema_migrations_source do 15 | field(:version, :integer, primary_key: true) 16 | timestamps(updated_at: false) 17 | end 18 | 19 | def versions() do 20 | from(m in SchemaMigration, select: m.version) 21 | end 22 | 23 | def up(repo, version, opts \\ []) do 24 | %__MODULE__{version: version} 25 | |> repo.insert(opts) 26 | end 27 | 28 | def down(repo, version, opts) do 29 | from(m in SchemaMigration, where: m.version == type(^version, :integer)) 30 | |> repo.delete_all(opts) 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/sandbox/sandboxer.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Sandbox.Sandboxer do 2 | @moduledoc false 3 | use GenServer 4 | 5 | defstruct [:db] 6 | 7 | def start_link(init_arg) do 8 | GenServer.start_link(__MODULE__, init_arg) 9 | end 10 | 11 | def get_or_create_test_db(pid, subdir) do 12 | GenServer.call(pid, {:get_or_create_test_db, subdir}, 60_000) 13 | end 14 | 15 | @impl true 16 | def init(_init_arg) do 17 | {:ok, %__MODULE__{}} 18 | end 19 | 20 | @impl true 21 | def handle_call({:get_or_create_test_db, subdir}, _from, state = %__MODULE__{db: nil}) do 22 | # :erlfdb_sandbox.open/0 has a wide-open receive block, so we have to insulate 23 | # it from the GenServer 24 | task = Task.async(fn -> :erlfdb_sandbox.open(subdir) end) 25 | db = Task.await(task) 26 | 27 | {:reply, db, %__MODULE__{state | db: db}} 28 | end 29 | 30 | def handle_call({:get_or_create_test_db, _}, _from, state = %__MODULE__{db: db}) do 31 | {:reply, db, state} 32 | end 33 | end 34 | 
-------------------------------------------------------------------------------- /bench/README.md: -------------------------------------------------------------------------------- 1 | # EctoFDB Benchmarks 2 | 3 | Note: Benchmarking cribbed from [ecto_sql](https://github.com/elixir-ecto/ecto_sql). 4 | 5 | EctoFDB has a benchmark suite to track performance of sensitive operations. Benchmarks 6 | are run using the [Benchee](https://github.com/PragTob/benchee) library and 7 | need FoundationDB up and running. 8 | 9 | To run the benchmarks tests just type in the console: 10 | 11 | ``` 12 | # POSIX-compatible shells 13 | $ MIX_ENV=bench mix run bench/bench_helper.exs 14 | ``` 15 | 16 | ``` 17 | # other shells 18 | $ env MIX_ENV=bench mix run bench/bench_helper.exs 19 | ``` 20 | 21 | Benchmarks are inside the `scripts/` directory and are divided into two 22 | categories: 23 | 24 | * `micro benchmarks`: Operations that don't actually interface with the database, 25 | but might need it up and running to start the Ecto agents and processes. 26 | 27 | * `macro benchmarks`: Operations that are actually run in the database. These are 28 | more like integration tests. 29 | 30 | You can also run a benchmark individually by giving the path to the benchmark 31 | script instead of `bench/bench_helper.exs`. 
32 | -------------------------------------------------------------------------------- /test/support/schemas/product.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Schemas.Product do 2 | @moduledoc false 3 | 4 | use Ecto.Schema 5 | 6 | import Ecto.Changeset 7 | 8 | alias EctoFoundationDB.Schemas.Account 9 | 10 | @primary_key {:id, :binary_id, autogenerate: true} 11 | 12 | schema "products" do 13 | field(:name, :string) 14 | field(:description, :string) 15 | field(:external_id, Ecto.UUID) 16 | field(:bid, :binary_id) 17 | field(:tags, {:array, :string}, default: []) 18 | field(:approved_at, :naive_datetime) 19 | field(:ordered_at, :utc_datetime) 20 | field(:price, :decimal) 21 | 22 | belongs_to(:account, Account) 23 | 24 | timestamps() 25 | end 26 | 27 | def changeset(struct, attrs) do 28 | struct 29 | |> cast(attrs, [:name, :description, :tags, :account_id, :approved_at, :ordered_at]) 30 | |> validate_required([:name]) 31 | |> maybe_generate_external_id() 32 | end 33 | 34 | defp maybe_generate_external_id(changeset) do 35 | if get_field(changeset, :external_id) do 36 | changeset 37 | else 38 | put_change(changeset, :external_id, Ecto.UUID.bingenerate()) 39 | end 40 | end 41 | end 42 | -------------------------------------------------------------------------------- /test/support/migration.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Integration.Migration.UserIndex do 2 | @moduledoc false 3 | alias EctoFoundationDB.Schemas.User 4 | alias EctoFoundationDB.Schemas.User2 5 | use EctoFoundationDB.Migration 6 | 7 | @impl true 8 | def change() do 9 | [create(index(User, [:name])), create(index(User2, [:name]))] 10 | end 11 | end 12 | 13 | defmodule EctoFoundationDB.Integration.Migration.UserSchemaMetadata do 14 | @moduledoc false 15 | alias EctoFoundationDB.Schemas.User 16 | use EctoFoundationDB.Migration 17 | 18 | @impl true 19 | def 
change() do 20 | [create(metadata(User))] 21 | end 22 | end 23 | 24 | defmodule EctoFoundationDB.Integration.Migration.EventIndex do 25 | @moduledoc false 26 | alias EctoFoundationDB.Schemas.Event 27 | use EctoFoundationDB.Migration 28 | 29 | @impl true 30 | def change() do 31 | [ 32 | create(metadata(Event)), 33 | create(index(Event, [:date, :user_id, :time], options: [mapped?: false])) 34 | ] 35 | end 36 | end 37 | 38 | defmodule EctoFoundationDB.Integration.Migration.QueueItemIndex do 39 | @moduledoc false 40 | alias EctoFoundationDB.Schemas.QueueItem 41 | use EctoFoundationDB.Migration 42 | 43 | @impl true 44 | def change() do 45 | [ 46 | create(index(QueueItem, [:author])) 47 | ] 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /lib/ecto/adapters/foundationdb/ecto_adapter.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.FoundationDB.EctoAdapter do 2 | @moduledoc false 3 | @behaviour Ecto.Adapter 4 | 5 | @impl Ecto.Adapter 6 | defmacro __before_compile__(_env), do: :this_is_never_called 7 | 8 | @impl Ecto.Adapter 9 | def ensure_all_started(_config, type), do: Application.ensure_all_started(:erlfdb, type) 10 | 11 | @impl Ecto.Adapter 12 | def init(config) do 13 | # Pulled from QLC 14 | log = Keyword.get(config, :log, :debug) 15 | stacktrace = Keyword.get(config, :stacktrace, nil) 16 | telemetry_prefix = Keyword.fetch!(config, :telemetry_prefix) 17 | telemetry = {config[:repo], log, telemetry_prefix ++ [:query]} 18 | 19 | {:ok, Ecto.Adapters.FoundationDB.Supervisor.child_spec([]), 20 | %{telemetry: telemetry, stacktrace: stacktrace, opts: config}} 21 | end 22 | 23 | @impl Ecto.Adapter 24 | def checkout(%{pid: pid}, _config, fun) do 25 | Process.put({__MODULE__, pid}, true) 26 | result = fun.() 27 | Process.delete({__MODULE__, pid}) 28 | result 29 | end 30 | 31 | @impl Ecto.Adapter 32 | def checked_out?(%{pid: pid}) do 33 | 
Process.get({__MODULE__, pid}) != nil 34 | end 35 | 36 | @impl Ecto.Adapter 37 | def loaders(_primitive_type, ecto_type) do 38 | [ecto_type] 39 | end 40 | 41 | @impl Ecto.Adapter 42 | def dumpers(_primitive_type, ecto_type) do 43 | [ecto_type] 44 | end 45 | end 46 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/sandbox.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Sandbox do 2 | @moduledoc """ 3 | A module for managing a sandbox FoundationDB cluster. This allows a developer to create 4 | a space under which it should be safe to write tests. 5 | 6 | When using this module, it creates a directory in the project root called `.erlfdb`. It 7 | is safe to delete this directory when you no longer need it (e.g. after test execution) 8 | 9 | See [Testing with EctoFoundationDB](testing.html) for more. 10 | """ 11 | alias EctoFoundationDB.Database 12 | alias EctoFoundationDB.Options 13 | alias EctoFoundationDB.Sandbox.Sandboxer 14 | alias EctoFoundationDB.Tenant 15 | 16 | @spec open_db(Ecto.Repo.t()) :: Database.t() 17 | def open_db(repo) do 18 | [{Ecto.Adapters.FoundationDB.Supervisor, sup, :supervisor, _}] = 19 | Supervisor.which_children(repo) 20 | 21 | repo_children = Supervisor.which_children(sup) 22 | {Sandboxer, pid, :worker, _} = List.keyfind!(repo_children, Sandboxer, 0) 23 | 24 | Sandboxer.get_or_create_test_db(pid, "#{inspect(repo)}") 25 | end 26 | 27 | @spec checkout(Ecto.Repo.t(), Tenant.id(), Options.t()) :: Tenant.t() 28 | def checkout(repo, id, options \\ []) when is_binary(id) do 29 | Tenant.open_empty!(repo, id, options) 30 | end 31 | 32 | @spec checkin(Ecto.Repo.t(), Tenant.id()) :: :ok 33 | def checkin(repo, id) when is_binary(id) do 34 | Tenant.clear_delete!(repo, id) 35 | :ok 36 | end 37 | end 38 | -------------------------------------------------------------------------------- /test/ecto/integration/migrations_test.exs: 
-------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.MigrationsTest do 2 | use Ecto.Integration.MigrationsCase, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | 6 | alias EctoFoundationDB.Schemas.User 7 | 8 | alias Ecto.Adapters.FoundationDB 9 | 10 | alias EctoFoundationDB.CLI 11 | alias EctoFoundationDB.Exception.Unsupported 12 | alias EctoFoundationDB.Test.Util 13 | 14 | import Ecto.Query 15 | 16 | describe "CLI.migrate!/1" do 17 | test "migrates all tenants", context do 18 | tenant = context[:tenant] 19 | 20 | {:ok, _user1} = 21 | %User{name: "John", notes: Util.get_random_bytes(100_000)} 22 | |> FoundationDB.usetenant(tenant) 23 | |> TestRepo.insert() 24 | 25 | {:ok, _user2} = 26 | %User{name: "James"} 27 | |> FoundationDB.usetenant(tenant) 28 | |> TestRepo.insert() 29 | 30 | {:ok, _user3} = 31 | %User{name: "John"} 32 | |> FoundationDB.usetenant(tenant) 33 | |> TestRepo.insert() 34 | 35 | query_fun = fn -> 36 | from(u in User, where: u.name == ^"John") 37 | |> TestRepo.all(prefix: tenant) 38 | end 39 | 40 | assert_raise(Unsupported, ~r/FoundationDB Adapter supports either/, query_fun) 41 | 42 | # Ecto.Integration.MigrationsCase skips the migrations on purpose, so now we'll apply them manually. 
43 | :ok = CLI.migrate!(TestRepo, log: false, prefix: [tenant.id]) 44 | 45 | assert [%User{name: "John"}, %User{name: "John"}] = query_fun.() 46 | end 47 | end 48 | end 49 | -------------------------------------------------------------------------------- /bench/support/schemas.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Bench.User do 2 | use Ecto.Schema 3 | alias EctoFoundationDB.Versionstamp 4 | 5 | @primary_key {:id, Versionstamp, autogenerate: false} 6 | 7 | schema "users" do 8 | field(:name, :string) 9 | field(:email, :string) 10 | field(:password, :string) 11 | field(:time_attr, :time) 12 | field(:date_attr, :date) 13 | field(:naive_datetime_attr, :naive_datetime) 14 | field(:uuid, :binary_id) 15 | end 16 | 17 | @required_attrs [ 18 | :name, 19 | :email, 20 | :password, 21 | :time_attr, 22 | :date_attr, 23 | :naive_datetime_attr, 24 | :uuid 25 | ] 26 | 27 | def changeset() do 28 | changeset(sample_data()) 29 | end 30 | 31 | def changeset(data) do 32 | Ecto.Changeset.cast(%__MODULE__{}, data, @required_attrs) 33 | end 34 | 35 | def sample_data do 36 | %__MODULE__{ 37 | name: "Lorem ipsum dolor sit amet, consectetur adipiscing elit.", 38 | email: "foobar@email.com", 39 | password: "mypass", 40 | time_attr: Time.utc_now() |> Time.truncate(:second), 41 | date_attr: Date.utc_today(), 42 | naive_datetime_attr: NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second), 43 | uuid: Ecto.UUID.generate() 44 | } 45 | end 46 | end 47 | 48 | defmodule Ecto.Bench.Game do 49 | use Ecto.Schema 50 | 51 | alias EctoFoundationDB.Versionstamp 52 | 53 | @primary_key {:id, Versionstamp, autogenerate: false} 54 | 55 | schema "games" do 56 | field(:name, :string) 57 | field(:price, :float) 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/migrator.ex: -------------------------------------------------------------------------------- 1 | defmodule 
EctoFoundationDB.Migrator do 2 | @moduledoc """ 3 | Implement this behaviour to define migrations for `Ecto.Adapters.FoundationDB` 4 | """ 5 | 6 | require Logger 7 | 8 | @callback migrations() :: [{non_neg_integer(), module()}] 9 | 10 | alias Ecto.Adapters.FoundationDB 11 | alias EctoFoundationDB.MigrationsPJ 12 | alias EctoFoundationDB.Options 13 | alias EctoFoundationDB.Tenant 14 | 15 | @doc false 16 | defmacro __using__(_) do 17 | quote location: :keep do 18 | import EctoFoundationDB.Migrator 19 | @before_compile EctoFoundationDB.Migrator 20 | @behaviour EctoFoundationDB.Migrator 21 | end 22 | end 23 | 24 | @doc false 25 | defmacro __before_compile__(_env) do 26 | quote do 27 | def __migrator__ do 28 | [] 29 | end 30 | end 31 | end 32 | 33 | @spec up(Ecto.Repo.t(), Tenant.t() | Tenant.id(), Options.t()) :: :ok 34 | def up(repo, tenant_id, options) when is_binary(tenant_id) do 35 | db = FoundationDB.db(repo) 36 | tenant = Tenant.Backend.db_open(db, tenant_id, options) 37 | up(repo, tenant, options) 38 | end 39 | 40 | def up(repo, tenant, options) do 41 | migrator = Options.get(options, :migrator) 42 | migrator = if is_nil(migrator), do: repo, else: migrator 43 | {:module, _} = Code.ensure_loaded(migrator) 44 | migrations? = Kernel.function_exported?(migrator, :migrations, 0) 45 | 46 | if migrations? 
do 47 | limit = Options.get(options, :migration_step) 48 | MigrationsPJ.transactional(repo, tenant, migrator, limit, options) 49 | else 50 | :ok 51 | end 52 | 53 | :ok 54 | end 55 | end 56 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/layer/metadata/cache.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Layer.Metadata.Cache do 2 | @moduledoc false 3 | alias EctoFoundationDB.Layer.Metadata 4 | 5 | defmodule CacheItem do 6 | @moduledoc false 7 | alias EctoFoundationDB.Layer.MetadataVersion 8 | 9 | def new(key, mdv, metadata, ts) do 10 | {key, mdv, metadata, ts} 11 | end 12 | 13 | def get_metadata_version({_key, mdv, _metadata, _ts}), do: mdv 14 | def get_metadata({_key, _mdv, metadata, _ts}), do: metadata 15 | 16 | def match_global?(nil, _mdv_b), do: false 17 | 18 | def match_global?({_key, mdv_a, _metadata, _ts}, mdv_b), 19 | do: MetadataVersion.match_global?(mdv_a, mdv_b) 20 | 21 | def match_local?(nil, _), do: false 22 | 23 | def match_local?({_key, mdv_a, _metadata, _ts}, mdv_b), 24 | do: MetadataVersion.match_local?(mdv_a, mdv_b) 25 | end 26 | 27 | def key(tenant, source) do 28 | {tenant.id, source} 29 | end 30 | 31 | def lookup(nil, _key), do: nil 32 | 33 | def lookup(table, key) do 34 | case :ets.lookup(table, key) do 35 | [item] -> 36 | item 37 | 38 | _ -> 39 | nil 40 | end 41 | end 42 | 43 | def update(nil, _new), do: :ok 44 | def update(_table, nil), do: :ok 45 | 46 | def update( 47 | table, 48 | {key, _mdv, %Metadata{partial_indexes: partial_indexes}, _ts} 49 | ) 50 | when length(partial_indexes) > 0 do 51 | delete(table, key) 52 | :ok 53 | end 54 | 55 | def update(table, cache_item) do 56 | :ets.insert(table, cache_item) 57 | 58 | :ok 59 | end 60 | 61 | def delete(table, key) do 62 | :ets.delete(table, key) 63 | end 64 | end 65 | -------------------------------------------------------------------------------- 
/docs/developer_guides/testing.md: -------------------------------------------------------------------------------- 1 | # Testing with ExUnit 2 | 3 | Setting up your app to use a FoundationDB Sandbox is very easy! 4 | 5 | First, in `config/test.exs`: 6 | 7 | ```elixir 8 | config :my_app, MyApp.Repo, 9 | open_db: &EctoFoundationDB.Sandbox.open_db/1 10 | ``` 11 | 12 | Next, set up an ExUnit Case that will provide you with a new Tenant to use 13 | in your tests. This step is optional, but recommended: it will make writing 14 | each test frictionless. 15 | 16 | ```elixir 17 | defmodule MyApp.TenantCase do 18 | @moduledoc false 19 | use ExUnit.CaseTemplate 20 | 21 | alias EctoFoundationDB.Sandbox 22 | 23 | setup do 24 | tenant_id = Ecto.UUID.autogenerate() 25 | 26 | tenant = Sandbox.checkout(MyApp.Repo, tenant_id, []) 27 | 28 | on_exit(fn -> 29 | Sandbox.checkin(MyApp.Repo, tenant_id) 30 | end) 31 | 32 | {:ok, [tenant_id: tenant_id, tenant: tenant]} 33 | end 34 | end 35 | ``` 36 | 37 | Now, you can use your TenantCase to do any FDB operation in a test. Because 38 | we're using tenants with randomized names, you don't have to worry about key conflicts, so you can include the `async: true` option. 
39 | 40 | ```elixir 41 | defmodule MyAppHelloFDBTest do 42 | use MyApp.TenantCase, async: true 43 | 44 | alias Ecto.Adapters.FoundationDB 45 | 46 | test "hello", context do 47 | tenant = context[:tenant] 48 | 49 | # An :erlfdb hello world, delete this for your tests 50 | assert :not_found == 51 | FoundationDB.transactional( 52 | tenant, 53 | fn tx -> 54 | tx 55 | |> :erlfdb.get("hello world") 56 | |> :erlfdb.wait() 57 | end 58 | ) 59 | 60 | # Example: 61 | # MyApp.Repo.insert!(%MyApp.Hello{message: "world"}, prefix: tenant) 62 | end 63 | end 64 | ``` 65 | -------------------------------------------------------------------------------- /bench/scripts/macro/insert_bench.exs: -------------------------------------------------------------------------------- 1 | # -----------------------------------Goal-------------------------------------- 2 | # Compare the performance of inserting changesets and structs in the different 3 | # supported databases 4 | 5 | # -------------------------------Description----------------------------------- 6 | # This benchmark tracks performance of inserting changesets and structs in the 7 | # database with Repo.insert!/1 function. The query pass through 8 | # the steps of translating the SQL statements, sending them to the database and 9 | # returning the result of the transaction. Both, Ecto Adapters and Database itself 10 | # play a role and can affect the results of this benchmark. 
# ----------------------------Factors(don't change)---------------------------
# Different adapters supported by Ecto with the proper database up and running

# ----------------------------Parameters(change)-------------------------------
# Different inputs to be inserted, aka Changesets and Structs

Code.require_file("../../support/setup.exs", __DIR__)

alias Ecto.Bench.User
alias EctoFoundationDB.Tenant

tenant = Tenant.open!(Ecto.Bench.FdbRepo, "bench")

inputs = %{
  # NOTE: keep the live entry comma-free while the "Changeset" input is
  # commented out — Elixir map literals do not allow trailing commas, so
  # `User.sample_data(),` followed only by a comment before `}` fails to compile.
  "Struct" => User.sample_data()
  # "Changeset" => User.changeset(User.sample_data())
}

jobs = %{
  "Fdb Insert" => fn entry ->
    # async_insert_all!/2 inside the transaction returns a future;
    # await/1 resolves it to the list of inserted records.
    f =
      Ecto.Bench.FdbRepo.transactional(tenant, fn ->
        Ecto.Bench.FdbRepo.async_insert_all!(User, [entry])
      end)

    [record] = Ecto.Bench.FdbRepo.await(f)
    record
  end
}

path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results"
_file = Path.join(path, "insert.json")

Benchee.run(
  jobs,
  inputs: inputs,
  formatters: [Benchee.Formatters.Console]
)

# Clean inserted data
Ecto.Bench.FdbRepo.delete_all(User, prefix: tenant)
-------------------------------------------------------------------------------- /lib/ecto/adapters/foundationdb/ecto_adapter_transaction.ex: --------------------------------------------------------------------------------
defmodule Ecto.Adapters.FoundationDB.EctoAdapterTransaction do
  @moduledoc false
  alias Ecto.Adapters.FoundationDB
  alias EctoFoundationDB.Layer.Tx
  @behaviour Ecto.Adapter.Transaction

  # Sentinel tag thrown by rollback/2 and caught by transaction/3.
  @rollback :__ectofdbtxrollback__

  @doc """
  Runs the given function inside a transaction.

  Returns `{:ok, value}` if the transaction was successful where `value`
  is the value return by the function or `{:error, value}` if the transaction
  was rolled back where `value` is the value given to `rollback/1`.
15 | """ 16 | @impl true 17 | def transaction(_adapter_meta, options, function) when is_function(function, 0) do 18 | tenant = assert_tenancy!(options) 19 | 20 | FoundationDB.transactional(tenant, fn -> 21 | function.() 22 | end) 23 | catch 24 | {@rollback, value} -> {:error, value} 25 | end 26 | 27 | def transaction(_adapter_meta, options, function) when is_function(function, 1) do 28 | tenant = assert_tenancy!(options) 29 | 30 | FoundationDB.transactional(tenant, fn tx -> 31 | function.(tx) 32 | end) 33 | catch 34 | {:__ectofdbtxrollback__, value} -> {:error, value} 35 | end 36 | 37 | @doc """ 38 | Returns true if the given process is inside a transaction. 39 | """ 40 | @impl true 41 | def in_transaction?(_adapter_meta) do 42 | Tx.in_tx?() 43 | end 44 | 45 | @doc """ 46 | Rolls back the current transaction. 47 | 48 | The transaction will return the value given as `{:error, value}`. 49 | 50 | See `c:Ecto.Repo.rollback/1`. 51 | """ 52 | @impl true 53 | def rollback(adapter_meta, value) do 54 | if in_transaction?(adapter_meta) do 55 | throw({@rollback, value}) 56 | end 57 | end 58 | 59 | defp assert_tenancy!(options) do 60 | case Keyword.get(options, :prefix) do 61 | nil -> 62 | raise "Tenant required" 63 | 64 | tenant -> 65 | tenant 66 | end 67 | end 68 | end 69 | -------------------------------------------------------------------------------- /test/ecto_foundationdb/layer/kv_zipper_test.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDBPrimaryKVCodecTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias EctoFoundationDB.Layer.DecodedKV 5 | alias EctoFoundationDB.Layer.PrimaryKVCodec 6 | alias EctoFoundationDB.Test.Util 7 | 8 | test "kv_codec" do 9 | tenant = %EctoFoundationDB.Tenant{backend: EctoFoundationDB.Tenant.ManagedTenant} 10 | 11 | a_codec = PrimaryKVCodec.new({"a"}) 12 | a_data = [id: Util.get_random_bytes(82)] 13 | a_value = :erlang.term_to_binary(a_data) 14 | assert 100 = 
byte_size(a_value) 15 | assert {true, a_kvs} = PrimaryKVCodec.encode(a_codec, a_value, max_single_value_size: 10) 16 | 17 | # 100 bytes split into 10 kvs each of size 10 bytes. And 1 extra kv for the "primary write" which holds the metadata 18 | assert 11 = length(a_kvs) 19 | 20 | assert [ 21 | %DecodedKV{ 22 | codec: %EctoFoundationDB.Layer.PrimaryKVCodec{tuple: {"a"}}, 23 | data_object: ^a_data, 24 | multikey?: true 25 | } 26 | ] = 27 | PrimaryKVCodec.stream_decode(a_kvs, tenant) |> Enum.to_list() 28 | 29 | b_codec = PrimaryKVCodec.new({"b"}) 30 | b_data = [id: "xx"] 31 | b_value = :erlang.term_to_binary(b_data) 32 | assert 20 = byte_size(b_value) 33 | assert {false, b_kvs} = PrimaryKVCodec.encode(b_codec, b_value, max_single_value_size: 20) 34 | assert 1 = length(b_kvs) 35 | 36 | assert [ 37 | %DecodedKV{ 38 | codec: %EctoFoundationDB.Layer.PrimaryKVCodec{tuple: {"a"}}, 39 | data_object: ^a_data, 40 | multikey?: true 41 | }, 42 | %DecodedKV{ 43 | codec: %EctoFoundationDB.Layer.PrimaryKVCodec{tuple: {"b"}}, 44 | data_object: ^b_data, 45 | multikey?: false 46 | } 47 | ] = 48 | PrimaryKVCodec.stream_decode(a_kvs ++ b_kvs, tenant) |> Enum.to_list() 49 | end 50 | end 51 | -------------------------------------------------------------------------------- /bench/scripts/macro/all_bench.exs: -------------------------------------------------------------------------------- 1 | # -----------------------------------Goal-------------------------------------- 2 | # Compare the performance of querying all objects of the different supported 3 | # databases 4 | 5 | # -------------------------------Description----------------------------------- 6 | # This benchmark tracks performance of querying a set of objects registered in 7 | # the database with Repo.all/2 function. The query pass through 8 | # the steps of translating the SQL statements, sending them to the database and 9 | # load the results into Ecto structures. 
Both, Ecto Adapters and Database itself 10 | # play a role and can affect the results of this benchmark. 11 | 12 | # ----------------------------Factors(don't change)--------------------------- 13 | # Different adapters supported by Ecto with the proper database up and running 14 | 15 | # ----------------------------Parameters(change)------------------------------- 16 | # There is only a unique parameter in this benchmark, the User objects to be 17 | # fetched. 18 | 19 | Code.require_file("../../support/setup.exs", __DIR__) 20 | 21 | alias Ecto.Bench.User 22 | alias EctoFoundationDB.Tenant 23 | 24 | limit = 5_000 25 | 26 | tenant = Tenant.open!(Ecto.Bench.FdbRepo, "bench") 27 | 28 | users = 29 | 1..limit 30 | |> Enum.map(fn _ -> User.sample_data() end) 31 | 32 | # We need to insert data to fetch 33 | f = Ecto.Bench.FdbRepo.transactional(tenant, fn -> 34 | Ecto.Bench.FdbRepo.async_insert_all!(User, users) 35 | end) 36 | Ecto.Bench.FdbRepo.await(f) 37 | 38 | jobs = %{ 39 | "Fdb Repo.all/2" => fn -> Ecto.Bench.FdbRepo.all(User, limit: limit, prefix: tenant) end 40 | } 41 | 42 | path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results" 43 | _file = Path.join(path, "all.json") 44 | 45 | Benchee.run( 46 | jobs, 47 | formatters: [Benchee.Formatters.Console], 48 | time: 10, 49 | after_each: fn results -> 50 | ^limit = length(results) 51 | end 52 | ) 53 | 54 | # Clean inserted data 55 | Ecto.Bench.FdbRepo.delete_all(User, prefix: tenant) 56 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ecto FoundationDB Adapter 2 | 3 | [![CI](https://github.com/foundationdb-beam/ecto_foundationdb/actions/workflows/ci.yml/badge.svg)](https://github.com/foundationdb-beam/ecto_foundationdb/actions/workflows/ci.yml) 4 | 5 | ## Driver 6 | 7 | An Ecto Adapter for FoundationDB, written using 
[foundationdb-beam/erlfdb](https://github.com/foundationdb-beam/erlfdb) 8 | as the driver for communicating with FoundationDB. 9 | 10 | ## Installation 11 | 12 | Install the latest stable release of FoundationDB from the 13 | [official FoundationDB Releases](https://github.com/apple/foundationdb/releases). 14 | 15 | The `foundationdb-server` package is required on any system that will be running 16 | a FoundationDB server instance. For example, it's common to 17 | run the `foundationdb-server` on your development machine and on managed 18 | instances running a FoundationDB cluster, but not for your stateless Elixir 19 | application server in production. 20 | 21 | `foundationdb-clients` is always required. 22 | 23 | Include `:ecto_foundationdb` in your list of dependencies in `mix.exs`: 24 | 25 | ```elixir 26 | defp deps do 27 | [ 28 | {:ecto_foundationdb, "~> 0.4"} 29 | ] 30 | end 31 | ``` 32 | 33 | ## Motivation 34 | 35 | What are some reasons to choose EctoFDB? 36 | 37 | FoundationDB offers: 38 | 39 | * Horizontal scaling of high-write workloads 40 | * Unbounded multi-tenancy 41 | * Serializable Transactions 42 | * Rich operations: multi-region, disaster recovery, backup/restore, telemetry 43 | 44 | EctoFoundationDB offers: 45 | 46 | * Object storage similar to [Record Layer](https://github.com/FoundationDB/fdb-record-layer) 47 | * Online migrations 48 | * Built-in common indexes 49 | * Extensible index types 50 | 51 | ## Usage 52 | 53 | See the [documentation](https://hexdocs.pm/ecto_foundationdb) for usage 54 | information. 55 | 56 | For documentation on `main` branch, see [Ecto.Adapters.FoundationDB](https://github.com/foundationdb-beam/ecto_foundationdb/blob/main/lib/ecto/adapters/foundationdb.ex). 57 | 58 | ## Running tests 59 | 60 | To run the integration tests, use the following. 
61 | 62 | ```sh 63 | mix test 64 | ``` 65 | -------------------------------------------------------------------------------- /test/ecto/integration/managed_tenant_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.ManagedTenantCase do 2 | @moduledoc false 3 | use ExUnit.CaseTemplate 4 | 5 | alias Ecto.Integration.TestManagedTenantRepo 6 | 7 | setup do 8 | {:ok, _} = TestManagedTenantRepo.start_link() 9 | 10 | context = TenantForCase.setup(TestManagedTenantRepo, log: false) 11 | 12 | on_exit(fn -> 13 | TenantForCase.exit(TestManagedTenantRepo, context[:tenant_id]) 14 | end) 15 | 16 | {:ok, context} 17 | end 18 | end 19 | 20 | defmodule Ecto.Integration.ManagedTenantTest do 21 | use Ecto.Integration.ManagedTenantCase, async: true 22 | 23 | alias Ecto.Adapters.FoundationDB 24 | alias Ecto.Integration.TestManagedTenantRepo 25 | alias EctoFoundationDB.Schemas.User 26 | 27 | import Ecto.Query 28 | 29 | test "managed tenant", context do 30 | tenant = context[:tenant] 31 | 32 | assert tenant.backend == EctoFoundationDB.Tenant.ManagedTenant 33 | 34 | # Insert consistency 35 | {:ok, _user1} = 36 | %User{name: "John"} 37 | |> FoundationDB.usetenant(tenant) 38 | |> TestManagedTenantRepo.insert() 39 | 40 | {:ok, user2} = 41 | %User{name: "James"} 42 | |> FoundationDB.usetenant(tenant) 43 | |> TestManagedTenantRepo.insert() 44 | 45 | {:ok, user3} = 46 | %User{name: "John"} 47 | |> FoundationDB.usetenant(tenant) 48 | |> TestManagedTenantRepo.insert() 49 | 50 | assert [%User{name: "John"}, %User{name: "John"}] = 51 | from(u in User, where: u.name == ^"John") 52 | |> TestManagedTenantRepo.all(prefix: tenant) 53 | 54 | # Delete consistency 55 | TestManagedTenantRepo.delete!(user3) 56 | 57 | assert [%User{name: "John"}] = 58 | from(u in User, where: u.name == ^"John") 59 | |> TestManagedTenantRepo.all(prefix: tenant) 60 | 61 | # Update consistency 62 | user2 63 | |> User.changeset(%{name: "John"}) 64 | |> 
TestManagedTenantRepo.update!() 65 | 66 | assert [%User{name: "John"}, %User{name: "John"}] = 67 | from(u in User, where: u.name == ^"John") 68 | |> TestManagedTenantRepo.all(prefix: tenant) 69 | end 70 | end 71 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/layer/fields.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Layer.Fields do 2 | @moduledoc false 3 | 4 | @doc """ 5 | Ecto provides a compiled set of 'select's. We simply pull the field names out 6 | 7 | ## Examples 8 | 9 | iex> EctoFoundationDB.Layer.Fields.parse_select_fields([{{:., [], [{:&, [], [0]}, :a]}, [], []}]) 10 | [:a] 11 | 12 | """ 13 | def parse_select_fields(select_fields) do 14 | select_fields 15 | |> Enum.map(fn 16 | {{:., _, [{:&, [], [0]}, field]}, [], []} -> 17 | field 18 | 19 | atom when is_atom(atom) -> 20 | atom 21 | end) 22 | end 23 | 24 | @doc """ 25 | Given a Keyword of key-value pairs, arrange them in the order of the passed-in 26 | fields. 27 | 28 | ## Examples 29 | 30 | iex> EctoFoundationDB.Layer.Fields.arrange([b: 1, c: 2, a: 0], [:a, :b]) 31 | [a: 0, b: 1] 32 | 33 | iex> EctoFoundationDB.Layer.Fields.arrange([b: 1, c: 2, a: 0], []) 34 | [b: 1, c: 2, a: 0] 35 | 36 | """ 37 | def arrange(fields, []) do 38 | fields 39 | end 40 | 41 | def arrange(fields, field_names) do 42 | Enum.map(field_names, fn field_name -> {field_name, fields[field_name]} end) 43 | end 44 | 45 | @doc """ 46 | Ecto expects data to be returned from queries as just a list of values. This 47 | function removes the field names from each. 48 | 49 | ## Examples 50 | 51 | iex> EctoFoundationDB.Layer.Fields.strip_field_names_for_ecto([[a: 0, b: 1, c: 2]]) 52 | [[0,1,2]] 53 | 54 | """ 55 | def strip_field_names_for_ecto(entries) do 56 | Enum.map(entries, &Keyword.values/1) 57 | end 58 | 59 | @doc """ 60 | Gets the name of the primary key field from the schema. 
61 | """ 62 | def get_pk_field!(schema) do 63 | [pk_field] = schema.__schema__(:primary_key) 64 | pk_field 65 | end 66 | 67 | @doc """ 68 | Brings the given key-value pair to the front of the Keyword 69 | 70 | ## Examples 71 | 72 | iex> EctoFoundationDB.Layer.Fields.to_front([a: 0, b: 1, c: 2], :c) 73 | [c: 2, a: 0, b: 1] 74 | 75 | """ 76 | def to_front(kw = [{first_key, _} | _], key) do 77 | if first_key == key do 78 | kw 79 | else 80 | val = kw[key] 81 | [{key, val} | Keyword.delete(kw, key)] 82 | end 83 | end 84 | end 85 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/indexer/mdv_app_version.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Indexer.MDVAppVersion do 2 | @moduledoc false 3 | # From a specified field on a schema, stores the max value. 4 | 5 | # This index assumes: 6 | # * the field value is an unsigned integer 7 | # * the max is monotonically non-decreasing 8 | 9 | # A value of -1 is returned if there are no values. 
10 | alias EctoFoundationDB.Exception.Unsupported 11 | alias EctoFoundationDB.Indexer 12 | alias EctoFoundationDB.Layer 13 | alias EctoFoundationDB.Layer.MetadataVersion 14 | alias EctoFoundationDB.Layer.Pack 15 | 16 | @behaviour Indexer 17 | 18 | @impl true 19 | def create_range(tenant, idx) do 20 | source = idx[:source] 21 | Pack.primary_range(tenant, source) 22 | end 23 | 24 | @impl true 25 | def drop_ranges(tenant, idx) do 26 | [MetadataVersion.app_version_key(tenant, idx)] 27 | end 28 | 29 | @impl true 30 | def create(tenant, tx, idx, _schema, {start_key, end_key}, limit) do 31 | [max_field] = idx[:fields] 32 | 33 | case :erlfdb.get_range(tx, start_key, end_key, limit: limit, wait: true) do 34 | [] -> 35 | {0, {end_key, end_key}} 36 | 37 | kvs -> 38 | max_val = 39 | kvs 40 | |> Stream.map(fn {_, fdb_value} -> 41 | data = Pack.from_fdb_value(fdb_value) 42 | data[max_field] 43 | end) 44 | |> Enum.max() 45 | 46 | update_metadata_version(tenant, tx, idx, max_val) 47 | {_, last_key} = List.last(kvs) 48 | next_key = :erlfdb_key.strinc(last_key) 49 | 50 | {length(kvs), {next_key, end_key}} 51 | end 52 | end 53 | 54 | @impl true 55 | def set(tenant, tx, idx, _schema, {_, data}) do 56 | [max_field] = idx[:fields] 57 | val = data[max_field] 58 | update_metadata_version(tenant, tx, idx, val) 59 | end 60 | 61 | @impl true 62 | def clear(_tenant, _tx, _idx, _schema, _kv) do 63 | # Entries from SchemaMigration are never individually cleared 64 | :ok 65 | end 66 | 67 | @impl true 68 | def range(_idx, _plan, _options) do 69 | raise Unsupported, """ 70 | Using an Ecto Query on an index created with #{__MODULE__} isn't supported. 
71 | """ 72 | end 73 | 74 | defp update_metadata_version(tenant, tx, idx, val) do 75 | Layer.MetadataVersion.tx_set_app(tenant, tx, idx, val) 76 | Layer.MetadataVersion.tx_set_global(tx) 77 | end 78 | end 79 | -------------------------------------------------------------------------------- /test/ecto/integration/max_value_size_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.MaxValueSizeTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Adapters.FoundationDB 5 | 6 | alias Ecto.Integration.TestRepo 7 | 8 | alias EctoFoundationDB.Future 9 | alias EctoFoundationDB.Schemas.User 10 | alias EctoFoundationDB.Test.Util 11 | 12 | describe "insert" do 13 | test "fdb:value_too_large", context do 14 | tenant = context[:tenant] 15 | 16 | # value_too_large - 2103 - Value length exceeds limit 17 | assert_raise(ErlangError, ~r/Erlang error: {:erlfdb_error, 2103}/, fn -> 18 | %User{name: "John", notes: Util.get_random_bytes(100_000)} 19 | |> FoundationDB.usetenant(tenant) 20 | |> TestRepo.insert(max_single_value_size: :infinity) 21 | end) 22 | end 23 | 24 | test "ecto_fdb:value_too_large", context do 25 | tenant = context[:tenant] 26 | 27 | assert_raise(ArgumentError, ~r/reject any objects larger than 100000 bytes/, fn -> 28 | %User{name: "John", notes: Util.get_random_bytes(100_000)} 29 | |> FoundationDB.usetenant(tenant) 30 | |> TestRepo.insert(max_value_size: 100_000) 31 | end) 32 | end 33 | 34 | test "split 1", context do 35 | tenant = context[:tenant] 36 | 37 | assert {:ok, user} = 38 | %User{name: "John", notes: Util.get_random_bytes(100_000)} 39 | |> FoundationDB.usetenant(tenant) 40 | |> TestRepo.insert(max_single_value_size: 100_000) 41 | 42 | assert %User{} = TestRepo.get(User, user.id, prefix: tenant) 43 | 44 | future = TestRepo.watch(user, label: :max_value_watch) 45 | 46 | changeset = User.changeset(user, %{name: "Bob"}) 47 | {:ok, changed} = TestRepo.update(changeset) 48 | 49 | 
assert changed.name == "Bob" 50 | 51 | # Simple wait for watch 52 | future_ref = Future.ref(future) 53 | 54 | receive do 55 | {^future_ref, :ready} -> 56 | :ok 57 | after 58 | 100 -> 59 | raise "Watch failure" 60 | end 61 | 62 | assert %User{} = TestRepo.get(User, user.id, prefix: tenant) 63 | 64 | # @todo: verify user has the expected `:notes` data 65 | assert %User{} = TestRepo.get_by(User, [name: "Bob"], prefix: tenant) 66 | 67 | assert {:ok, _} = TestRepo.delete(user) 68 | 69 | assert is_nil(TestRepo.get(User, user.id, prefix: tenant)) 70 | end 71 | end 72 | end 73 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/layer/metadata_version.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Layer.MetadataVersion do 2 | @moduledoc false 3 | @fdb_metadata_version_key "\xff/metadataVersion" 4 | @fdb_metadata_version_required_value String.duplicate("\0", 14) 5 | 6 | alias EctoFoundationDB.Future 7 | alias EctoFoundationDB.Layer.Pack 8 | 9 | import Kernel, except: [match?: 2] 10 | 11 | defstruct [:global, :app] 12 | 13 | def new(global, app \\ nil), do: %__MODULE__{global: global, app: app} 14 | 15 | def match_global?(%__MODULE__{global: global}, %__MODULE__{global: global}), 16 | do: not is_nil(global) 17 | 18 | def match_global?(_a, _b), do: false 19 | 20 | def match_local?(nil, _), do: false 21 | def match_local?(_, nil), do: false 22 | 23 | def match_local?(mdv_a, mdv_b), 24 | do: match?(%__MODULE__{mdv_a | global: <<>>}, %__MODULE__{mdv_b | global: <<>>}) 25 | 26 | def match?(mdv, mdv), do: not is_nil(mdv) 27 | def match?(_, _), do: false 28 | 29 | def tx_set_global(tx) do 30 | :erlfdb.set_versionstamped_value( 31 | tx, 32 | @fdb_metadata_version_key, 33 | @fdb_metadata_version_required_value 34 | ) 35 | end 36 | 37 | def tx_set_app(tenant, tx, idx, val) do 38 | :erlfdb.max(tx, app_version_key(tenant, idx), val) 39 | end 40 | 41 | def 
tx_get_new(tx, future) do 42 | Future.set(future, tx, :erlfdb.get(tx, @fdb_metadata_version_key), &new/1) 43 | end 44 | 45 | def tx_with_app(mdv = %__MODULE__{app: nil}, tenant, tx, idx, future) do 46 | Future.set( 47 | future, 48 | tx, 49 | :erlfdb.get(tx, app_version_key(tenant, idx)), 50 | &with_decoded_app(mdv, &1) 51 | ) 52 | end 53 | 54 | def tx_get_full(tenant, tx, idx, future) do 55 | fut1 = Future.set(future, tx, :erlfdb.get(tx, @fdb_metadata_version_key)) 56 | fut2 = Future.set(future, tx, :erlfdb.get(tx, app_version_key(tenant, idx)), &decode_app/1) 57 | 58 | [global, app] = 59 | [fut1, fut2] 60 | |> Future.await_all() 61 | |> Enum.map(&Future.result/1) 62 | 63 | new(global, app) 64 | end 65 | 66 | defp with_decoded_app(mdv, app_result) do 67 | %__MODULE__{mdv | app: decode_app(app_result)} 68 | end 69 | 70 | defp decode_app(:not_found), do: -1 71 | defp decode_app(x), do: :binary.decode_unsigned(x, :little) 72 | 73 | def app_version_key(tenant, idx) do 74 | index_name = idx[:id] 75 | source = idx[:source] 76 | Pack.namespaced_pack(tenant, source, "max", ["#{index_name}"]) 77 | end 78 | end 79 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/options.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Options do 2 | @moduledoc "See `Ecto.Adapters.FoundationDB`" 3 | 4 | @type option() :: 5 | {:open_db, function()} 6 | | {:storage_id, String.t()} 7 | | {:storage_delimiter, String.t()} 8 | | {:open_tenant_callback, function()} 9 | | {:migrator, module()} 10 | | {:cluster_file, :erlfdb.cluster_filename()} 11 | | {:migration_step, integer()} 12 | | {:max_single_value_size, integer()} 13 | | {:max_value_size, integer()} 14 | | {:log, boolean()} 15 | 16 | @fdb_max_single_value_size_bytes 100_000 17 | 18 | # :migration_step must be large enough to fit the largest multikey value. Otherwise, the ProgressiveJob 19 | # cannot make progress. 
1000 is safe with other defaults. 20 | @migration_step 1000 21 | 22 | @type t() :: [option()] 23 | 24 | alias EctoFoundationDB.Exception.Unsupported 25 | 26 | @spec get(t(), atom()) :: any() 27 | def get(options, :open_db) do 28 | Keyword.get(options, :open_db, &EctoFoundationDB.Database.open/1) 29 | end 30 | 31 | def get(options, :storage_id), 32 | do: Keyword.get(options, :storage_id, "Ecto.Adapters.FoundationDB") 33 | 34 | def get(options, :storage_delimiter), 35 | do: Keyword.get(options, :storage_delimiter, "/") 36 | 37 | def get(options, :tenant_backend), 38 | do: Keyword.get(options, :tenant_backend, EctoFoundationDB.Tenant.DirectoryTenant) 39 | 40 | def get(options, :cluster_file), do: Keyword.get(options, :cluster_file, "") 41 | 42 | def get(options, :migrator), 43 | do: Keyword.get(options, :migrator, nil) 44 | 45 | def get(options, :migration_step), 46 | do: Keyword.get(options, :migration_step, @migration_step) 47 | 48 | def get(options, :metadata_cache), 49 | do: Keyword.get(options, :metadata_cache, :enabled) 50 | 51 | def get(options, :max_single_value_size), 52 | do: Keyword.get(options, :max_single_value_size, @fdb_max_single_value_size_bytes) 53 | 54 | def get(options, :max_value_size), 55 | do: Keyword.get(options, :max_value_size, :infinity) 56 | 57 | def get(options, :log), 58 | do: Keyword.get(options, :log, true) 59 | 60 | def get(options, key), 61 | do: 62 | get_or_raise( 63 | options, 64 | key, 65 | "FoundationDB Adapter does not specify a default for option #{inspect(key)}" 66 | ) 67 | 68 | defp get_or_raise(options, key, message) do 69 | case options[key] do 70 | nil -> 71 | raise Unsupported, message 72 | 73 | val -> 74 | val 75 | end 76 | end 77 | end 78 | -------------------------------------------------------------------------------- /test/ecto/integration/watch_test.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoIntegrationWatchTest do 2 | use Ecto.Integration.Case, async: true 
3 | 4 | alias Ecto.Integration.TestRepo 5 | alias EctoFoundationDB.Future 6 | alias EctoFoundationDB.Schemas.User 7 | alias EctoFoundationDB.Tenant 8 | 9 | test "watch", context do 10 | tenant = context[:tenant] 11 | 12 | # We're emulating an Elixir process that keeps track of structs in a map with 13 | # some custom label. 14 | assigns = %{mykey: nil} 15 | 16 | # Our process has created a watch and will receive a message when the struct 17 | # changes. 18 | {assigns, futures} = 19 | TestRepo.transactional( 20 | tenant, 21 | fn -> 22 | alice = TestRepo.insert!(%User{name: "Alice"}) 23 | future = TestRepo.watch(alice, label: :mykey) 24 | {%{assigns | mykey: alice}, [future]} 25 | end 26 | ) 27 | 28 | assert %User{name: "Alice"} = assigns.mykey 29 | 30 | # This transaction emulates some other change to the DB that is independent of 31 | # our process. For simplicity, we're using the same tenant ref, but that isn't required. 32 | {:ok, _alicia} = 33 | TestRepo.transactional( 34 | tenant, 35 | fn -> 36 | TestRepo.get_by!(User, name: "Alice") 37 | |> User.changeset(%{name: "Alicia"}) 38 | |> TestRepo.update() 39 | end 40 | ) 41 | 42 | [watch_future] = futures 43 | watch_ref = Future.ref(watch_future) 44 | 45 | # Here we emulate our process's event loop (e.g. handle_info). When we receive a {ref, :ready} 46 | # message, we use TestRepo to retrieve the result according to the previously specified :label. 47 | # The returned map is merged into our assigns. We also create another watch so that the event loop 48 | # could continue in the same manner. Instead of looping, we end our test. 
49 | {assigns, futures} = 50 | receive do 51 | {^watch_ref, :ready} when is_reference(watch_ref) -> 52 | {ready_assigns, futures} = 53 | TestRepo.assign_ready(futures, [watch_ref], watch?: true, prefix: tenant) 54 | 55 | assert [_] = ready_assigns 56 | assert is_list(ready_assigns) 57 | 58 | {Map.merge(assigns, Enum.into(ready_assigns, %{})), futures} 59 | after 60 | 100 -> 61 | raise "Future result not received within 100 msec" 62 | end 63 | 64 | assert [_] = futures 65 | refute watch_future == hd(futures) 66 | 67 | assert %User{name: "Alicia"} = assigns.mykey 68 | assert %Tenant{} = assigns.mykey.__meta__.prefix 69 | end 70 | end 71 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: ["*"] 6 | pull_request: 7 | branches: ["*"] 8 | 9 | jobs: 10 | lint: 11 | runs-on: ${{ matrix.os }} 12 | env: 13 | MIX_ENV: dev 14 | FDB_VERSION: ${{ matrix.fdb }} 15 | name: Lint 16 | strategy: 17 | matrix: 18 | os: ["ubuntu-latest"] 19 | elixir: ["1.17"] 20 | otp: ["27"] 21 | fdb: ["7.3.32"] 22 | steps: 23 | - uses: actions/checkout@v3 24 | - uses: erlef/setup-beam@v1 25 | with: 26 | otp-version: ${{ matrix.otp }} 27 | elixir-version: ${{ matrix.elixir }} 28 | - uses: actions/cache@v3 29 | with: 30 | path: deps 31 | key: ${{ matrix.os }}-otp_${{ matrix.otp }}-elixir_${{ matrix.elixir }}-mix_${{ hashFiles('**/mix.lock') }} 32 | restore-keys: ${{ matrix.os }}-otp_${{ matrix.otp }}-elixir_${{ matrix.elixir }}-mix_ 33 | - name: Install FoundationDB 34 | run: | 35 | wget --quiet https://github.com/apple/foundationdb/releases/download/${FDB_VERSION}/foundationdb-clients_${FDB_VERSION}-1_amd64.deb 36 | sudo dpkg -i foundationdb-clients_${FDB_VERSION}-1_amd64.deb 37 | - run: mix deps.get 38 | - run: mix lint 39 | 40 | test: 41 | runs-on: ${{ matrix.os }} 42 | name: Test Elixir ${{ matrix.elixir }}, OTP 
${{ matrix.otp }}, OS ${{ matrix.os }}, FDB ${{ matrix.fdb }} 43 | strategy: 44 | fail-fast: false 45 | matrix: 46 | os: ["ubuntu-latest"] 47 | elixir: ["1.17"] 48 | otp: ["27"] 49 | fdb: ["7.3.32"] 50 | env: 51 | MIX_ENV: test 52 | FDB_VERSION: ${{ matrix.fdb }} 53 | steps: 54 | - uses: actions/checkout@v3 55 | - uses: erlef/setup-beam@v1 56 | with: 57 | otp-version: ${{ matrix.otp }} 58 | elixir-version: ${{ matrix.elixir }} 59 | - uses: actions/cache@v3 60 | with: 61 | path: deps 62 | key: ${{ matrix.os }}-otp_${{ matrix.otp }}-elixir_${{ matrix.elixir }}-mix_${{ hashFiles('**/mix.lock') }} 63 | restore-keys: ${{ matrix.os }}-otp_${{ matrix.otp }}-elixir_${{ matrix.elixir }}-mix_ 64 | - name: Install FoundationDB 65 | run: | 66 | wget --quiet https://github.com/apple/foundationdb/releases/download/${FDB_VERSION}/foundationdb-clients_${FDB_VERSION}-1_amd64.deb 67 | wget --quiet https://github.com/apple/foundationdb/releases/download/${FDB_VERSION}/foundationdb-server_${FDB_VERSION}-1_amd64.deb 68 | sudo dpkg -i foundationdb-clients_${FDB_VERSION}-1_amd64.deb 69 | sudo dpkg -i foundationdb-server_${FDB_VERSION}-1_amd64.deb 70 | - run: mix deps.get --only test 71 | - run: mix deps.compile 72 | - run: mix compile 73 | - run: mix test 74 | -------------------------------------------------------------------------------- /test/ecto_foundationdb/layer/metadata_test.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDBLayerMetadataTest do 2 | use ExUnit.Case, async: true 3 | doctest EctoFoundationDB.Layer.Metadata 4 | 5 | alias EctoFoundationDB.Layer.Metadata 6 | alias EctoFoundationDB.QueryPlan.Between 7 | alias EctoFoundationDB.QueryPlan.Equal 8 | 9 | describe "select_index/2" do 10 | test "trivial case" do 11 | idx_a = [id: :a, fields: [:user_id]] 12 | idx_b = [id: :b, fields: [:user_id]] 13 | constraints = [%Equal{field: :user_id}] 14 | assert ^idx_a = Metadata.select_index(Metadata.new([idx_a, idx_b]), 
constraints) 15 | assert ^idx_b = Metadata.select_index(Metadata.new([idx_b, idx_a]), constraints) 16 | end 17 | 18 | test "b over a" do 19 | idx_a = [id: :a, fields: [:timestamp]] 20 | idx_b = [id: :b, fields: [:user_id]] 21 | constraints = [%Equal{field: :user_id}] 22 | assert ^idx_b = Metadata.select_index(Metadata.new([idx_a, idx_b]), constraints) 23 | assert ^idx_b = Metadata.select_index(Metadata.new([idx_b, idx_a]), constraints) 24 | end 25 | 26 | test "exact matches with different between ordering" do 27 | idx_a = [id: :a, fields: [:date, :time, :user_id]] 28 | idx_b = [id: :b, fields: [:date, :user_id, :time]] 29 | constraints = [%Equal{field: :date}, %Equal{field: :user_id}, %Between{field: :time}] 30 | assert ^idx_b = Metadata.select_index(Metadata.new([idx_a, idx_b]), constraints) 31 | assert ^idx_b = Metadata.select_index(Metadata.new([idx_b, idx_a]), constraints) 32 | end 33 | 34 | test "one subset, the other insufficient" do 35 | idx_a = [id: :a, fields: [:date, :time, :user_id]] 36 | idx_b = [id: :b, fields: [:user_id, :time]] 37 | constraints = [%Equal{field: :date}] 38 | assert ^idx_a = Metadata.select_index(Metadata.new([idx_a, idx_b]), constraints) 39 | assert ^idx_a = Metadata.select_index(Metadata.new([idx_b, idx_a]), constraints) 40 | end 41 | 42 | test "inexact matches with different between ordering" do 43 | idx_a = [id: :a, fields: [:date, :time, :user_id, :extra]] 44 | idx_b = [id: :b, fields: [:date, :user_id, :time, :extra]] 45 | constraints = [%Equal{field: :date}, %Equal{field: :user_id}, %Between{field: :time}] 46 | assert ^idx_b = Metadata.select_index(Metadata.new([idx_a, idx_b]), constraints) 47 | assert ^idx_b = Metadata.select_index(Metadata.new([idx_b, idx_a]), constraints) 48 | end 49 | 50 | test "best partial match" do 51 | # Note: this will fail at the Default index, but we allow it to be selected 52 | idx_a = [id: :a, fields: [:date, :time]] 53 | idx_b = [id: :b, fields: [:date, :user_id, :time]] 54 | constraints = 
[%Between{field: :time}] 55 | assert ^idx_a = Metadata.select_index(Metadata.new([idx_a, idx_b]), constraints) 56 | assert ^idx_a = Metadata.select_index(Metadata.new([idx_b, idx_a]), constraints) 57 | end 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/layer/ordering.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Layer.Ordering do 2 | alias EctoFoundationDB.Schema 3 | 4 | @moduledoc false 5 | # This module emulates `query.order_bys` behavior, because FoundationDB 6 | # doesn't have native support for result ordering. 7 | 8 | @doc """ 9 | Returns the ordering function that needs to be applied on a query result. 10 | """ 11 | def get_ordering_fn(_schema, []), do: {[], &Function.identity/1} 12 | def get_ordering_fn(_schema, nil), do: {[], &Function.identity/1} 13 | 14 | def get_ordering_fn(schema, ordering) do 15 | field_types = if is_nil(schema), do: %{}, else: Schema.field_types(schema) |> Enum.into(%{}) 16 | parsed_ordering = parse_ordering(ordering) 17 | 18 | fun = fn data -> 19 | Enum.sort(data, &sort(&1, &2, field_types, parsed_ordering)) 20 | end 21 | 22 | {parsed_ordering, fun} 23 | end 24 | 25 | defp parse_ordering(ordering) do 26 | ordering 27 | |> join_exprs() 28 | |> Enum.map(fn {dir, {{:., [], [{:&, [], [0]}, field]}, _, _}} -> 29 | {dir, field} 30 | end) 31 | end 32 | 33 | defp sort(left, right, field_types, ordering) do 34 | cmp(left, right, field_types, ordering) == :lt 35 | end 36 | 37 | defp join_exprs([%{expr: exprs1}, %{expr: exprs2} | t]) do 38 | join_exprs([%{expr: Keyword.merge(exprs1, exprs2)} | t]) 39 | end 40 | 41 | defp join_exprs([%{expr: exprs1}]), do: exprs1 42 | 43 | defp cmp(left, right, field_types, [{:asc, field} | t]) do 44 | case cmp_field(field_types[field], left[field], right[field]) do 45 | :eq -> 46 | cmp(left, right, field_types, t) 47 | 48 | cmp_res -> 49 | cmp_res 50 | end 51 | 
end 52 | 53 | defp cmp(left, right, field_types, [{:desc, field} | t]) do 54 | case cmp_field(field_types[field], left[field], right[field]) do 55 | :eq -> 56 | cmp(left, right, field_types, t) 57 | 58 | cmp_res -> 59 | reverse_cmp(cmp_res) 60 | end 61 | end 62 | 63 | defp cmp(_, _, _, []), do: :gt 64 | 65 | if Code.ensure_loaded?(Decimal) do 66 | defp cmp_field(:decimal, lhs, rhs), do: Decimal.compare(lhs, rhs) 67 | end 68 | 69 | defp cmp_field(:utc_datetime, lhs, rhs), do: DateTime.compare(lhs, rhs) 70 | defp cmp_field(:naive_datetime, lhs, rhs), do: NaiveDateTime.compare(lhs, rhs) 71 | defp cmp_field(:date, lhs, rhs), do: Date.compare(lhs, rhs) 72 | defp cmp_field(:time, lhs, rhs), do: Time.compare(lhs, rhs) 73 | defp cmp_field(:utc_datetime_usec, lhs, rhs), do: DateTime.compare(lhs, rhs) 74 | defp cmp_field(:naive_datetime_usec, lhs, rhs), do: NaiveDateTime.compare(lhs, rhs) 75 | defp cmp_field(:time_usec, lhs, rhs), do: Time.compare(lhs, rhs) 76 | 77 | defp cmp_field(_, lhs, rhs) do 78 | cond do 79 | lhs < rhs -> 80 | :lt 81 | 82 | lhs > rhs -> 83 | :gt 84 | 85 | true -> 86 | :eq 87 | end 88 | end 89 | 90 | defp reverse_cmp(:gt), do: :lt 91 | defp reverse_cmp(:lt), do: :gt 92 | end 93 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/versionstamp.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Versionstamp do 2 | @moduledoc """ 3 | Versionstamping is a feature that allows you to create a unique identifier for a record 4 | that is guaranteed to be unique across all records in the database. 5 | 6 | Please refer to the documentation for `Repo.async_insert_all!/3`. 
7 | """ 8 | use Ecto.Type 9 | 10 | alias EctoFoundationDB.Exception.Unsupported 11 | alias EctoFoundationDB.Future 12 | alias EctoFoundationDB.Layer.Tx 13 | 14 | # From :erlfdb_tuple 15 | @vs80 0x32 16 | @vs96 0x33 17 | @inc_id 0xFFFFFFFFFFFFFFFF 18 | @inc_batch 0xFFFF 19 | 20 | def incomplete(user) do 21 | {:versionstamp, @inc_id, @inc_batch, user} 22 | end 23 | 24 | def incomplete?({:versionstamp, @inc_id, @inc_batch, _}), do: true 25 | def incomplete?(_), do: false 26 | 27 | def get(tx) do 28 | Future.new_deferred(:erlfdb.get_versionstamp(tx), &from_binary/1) 29 | end 30 | 31 | def to_integer({:versionstamp, @inc_id, @inc_batch, _}) do 32 | raise Unsupported, """ 33 | Versionstamps must be completed before they are useful, so we disallow converting an incomplete versionstamp to an integer. 34 | 35 | Versionstamp discovery can be done within the transaction that created it, and an incomplete versionstamp can be made complete with `resolve/2`. 36 | 37 | alias EctoFoundationDB.Future 38 | alias EctoFoundationDB.Versionstamp 39 | 40 | {event, vs_future} = MyRepo.transactional(tenant, fn tx -> 41 | {:ok, event} = MyRepo.insert(%Event{id: Versionstamp.next(tx)}) 42 | vs_future = Versionstamp.get(tx) 43 | {event, vs_future} 44 | end) 45 | 46 | vs = MyRepo.await(vs_future) 47 | event = %{event | id: Versionstamp.resolve(event.id, vs)} 48 | """ 49 | end 50 | 51 | def to_integer(vs = {:versionstamp, _, _, _}) do 52 | <<@vs96, bin::binary>> = :erlfdb_tuple.pack({vs}) 53 | :binary.decode_unsigned(bin, :big) 54 | end 55 | 56 | def from_integer(int) when is_integer(int) do 57 | bin = :binary.encode_unsigned(int, :big) 58 | {vs} = :erlfdb_tuple.unpack(<<@vs96>> <> bin) 59 | vs 60 | end 61 | 62 | def from_binary(bin) when byte_size(bin) == 10 do 63 | {vs80} = :erlfdb_tuple.unpack(<<@vs80>> <> bin) 64 | vs80 65 | end 66 | 67 | def next() do 68 | if Tx.in_tx?() do 69 | raise Unsupported, """ 70 | When calling from inside a transaction, you must use
`EctoFoundationDB.Versionstamp.next/1`. 71 | """ 72 | end 73 | 74 | incomplete(0) 75 | end 76 | 77 | def next(tx) do 78 | incomplete(:erlfdb.get_next_tx_id(tx)) 79 | end 80 | 81 | def resolve({:versionstamp, @inc_id, @inc_batch, user}, {:versionstamp, id, batch}) do 82 | {:versionstamp, id, batch, user} 83 | end 84 | 85 | @impl true 86 | def type(), do: :id 87 | 88 | @impl true 89 | def cast(id) when is_integer(id), do: {:ok, from_integer(id)} 90 | def cast(vs = {:versionstamp, _, _, _}), do: {:ok, vs} 91 | def cast(id_str) when is_binary(id_str), do: Ecto.Type.cast(:id, id_str) 92 | def cast(_), do: :error 93 | 94 | @impl true 95 | def dump(vs = {:versionstamp, _, _, _}), do: {:ok, vs} 96 | 97 | @impl true 98 | def load(vs = {:versionstamp, _, _, _}), do: {:ok, vs} 99 | end 100 | -------------------------------------------------------------------------------- /test/support/module_to_module_tracer.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.ModuleToModuleTracer do 2 | @moduledoc false 3 | 4 | use GenServer 5 | 6 | # module_name 7 | @type caller() :: atom() 8 | 9 | # {module_name, function_name, arity} 10 | @type call() :: {atom(), atom(), integer()} 11 | 12 | @type caller_spec() :: function() | caller() 13 | @type call_spec() :: function() | call() 14 | 15 | @type traced_calls() :: {caller(), call()} 16 | 17 | defstruct caller_specs: [], call_specs: [], traced_calls: [] 18 | 19 | @doc """ 20 | The given function is executed and for any function call made within, 21 | when both a caller_spec and a call_spec are found, the function call is 22 | recorded in the trace. 
23 | """ 24 | @spec with_traced_calls(atom(), list(caller_spec()), list(call_spec()), function()) :: 25 | {list(traced_calls()), any()} 26 | def with_traced_calls(name, caller_specs, call_specs, fun) do 27 | trace_data = start_trace(name, self(), caller_specs, call_specs) 28 | res = fun.() 29 | calls = stop_trace(trace_data) 30 | {calls, res} 31 | end 32 | 33 | def start_trace(name, target, caller_specs, call_specs) do 34 | {:ok, tracer} = start_link(caller_specs, call_specs) 35 | 36 | session = :trace.session_create(name, tracer, []) 37 | :trace.process(session, target, true, [:call, :arity]) 38 | 39 | match_spec = [{:_, [], [{:message, {{:cp, {:caller}}}}]}] 40 | 41 | :trace.function(session, :on_load, match_spec, [:local]) 42 | :trace.function(session, {:erlfdb, :_, :_}, match_spec, [:local]) 43 | 44 | {tracer, session} 45 | end 46 | 47 | def stop_trace({tracer, session}) do 48 | ret = get_traced_calls(tracer) 49 | 50 | :trace.session_destroy(session) 51 | 52 | GenServer.stop(tracer) 53 | 54 | ret 55 | end 56 | 57 | def start_link(caller_specs, call_specs) do 58 | GenServer.start_link(__MODULE__, {caller_specs, call_specs}, []) 59 | end 60 | 61 | def get_traced_calls(pid) do 62 | GenServer.call(pid, :get_traced_calls) 63 | end 64 | 65 | @impl true 66 | def init({caller_specs, call_specs}) do 67 | {:ok, %__MODULE__{caller_specs: caller_specs, call_specs: call_specs}} 68 | end 69 | 70 | @impl true 71 | def handle_call(:get_traced_calls, _from, state) do 72 | {:reply, Enum.reverse(state.traced_calls), state} 73 | end 74 | 75 | @impl true 76 | def handle_info( 77 | {:trace, _pid, :call, call = {_module, _fun, _arity}, {:cp, {caller, _, _}}}, 78 | state 79 | ) do 80 | if match?(caller, call, state) do 81 | {:noreply, %__MODULE__{state | traced_calls: [{caller, call} | state.traced_calls]}} 82 | else 83 | {:noreply, state} 84 | end 85 | end 86 | 87 | def handle_info(_info, state) do 88 | # other traces will end up here 89 | {:noreply, state} 90 | end 91 | 92 | defp 
match?(caller, call, state) do 93 | matching_origin?(caller, state) and matching_call?(call, state) 94 | end 95 | 96 | defp matching_origin?(caller, state) do 97 | any_match?(state.caller_specs, caller) 98 | end 99 | 100 | defp matching_call?(call, state) do 101 | any_match?(state.call_specs, call) 102 | end 103 | 104 | defp any_match?(specs, item) do 105 | Enum.any?( 106 | specs, 107 | fn 108 | spec when is_function(spec) -> 109 | spec.(item) 110 | 111 | spec -> 112 | item == spec 113 | end 114 | ) 115 | end 116 | end 117 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/tenant/managed_tenant.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Tenant.ManagedTenant do 2 | @moduledoc """ 3 | An experimental backend for EctoFDB multitenancy. It uses FDB's Tenants 4 | to guarantee that a given transaction cannot access keys from another tenant. 5 | 6 | To use ManagedTenant, your database must be configured with 7 | 8 | ```shell 9 | fdbcli --exec 'configure tenant_mode=required_experimental' 10 | # or 11 | fdbcli --exec 'configure tenant_mode=optional_experimental' 12 | ``` 13 | 14 | FDB's tenants are still an experimental feature, so EctoFDB's ManagedTenant must 15 | also be considered experimental. For now, we recommend using `EctoFoundationDB.Tenant.DirectoryTenant`, 16 | which is the default. 
17 | """ 18 | defstruct [] 19 | 20 | alias EctoFoundationDB.Options 21 | alias EctoFoundationDB.Tenant.Backend 22 | 23 | @behaviour Backend 24 | 25 | @type t() :: %__MODULE__{} 26 | 27 | @impl true 28 | def txobj(_db, tenant_ref, _meta) do 29 | tenant_ref 30 | end 31 | 32 | @impl true 33 | def ref({:erlfdb_tenant, tenant_ref}, _meta), do: tenant_ref 34 | 35 | @impl true 36 | def make_meta(_tenant_ref) do 37 | %__MODULE__{} 38 | end 39 | 40 | @impl true 41 | def get_name(id, options) do 42 | storage_id = Options.get(options, :storage_id) 43 | storage_delimiter = Options.get(options, :storage_delimiter) 44 | 45 | "#{storage_id}#{storage_delimiter}#{id}" 46 | end 47 | 48 | @impl true 49 | def list(db, options) do 50 | start_name = get_name("", options) 51 | end_name = :erlfdb_key.strinc(start_name) 52 | :erlfdb_tenant_management.list_tenants(db, start_name, end_name, options) 53 | end 54 | 55 | @impl true 56 | def create(db, tenant_name, _options) do 57 | :erlfdb_tenant_management.create_tenant(db, tenant_name) 58 | rescue 59 | e in ErlangError -> 60 | case e do 61 | %ErlangError{original: {:erlfdb_error, 2132}} -> 62 | {:error, :tenant_already_exists} 63 | end 64 | end 65 | 66 | @impl true 67 | def delete(db, tenant_name, _options) do 68 | :erlfdb_tenant_management.delete_tenant(db, tenant_name) 69 | rescue 70 | e in ErlangError -> 71 | case e do 72 | %ErlangError{ 73 | original: {:erlfdb_directory, {:remove_error, :path_missing, [utf8: ^tenant_name]}} 74 | } -> 75 | {:error, :tenant_nonempty} 76 | end 77 | end 78 | 79 | @impl true 80 | def get(db, tenant_name, _options) do 81 | case :erlfdb_tenant_management.get_tenant(db, tenant_name) do 82 | :not_found -> 83 | {:error, :tenant_does_not_exist} 84 | 85 | tenant -> 86 | {:ok, tenant} 87 | end 88 | end 89 | 90 | @impl true 91 | def open(db, tenant_name, _options) do 92 | :erlfdb.open_tenant(db, tenant_name) 93 | end 94 | 95 | @impl true 96 | def all_data_ranges(_meta) do 97 | [{"", <<0xFF>>}] 98 | end 99 | 100 | @impl 
true 101 | def get_id({_key, json}, options) do 102 | %{"name" => %{"printable" => name}} = Jason.decode!(json) 103 | tenant_name_to_id!(name, options) 104 | end 105 | 106 | @impl true 107 | def extend_tuple(tuple, _meta) when is_tuple(tuple), do: tuple 108 | 109 | def extend_tuple(function, meta) when is_function(function), 110 | do: function.(0) |> extend_tuple(meta) 111 | 112 | def extend_tuple(list, _meta) when is_list(list), do: :erlang.list_to_tuple(list) 113 | 114 | @impl true 115 | def extract_tuple(tuple, _meta), do: tuple 116 | 117 | defp tenant_name_to_id!(tenant_name, options) do 118 | prefix = get_name("", options) 119 | len = String.length(prefix) 120 | ^prefix = String.slice(tenant_name, 0, len) 121 | String.slice(tenant_name, len, String.length(tenant_name) - len) 122 | end 123 | end 124 | -------------------------------------------------------------------------------- /test/ecto/integration/large_migration_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.LargeMigrationTest do 2 | use Ecto.Integration.MigrationsCase, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | 6 | alias EctoFoundationDB.Tenant 7 | 8 | alias EctoFoundationDB.Integration.TestMigrator 9 | alias EctoFoundationDB.Schemas.User2 10 | 11 | import Ecto.Query 12 | 13 | @chunk_size 100 14 | @num_chunks 20 15 | @migration_step 2 16 | 17 | defp do_insert(tenant, tag, i, j) do 18 | # user.id is chosen to distribute writes evenly among the keyspace during the migration 19 | 20 | i = i |> Integer.to_string() |> String.pad_leading(4, "0") 21 | j = j |> Integer.to_string() |> String.pad_leading(4, "0") 22 | 23 | {:ok, _user1} = 24 | TestRepo.insert(%User2{id: "id-#{j}-#{i}-#{tag}", name: "name-#{j}-#{i}-#{tag}"}, 25 | prefix: tenant 26 | ) 27 | end 28 | 29 | defp do_insert_chunk(tenant, tag, i, sleep, reverse?) 
do 30 | seq = if reverse?, do: @chunk_size..1//-1, else: 1..@chunk_size 31 | 32 | for j <- seq do 33 | if sleep > 0, do: :timer.sleep(sleep) 34 | tick = :erlang.monotonic_time(:microsecond) 35 | {:ok, user} = do_insert(tenant, tag, i, j) 36 | diff = :erlang.monotonic_time(:microsecond) - tick 37 | {diff, user} 38 | end 39 | end 40 | 41 | defp get_stream(fun) do 42 | Task.async_stream(1..@num_chunks, fun, 43 | ordered: false, 44 | max_concurrency: System.schedulers_online() * 2, 45 | timeout: :infinity 46 | ) 47 | end 48 | 49 | defp seed_users(tenant) do 50 | insert_stream = 51 | get_stream(fn i -> 52 | TestRepo.transactional( 53 | tenant, 54 | fn -> 55 | {_, users} = Enum.unzip(do_insert_chunk(tenant, "user-a", i, 0, false)) 56 | users 57 | end 58 | ) 59 | end) 60 | 61 | insert_stream 62 | |> Stream.flat_map(fn {:ok, x} -> x end) 63 | |> Enum.to_list() 64 | end 65 | 66 | defp migrate_with_new_inserts(tenant_id, tenant) do 67 | open_tenant_stream = 68 | get_stream(fn i -> 69 | task = ins_task(tenant, i) 70 | 71 | # A short migration step ensures that there are many opportunities between 72 | # transactions 73 | Tenant.open(TestRepo, tenant_id, 74 | migrator: TestMigrator, 75 | migration_step: @migration_step, 76 | log: false 77 | ) 78 | 79 | unless is_nil(task), do: Task.await(task) 80 | end) 81 | 82 | {ins_claim_time, ins_claim_users} = 83 | open_tenant_stream 84 | |> Stream.map(fn {:ok, x} -> x end) 85 | |> Stream.filter(fn 86 | nil -> false 87 | _ -> true 88 | end) 89 | |> Enum.to_list() 90 | |> List.flatten() 91 | |> Enum.unzip() 92 | 93 | ins_claim_time = Enum.max(ins_claim_time) / 1000.0 94 | ins_claim_users = List.flatten(ins_claim_users) 95 | {ins_claim_time, ins_claim_users} 96 | end 97 | 98 | defp ins_task(tenant, i) do 99 | # A short sleep in between each insert to distribute the writes over time 100 | # and inserting in reverse nearly guarantees that we will cross over the 101 | # advancing migration cursor 102 | ins_fun = fn i -> do_insert_chunk(tenant, 
"user-b", i, 2, true) end 103 | if i == 1, do: Task.async(fn -> ins_fun.(i) end) 104 | end 105 | 106 | test "large migration with concurrent writes", context do 107 | tenant_id = context[:tenant_id] 108 | tenant = context[:tenant] 109 | 110 | seed_users = seed_users(tenant) 111 | {_ins_claim_time, ins_claim_users} = migrate_with_new_inserts(tenant_id, tenant) 112 | 113 | all_users = seed_users ++ ins_claim_users 114 | 115 | assert((@num_chunks + 1) * @chunk_size == length(all_users)) 116 | 117 | assert(length(all_users) == length(TestRepo.all(User2, prefix: tenant))) 118 | 119 | assert( 120 | length(all_users) == 121 | length( 122 | TestRepo.all(from(u in User2, where: u.name > ^"\x00" and u.name < ^"\xF0"), 123 | prefix: tenant 124 | ) 125 | ) 126 | ) 127 | end 128 | end 129 | -------------------------------------------------------------------------------- /lib/ecto/adapters/foundationdb/ecto_adapter_async.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.FoundationDB.EctoAdapterAsync do 2 | @moduledoc false 3 | alias Ecto.Adapters.FoundationDB 4 | alias EctoFoundationDB.Exception.Unsupported 5 | alias EctoFoundationDB.Future 6 | alias EctoFoundationDB.Layer.Fields 7 | alias EctoFoundationDB.Layer.Tx 8 | alias EctoFoundationDB.Versionstamp 9 | import Ecto.Query 10 | 11 | def async_insert_all!(_module, repo, schema, list, opts) do 12 | {tx?, tenant} = Tx.in_tenant_tx?() 13 | 14 | if not tx?, 15 | do: raise(Unsupported, "`Repo.async_insert_all!` must be called within a transaction") 16 | 17 | pk_field = Fields.get_pk_field!(schema) 18 | 19 | tx = Tx.get() 20 | 21 | forced_no_conflict? 
= [] == Keyword.get(opts, :conflict_target) 22 | 23 | list = 24 | for x <- list do 25 | if not is_struct(x, schema) do 26 | raise Unsupported, """ 27 | `Repo.async_insert_all!` must be called with a list of Ecto.Schema structs 28 | """ 29 | end 30 | 31 | pk = Map.get(x, pk_field) 32 | 33 | x = 34 | if is_nil(pk) and 35 | schema.__schema__(:type, pk_field) == Versionstamp do 36 | Map.put(x, pk_field, Versionstamp.next(tx)) 37 | else 38 | x 39 | end 40 | 41 | pk = Map.get(x, pk_field) 42 | 43 | if not forced_no_conflict? and not Versionstamp.incomplete?(pk) do 44 | raise Unsupported, """ 45 | `Repo.async_insert_all!` is designed to be called with either 46 | 47 | 1. A list of Ecto.Schema structs with incomplete Versionstamp in the `:id` field 48 | 2. The option `conflict_target: []`. (Make sure you understand the implications of this option) 49 | """ 50 | end 51 | 52 | x 53 | end 54 | 55 | # The insert_all function does not return the structs, so instead we make sure individual calls to `insert!` are 56 | # non-blocking by enforcing Versionstamps or conflict_target == [] 57 | result = for x <- list, do: repo.insert!(x, opts) 58 | 59 | vs_future = Versionstamp.get(tx) 60 | 61 | Future.apply(vs_future, fn vs -> 62 | Enum.map(result, &resolve_versionstamp(tenant, &1, vs, pk_field)) 63 | end) 64 | end 65 | 66 | def async_query(_module, repo, fun) do 67 | # Executes the repo function (e.g. get, get_by, all, etc). Caller must ensure 68 | # that the proper `:returning` option is used to adhere to the async/await 69 | # contract. 
70 | _res = fun.() 71 | 72 | case Process.delete(Future.token()) do 73 | nil -> 74 | raise "Pipelining failure" 75 | 76 | {{source, schema}, future} -> 77 | Future.apply(future, fn {return_handler, result} -> 78 | invoke_return_handler(repo, source, schema, return_handler, result) 79 | end) 80 | end 81 | after 82 | Process.delete(Future.token()) 83 | end 84 | 85 | defp invoke_return_handler(repo, source, schema, return_handler, result) do 86 | if is_nil(result), do: raise("Pipelining failure") 87 | 88 | queryable = if is_nil(schema), do: source, else: schema 89 | 90 | # Abuse a :noop option here to signal to the backend that we don't 91 | # actually want to run a query. Instead, we just want the result to 92 | # be transformed by Ecto's internal logic. 93 | case return_handler do 94 | :all -> 95 | repo.all(queryable, noop: result) 96 | 97 | :one -> 98 | repo.one(queryable, noop: result) 99 | 100 | :all_from_source -> 101 | {select_fields, data_result} = result 102 | query = from(_ in source, select: ^select_fields) 103 | repo.all(query, noop: data_result) 104 | end 105 | end 106 | 107 | defp resolve_versionstamp(tenant, x, vs, pk_field) do 108 | pk = Map.get(x, pk_field) 109 | 110 | x = 111 | if Versionstamp.incomplete?(pk) do 112 | pk = Versionstamp.resolve(Map.get(x, pk_field), vs) 113 | Map.put(x, pk_field, pk) 114 | else 115 | x 116 | end 117 | 118 | FoundationDB.usetenant(x, tenant) 119 | end 120 | end 121 | -------------------------------------------------------------------------------- /test/ecto/integration/timeseries_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.TimeSeriesTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias EctoFoundationDB.Exception.Unsupported 5 | 6 | alias Ecto.Integration.TestRepo 7 | 8 | alias EctoFoundationDB.Schemas.Event 9 | 10 | import Ecto.Query 11 | 12 | @moduletag :integration 13 | describe "timeseries index" do 14 | test "multiple fields in 
query", context do 15 | tenant = context[:tenant] 16 | 17 | query = 18 | from( 19 | e in Event, 20 | where: 21 | e.date >= ^~D[1970-01-01] and e.date < ^~D[2100-01-01] and 22 | e.user_id == ^"foo" and 23 | (e.time >= ^~T[00:00:00] and e.time <= ^~T[00:00:00]) 24 | ) 25 | 26 | f = 27 | TestRepo.transactional(tenant, fn -> 28 | TestRepo.async_insert_all!(Event, [ 29 | %Event{date: ~D[2070-01-01], user_id: "bar", time: ~T[00:00:00.000000]}, 30 | %Event{date: ~D[2070-01-01], user_id: "foo", time: ~T[00:00:00.000000]} 31 | ]) 32 | end) 33 | 34 | [_, %Event{id: event_id}] = TestRepo.await(f) 35 | 36 | assert_raise Unsupported, ~r/Default Index query mismatch/, fn -> 37 | TestRepo.all(query, prefix: tenant) 38 | end 39 | 40 | query = 41 | from( 42 | e in Event, 43 | where: 44 | e.date == ^~D[2070-01-01] and 45 | e.user_id == ^"foo" and 46 | (e.time >= ^~T[00:00:00] and e.time <= ^~T[00:00:00]) 47 | ) 48 | 49 | assert [%Event{id: ^event_id}] = TestRepo.all(query, prefix: tenant) 50 | 51 | query = 52 | from( 53 | e in Event, 54 | where: e.date >= ^~D[1970-01-01] and e.date <= ^~D[2100-01-01] 55 | ) 56 | 57 | assert 2 == length(TestRepo.all(query, prefix: tenant)) 58 | end 59 | 60 | test "timeseries consistency", context do 61 | tenant = context[:tenant] 62 | 63 | # Insert 64 | f = 65 | TestRepo.transactional(tenant, fn -> 66 | TestRepo.async_insert_all!(Event, [ 67 | %Event{date: ~D[2070-01-01], user_id: "foo", time: ~T[00:00:00.000000]}, 68 | %Event{date: ~D[2777-01-01], user_id: "foo", time: ~T[00:00:00.000000]} 69 | ]) 70 | end) 71 | 72 | [event = %Event{id: event_id}, _] = TestRepo.await(f) 73 | 74 | # Because write_primary: false 75 | nil = TestRepo.get(Event, event.id, prefix: tenant) 76 | 77 | # All 78 | query = 79 | from(e in Event, 80 | where: e.date > ^~D[1970-01-01] and e.date < ^~D[2100-01-01] 81 | ) 82 | 83 | assert [%Event{}] = TestRepo.all(query, prefix: tenant) 84 | 85 | # Update 86 | assert {1, _} = TestRepo.update_all(query, [set: [data: "foo"]], prefix: 
tenant) 87 | 88 | assert [%Event{id: ^event_id, data: "foo"}] = TestRepo.all(query, prefix: tenant) 89 | 90 | # Delete 91 | assert {1, _} = TestRepo.delete_all(query, prefix: tenant) 92 | 93 | assert [] == TestRepo.all(query, prefix: tenant) 94 | end 95 | end 96 | 97 | test "backward scan", context do 98 | tenant = context[:tenant] 99 | 100 | # Insert 101 | f = 102 | TestRepo.transactional(tenant, fn -> 103 | TestRepo.async_insert_all!(Event, [ 104 | %Event{date: ~D[2666-01-01], user_id: "foo", time: ~T[00:00:00.000000]}, 105 | %Event{date: ~D[2555-01-01], user_id: "foo", time: ~T[00:00:00.000000]}, 106 | %Event{date: ~D[2777-01-01], user_id: "foo", time: ~T[00:00:00.000000]}, 107 | %Event{date: ~D[2444-01-01], user_id: "foo", time: ~T[00:00:00.000000]} 108 | ]) 109 | end) 110 | 111 | [_, _, _, _] = TestRepo.await(f) 112 | 113 | # backward scan ensures that we get the latest events first 114 | assert [%Event{date: ~D[2777-01-01]}, %Event{date: ~D[2666-01-01]}] = 115 | TestRepo.all( 116 | from(e in Event, 117 | where: e.date > ^~D[2444-01-01], 118 | order_by: [desc: e.date] 119 | ), 120 | prefix: tenant, 121 | limit: 2 122 | ) 123 | end 124 | end 125 | -------------------------------------------------------------------------------- /lib/ecto/adapters/foundationdb/ecto_adapter_assigns.ex: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Adapters.FoundationDB.EctoAdapterAssigns do 2 | @moduledoc false 3 | alias EctoFoundationDB.Future 4 | alias EctoFoundationDB.Indexer.SchemaMetadata 5 | alias EctoFoundationDB.Layer.Tx 6 | 7 | alias Ecto.Adapters.FoundationDB 8 | 9 | def assign_ready(_module, repo, futures, ready_refs, options) when is_list(ready_refs) do 10 | Tx.transactional(options[:prefix], fn _tx -> 11 | {assign_futures_rev, futures} = filter_ready(repo, futures, ready_refs, options) 12 | 13 | res = repo.await(Enum.reverse(assign_futures_rev)) 14 | 15 | Enum.reduce(res, {[], futures}, fn 16 | {new_assigns, 
new_future_or_nil}, {assigns, futures} -> 17 | {assigns ++ new_assigns, append_new_future(futures, new_future_or_nil)} 18 | end) 19 | end) 20 | end 21 | 22 | defp filter_ready(repo, futures, ready_refs, options) do 23 | Enum.reduce(ready_refs, {[], futures}, fn ready_ref, {acc, futures} -> 24 | case async_assign_ready(__MODULE__, repo, futures, ready_ref, options) do 25 | {nil, futures} -> 26 | {acc, futures} 27 | 28 | {assign_future, futures} -> 29 | {[assign_future | acc], futures} 30 | end 31 | end) 32 | end 33 | 34 | defp append_new_future(futures, nil), do: futures 35 | defp append_new_future(futures, future), do: [future | futures] 36 | 37 | def async_assign_ready(_module, repo, futures, ready_ref, options) 38 | when is_reference(ready_ref) do 39 | case Future.find_ready(futures, ready_ref) do 40 | {nil, futures} -> 41 | {nil, futures} 42 | 43 | {future, futures} -> 44 | {schema, kind, watch_options, new_watch_fn} = Future.result(future) 45 | 46 | if not Keyword.has_key?(watch_options, :label) do 47 | raise """ 48 | To use Repo.assign_ready/3, you must have previously created a watch with a label 49 | 50 | Examples: 51 | 52 | Repo.watch(struct, label: :mykey) 53 | SchemaMetadata.watch_collection(MySchema, label: :mykey) 54 | """ 55 | end 56 | 57 | case kind do 58 | {:pk, pk} -> 59 | async_get(repo, futures, schema, pk, watch_options, options, new_watch_fn) 60 | 61 | {SchemaMetadata, name} 62 | when name in [:inserts, :deletes, :collection, :updates, :changes] -> 63 | async_all(repo, futures, schema, watch_options, options, new_watch_fn) 64 | end 65 | end 66 | end 67 | 68 | defp async_get(repo, futures, schema, id, watch_options, options, new_watch_fn) do 69 | label = watch_options[:label] 70 | 71 | tenant = options[:prefix] 72 | 73 | Tx.transactional(options[:prefix], fn _tx -> 74 | assign_future = 75 | repo.async_get(schema, id, options) 76 | |> Future.apply(fn struct_or_nil -> 77 | struct_or_nil = usetenant(struct_or_nil, tenant) 78 | new_future = 
maybe_new_watch(struct_or_nil, watch_options, options, new_watch_fn) 79 | 80 | {[{label, struct_or_nil}], new_future} 81 | end) 82 | 83 | {assign_future, futures} 84 | end) 85 | end 86 | 87 | defp async_all(repo, futures, schema, watch_options, options, new_watch_fn) do 88 | label = watch_options[:label] 89 | query = watch_options[:query] || schema 90 | tenant = options[:prefix] 91 | 92 | Tx.transactional(tenant, fn _tx -> 93 | assign_future = 94 | repo.async_all(query, options) 95 | |> Future.apply(fn result -> 96 | result = usetenant(result, tenant) 97 | new_future = maybe_new_watch(result, watch_options, options, new_watch_fn) 98 | 99 | {[{label, result}], new_future} 100 | end) 101 | 102 | {assign_future, futures} 103 | end) 104 | end 105 | 106 | defp usetenant(nil, _tenant), do: nil 107 | defp usetenant(list, tenant) when is_list(list), do: Enum.map(list, &usetenant(&1, tenant)) 108 | defp usetenant(struct, tenant), do: FoundationDB.usetenant(struct, tenant) 109 | 110 | defp maybe_new_watch(result, watch_options, options, new_watch_fn) do 111 | if Keyword.get(options, :watch?, false) do 112 | new_watch_fn.(result, watch_options) 113 | else 114 | nil 115 | end 116 | end 117 | end 118 | -------------------------------------------------------------------------------- /test/ecto/integration/index_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.IndexTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | 6 | alias EctoFoundationDB.Schemas.User 7 | 8 | alias Ecto.Adapters.FoundationDB 9 | 10 | import Ecto.Query 11 | 12 | @moduletag :integration 13 | describe "user index" do 14 | test "index consistency", context do 15 | tenant = context[:tenant] 16 | 17 | assert tenant.backend == EctoFoundationDB.Tenant.DirectoryTenant 18 | 19 | # Insert consistency 20 | {:ok, _user1} = 21 | %User{name: "John"} 22 | |> FoundationDB.usetenant(tenant) 23 | |> 
TestRepo.insert() 24 | 25 | {:ok, user2} = 26 | %User{name: "James"} 27 | |> FoundationDB.usetenant(tenant) 28 | |> TestRepo.insert() 29 | 30 | {:ok, user3} = 31 | %User{name: "John"} 32 | |> FoundationDB.usetenant(tenant) 33 | |> TestRepo.insert() 34 | 35 | assert [%User{name: "John"}, %User{name: "John"}] = 36 | from(u in User, where: u.name == ^"John") 37 | |> TestRepo.all(prefix: tenant) 38 | 39 | # Delete consistency 40 | TestRepo.delete!(user3) 41 | 42 | assert [%User{name: "John"}] = 43 | from(u in User, where: u.name == ^"John") 44 | |> TestRepo.all(prefix: tenant) 45 | 46 | # Update consistency 47 | user2 48 | |> User.changeset(%{name: "John"}) 49 | |> TestRepo.update!() 50 | 51 | assert nil == TestRepo.get_by(User, [name: "James"], prefix: tenant) 52 | 53 | assert [%User{name: "John"}, %User{name: "John"}] = 54 | from(u in User, where: u.name == ^"John") 55 | |> TestRepo.all(prefix: tenant) 56 | end 57 | 58 | test "update_all via index", context do 59 | tenant = context[:tenant] 60 | 61 | {:ok, user} = 62 | %User{name: "John"} 63 | |> FoundationDB.usetenant(tenant) 64 | |> TestRepo.insert() 65 | 66 | assert {1, _} = 67 | from(u in User, where: u.name == ^"John") 68 | |> TestRepo.update_all([set: [name: "Jane"]], prefix: tenant) 69 | 70 | user_id = user.id 71 | assert %User{id: ^user_id, name: "Jane"} = TestRepo.get!(User, user_id, prefix: tenant) 72 | end 73 | 74 | test "delete_all via index", context do 75 | tenant = context[:tenant] 76 | 77 | {:ok, user} = 78 | %User{name: "John"} 79 | |> FoundationDB.usetenant(tenant) 80 | |> TestRepo.insert() 81 | 82 | assert {1, _} = 83 | from(u in User, where: u.name == ^"John") 84 | |> TestRepo.delete_all(prefix: tenant) 85 | 86 | assert nil == TestRepo.get(User, user.id, prefix: tenant) 87 | end 88 | 89 | test "between query on string param", context do 90 | tenant = context[:tenant] 91 | 92 | {:ok, _user1} = 93 | %User{name: "John"} 94 | |> FoundationDB.usetenant(tenant) 95 | |> TestRepo.insert() 96 | 97 | {:ok, 
_user2} = 98 | %User{name: "James"} 99 | |> FoundationDB.usetenant(tenant) 100 | |> TestRepo.insert() 101 | 102 | assert [%User{name: "James"}] = 103 | from(u in User, 104 | where: 105 | u.name > "Ja" and 106 | u.name < "Jo" 107 | ) 108 | |> TestRepo.all(prefix: tenant) 109 | end 110 | 111 | test "greater/lesser query on string param", context do 112 | tenant = context[:tenant] 113 | 114 | {:ok, _user1} = 115 | %User{name: "John"} 116 | |> FoundationDB.usetenant(tenant) 117 | |> TestRepo.insert() 118 | 119 | {:ok, _user2} = 120 | %User{name: "James"} 121 | |> FoundationDB.usetenant(tenant) 122 | |> TestRepo.insert() 123 | 124 | assert [%User{name: "James"}, %User{name: "John"}] = 125 | from(u in User, 126 | where: u.name > "Ja" 127 | ) 128 | |> TestRepo.all(prefix: tenant) 129 | 130 | assert [%User{name: "James"}] = 131 | from(u in User, 132 | where: u.name < "Jo" 133 | ) 134 | |> TestRepo.all(prefix: tenant) 135 | end 136 | end 137 | end 138 | -------------------------------------------------------------------------------- /test/ecto/integration/indexer_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.IndexerTest do 2 | use Ecto.Integration.MigrationsCase, async: true 3 | 4 | alias Ecto.Adapters.FoundationDB 5 | 6 | alias EctoFoundationDB.Indexer 7 | alias EctoFoundationDB.Layer.Pack 8 | alias EctoFoundationDB.Migrator 9 | 10 | alias Ecto.Integration.TestRepo 11 | 12 | alias EctoFoundationDB.Schemas.User 13 | 14 | alias Ecto.Integration.IndexerTest.NameStartsWithJ 15 | alias Ecto.Integration.IndexerTest.TestDropMigration 16 | alias Ecto.Integration.IndexerTest.TestMigration 17 | alias Ecto.Integration.IndexerTest.TestMigrator1 18 | alias Ecto.Integration.IndexerTest.TestMigrator2 19 | 20 | defmodule TestMigrator1 do 21 | @moduledoc false 22 | 23 | use EctoFoundationDB.Migrator 24 | 25 | @impl true 26 | def migrations(), do: [{0, TestMigration}] 27 | end 28 | 29 | defmodule TestMigrator2 do 30 
| @moduledoc false 31 | 32 | use EctoFoundationDB.Migrator 33 | 34 | @impl true 35 | def migrations(), do: [{0, TestMigration}, {1, TestDropMigration}] 36 | end 37 | 38 | defmodule TestMigration do 39 | @moduledoc false 40 | use EctoFoundationDB.Migration 41 | 42 | @impl true 43 | def change() do 44 | [create(index(User, [:name], options: [indexer: NameStartsWithJ]))] 45 | end 46 | end 47 | 48 | defmodule TestDropMigration do 49 | @moduledoc false 50 | use EctoFoundationDB.Migration 51 | 52 | @impl true 53 | def change() do 54 | [drop(index(User, [:name], options: [indexer: NameStartsWithJ]))] 55 | end 56 | end 57 | 58 | defmodule NameStartsWithJ do 59 | @behaviour Indexer 60 | 61 | alias EctoFoundationDB.QueryPlan 62 | alias EctoFoundationDB.Tenant 63 | 64 | @count_key "name_starts_with_J_count" 65 | @index_key "name_starts_with_J_index/" 66 | 67 | # omitted for brevity. A complete Indexer must implement all functions 68 | @impl true 69 | def create_range(_tenant, _idx), do: {"", "\xFF"} 70 | 71 | @impl true 72 | def drop_ranges(tenant, _idx), do: [count_key(tenant), index_range(tenant)] 73 | 74 | @impl true 75 | def create(_tenant, _tx, _idx, _schema, _range, _limit), do: {0, {"\xFF", "\xFF"}} 76 | 77 | @impl true 78 | def clear(_tenant, _tx, _idx, _schema, _kv), do: nil 79 | 80 | @impl true 81 | def set(tenant, tx, _idx, _schema, {_, data}) do 82 | name = Keyword.get(data, :name) 83 | 84 | if not is_nil(name) and String.starts_with?(name, "J") do 85 | :erlfdb.add(tx, Tenant.pack(tenant, {@count_key}), 1) 86 | 87 | # For simplicity, we duplicate the data into the index key 88 | :erlfdb.set(tx, Tenant.pack(tenant, {@index_key, data[:id]}), Pack.to_fdb_value(data)) 89 | end 90 | end 91 | 92 | @impl true 93 | def range(_idx, plan, _options) do 94 | %QueryPlan{constraints: [%QueryPlan.Equal{field: :name, param: "J"}]} = plan 95 | index_range(plan.tenant) 96 | end 97 | 98 | def get_count(tenant) do 99 | FoundationDB.transactional(tenant, fn tx -> 100 | tx 101 | |> 
:erlfdb.get(count_key(tenant)) 102 | |> :erlfdb.wait() 103 | |> decode() 104 | end) 105 | end 106 | 107 | defp decode(:not_found), do: -1 108 | defp decode(x), do: :binary.decode_unsigned(x, :little) 109 | 110 | defp count_key(tenant) do 111 | Tenant.pack(tenant, {@count_key}) 112 | end 113 | 114 | defp index_range(tenant) do 115 | Tenant.range(tenant, {@index_key}) 116 | end 117 | end 118 | 119 | @moduletag :integration 120 | describe "indexer" do 121 | test ":create/drop: name starts with J", context do 122 | tenant = context[:tenant] 123 | 124 | :ok = Migrator.up(TestRepo, tenant, migrator: TestMigrator1, log: false) 125 | 126 | assert {:ok, _} = TestRepo.insert(%User{name: "Jesse"}, prefix: tenant) 127 | 128 | assert {:ok, _} = TestRepo.insert(%User{name: "Sarah"}, prefix: tenant) 129 | 130 | assert %User{name: "Jesse"} = TestRepo.get_by!(User, [name: "J"], prefix: tenant) 131 | 132 | assert 1 == NameStartsWithJ.get_count(tenant) 133 | 134 | :ok = Migrator.up(TestRepo, tenant, migrator: TestMigrator2, log: false) 135 | 136 | assert -1 == NameStartsWithJ.get_count(tenant) 137 | end 138 | end 139 | end 140 | -------------------------------------------------------------------------------- /docs/developer_guides/operators_manual.md: -------------------------------------------------------------------------------- 1 | # Operator's Manual 2 | 3 | In order to maintain transactional guarantees as your application changes and 4 | grows, there is a burden on the database operator for some data manipulations. 5 | EctoFoundationDB provides building blocks that 6 | can be executed at strategic times to maintain data integrity. 7 | 8 | There is a need for a human operator to execute these commands for one 9 | important reason: EctoFDB does not know details about the topology of your 10 | distributed application, or the migration versions that are active on clients 11 | at a given time. 
12 | 13 | In writing this guide, we've taken heavy inspiration from 14 | [GitHub | Safe Ecto Migrations](https://github.com/fly-apps/safe-ecto-migrations). 15 | It's written assuming a SQL database like Postgres, but many of the concepts 16 | are applicable to EctoFoundationDB, and it's worth a read. 17 | 18 | > #### Safe Index Migrations {: .note} 19 | > Adding and dropping indexes is done completely on-line without risk of data consistency problems. 20 | > The remainder of this document discusses other data operations. Please 21 | > refer to the documentation on 22 | > [Migrations](Ecto.Adapters.FoundationDB.html#module-migrations) 23 | > for more info on index migrations. 24 | 25 | ## Safe Data Migrations 26 | 27 | - [Adding a field](#adding-a-field) 28 | - [Removing a field](#removing-a-field) 29 | - [Renaming a field](#renaming-a-field) 30 | 31 | ### Adding a field 32 | 33 | Adding a field is trivial. Simply modify your schema to include the new field. 34 | There is no `ALTER TABLE` DDL or equivalent. Any existing objects will 35 | automatically interpret values for the new field as `nil`. If you require 36 | some other value, use the `Repo.update!/2` function like normal. 37 | 38 | ```diff 39 | # App deploy, in the Ecto schema 40 | 41 | defmodule MyApp.Post do 42 | schema "posts" do 43 | + field :a_new_field, :string 44 | end 45 | end 46 | ``` 47 | 48 | --- 49 | 50 | ### Removing a field 51 | 52 | If all you care about is not having the field show up in your structs, you may 53 | choose to simply remove it from your schema. The underlying data will remain 54 | intact, however, until each item is updated at some point in the future. If you 55 | need to ensure that the data is truly removed, continue reading. 56 | 57 | Safety can be assured if the application code is first updated to remove references to the field so it's no longer loaded or queried. Then, the field can safely be removed from the database. 58 | 59 | 1. 
Deploy code change to remove references to the field. 60 | 2. Execute `EctoFoundationDB.CLI.delete_field!/3` to remove the data. 61 | 62 | > #### Warning: data rewrite {: .warning} 63 | > Removing a field requires writing all keys that have that field in the 64 | > value. 65 | 66 | Application deployment: 67 | 68 | ```diff 69 | # App deploy, in the Ecto schema 70 | 71 | defmodule MyApp.Post do 72 | schema "posts" do 73 | - field :no_longer_needed, :string 74 | end 75 | end 76 | ``` 77 | 78 | Once your application is fully deployed to all nodes, execute the following 79 | command: 80 | 81 | ```elixir 82 | iex> EctoFoundationDB.CLI.delete_field!(MyApp.Repo, MyApp.Post, :no_longer_needed) 83 | ``` 84 | 85 | --- 86 | 87 | ### Renaming a field 88 | 89 | Take a phased approach: 90 | 91 | 1. Create a new field with any relevant indexes 92 | 2. In application code, write to both fields 93 | 3. Backfill data from old field to new field (see below) 94 | 4. In application code, move reads from old field to the new field 95 | 5. In application code, remove old field from Ecto schemas, and drop any old index(es) 96 | 6. Delete the old field (see below) 97 | 98 | > #### Warning: data rewrite {: .warning} 99 | > Renaming a field requires writing all related keys twice. 100 | 101 | **Backfill data from old field to new field** 102 | 103 | ```elixir 104 | iex> EctoFoundationDB.CLI.copy_field!(MyApp.Repo, MyApp.Post, :old_field, :new_field) 105 | ``` 106 | 107 | **Delete the old field** 108 | 109 | ```elixir 110 | iex> EctoFoundationDB.CLI.delete_field!(MyApp.Repo, MyApp.Post, :old_field) 111 | ``` 112 | 113 | > #### Sample Available {: .note} 114 | > Please refer to [GitHub | cli_test.exs](https://github.com/foundationdb-beam/ecto_foundationdb/blob/main/test/ecto/integration/cli_test.exs) 115 | > to review the test that executes this operation. 
116 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/migration.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Migration do 2 | @moduledoc false 3 | 4 | alias EctoFoundationDB.Indexer.SchemaMetadata 5 | alias EctoFoundationDB.Migration.Index 6 | alias EctoFoundationDB.Schema 7 | 8 | @type adapter_meta :: Ecto.Adapter.adapter_meta() 9 | 10 | @type drop_mode :: :restrict | :cascade 11 | 12 | @typedoc "All migration commands" 13 | @type command :: 14 | raw :: 15 | String.t() 16 | | {:create, Index.t()} 17 | | {:create_if_not_exists, Index.t()} 18 | | {:drop, Index.t(), drop_mode()} 19 | | {:drop_if_exists, Index.t(), drop_mode()} 20 | 21 | @callback change() :: list(command()) 22 | 23 | defmodule Index do 24 | @moduledoc """ 25 | Used internally by adapters. 26 | 27 | To define an index in a migration, see `Ecto.Migration.index/3`. 28 | """ 29 | defstruct schema: nil, 30 | prefix: nil, 31 | name: nil, 32 | columns: [], 33 | unique: false, 34 | concurrently: true, 35 | using: nil, 36 | include: [], 37 | only: false, 38 | nulls_distinct: nil, 39 | where: nil, 40 | comment: nil, 41 | options: [] 42 | 43 | @type t :: %__MODULE__{ 44 | schema: Ecto.Schema.t() | nil, 45 | prefix: atom, 46 | name: atom, 47 | columns: [atom | String.t()], 48 | unique: boolean, 49 | concurrently: boolean, 50 | using: atom | String.t(), 51 | only: boolean, 52 | include: [atom | String.t()], 53 | nulls_distinct: boolean | nil, 54 | where: atom | String.t(), 55 | comment: String.t() | nil, 56 | options: String.t() 57 | } 58 | end 59 | 60 | @doc false 61 | defmacro __using__(_) do 62 | quote location: :keep do 63 | import EctoFoundationDB.Migration 64 | @behaviour EctoFoundationDB.Migration 65 | @before_compile EctoFoundationDB.Migration 66 | end 67 | end 68 | 69 | @doc false 70 | defmacro __before_compile__(_env) do 71 | quote do 72 | def __migration__ do 73 | [] 74 | end 
75 | end 76 | end 77 | 78 | def create(index = %Index{}) do 79 | {:create, index} 80 | end 81 | 82 | def drop(index = %Index{}) do 83 | {:drop, index} 84 | end 85 | 86 | def index(schema, columns, opts \\ []) 87 | 88 | def index(schema, column, opts) when is_atom(schema) and is_atom(column) do 89 | index(schema, [column], opts) 90 | end 91 | 92 | def index(schema, columns, opts) when is_atom(schema) and is_list(columns) and is_list(opts) do 93 | validate_index_opts!(opts) 94 | index = struct(%Index{schema: schema, columns: columns}, opts) 95 | %{index | name: index.name || default_index_name(index)} 96 | end 97 | 98 | def metadata(schema, include \\ nil, opts \\ []) do 99 | options = opts[:options] || [] 100 | options = Keyword.put(options, :indexer, SchemaMetadata) 101 | opts = Keyword.put(opts, :options, options) 102 | validate_index_opts!(opts) 103 | index = struct(%Index{schema: schema, columns: get_metadata_include(include)}, opts) 104 | %{index | name: index.name || default_metadata_name(%{index | columns: []})} 105 | end 106 | 107 | defp validate_index_opts!(opts), do: Keyword.validate!(opts, [:options]) 108 | 109 | defp default_index_name(index) do 110 | default_name_(index, "index") 111 | end 112 | 113 | defp default_metadata_name(index) do 114 | default_name_(index, "metadata") 115 | end 116 | 117 | defp default_name_(index, label) do 118 | [Schema.get_source(index.schema), index.columns, label] 119 | |> List.flatten() 120 | |> Stream.map(&to_string(&1)) 121 | |> Stream.map(&String.replace(&1, ~r"[^\w_]", "_")) 122 | |> Stream.map(&String.replace_trailing(&1, "_", "")) 123 | |> Enum.to_list() 124 | |> Enum.join("_") 125 | |> String.to_atom() 126 | end 127 | 128 | # The include param is always explicitly defined in the DB so that new entries to 129 | # SchemaMetadata can be added without affecting existing indexes. 
130 | defp get_metadata_include(nil), do: SchemaMetadata.field_names() 131 | defp get_metadata_include(include) when is_list(include), do: include 132 | end 133 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/layer/primary_kv_codec.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Layer.PrimaryKVCodec do 2 | @moduledoc false 3 | alias EctoFoundationDB.Layer.InternalMetadata 4 | alias EctoFoundationDB.Layer.Pack 5 | alias EctoFoundationDB.Layer.PrimaryKVCodec.StreamDecoder 6 | alias EctoFoundationDB.Options 7 | alias EctoFoundationDB.Tenant 8 | 9 | defstruct [:tuple, :vs?, :packed] 10 | 11 | def metadata_key(), do: :multikey 12 | 13 | def new(tuple, vs \\ false) 14 | 15 | def new(tuple, vs) when is_tuple(tuple) do 16 | %__MODULE__{tuple: tuple, vs?: vs} 17 | end 18 | 19 | def new(key, vs) when is_binary(key) do 20 | %__MODULE__{packed: key, vs?: vs} 21 | end 22 | 23 | def vs?(%__MODULE__{vs?: vs?}), do: vs? 24 | 25 | def stream_decode(kvs, tenant, opts \\ []) do 26 | StreamDecoder.stream_decode(kvs, tenant, opts) 27 | end 28 | 29 | def encode(kv_codec, fdb_value, options) do 30 | max_single_value_size = Options.get(options, :max_single_value_size) 31 | max_value_size = Options.get(options, :max_value_size) 32 | 33 | size = byte_size(fdb_value) 34 | 35 | if size > max_value_size do 36 | raise ArgumentError, """ 37 | EctoFoundationDB is configured to reject any objects larger than #{max_value_size} bytes. 38 | 39 | We've encountered a binary of size #{size} bytes. 
40 | """ 41 | end 42 | 43 | fdb_key = pack_key(kv_codec, nil) 44 | 45 | if size > max_single_value_size do 46 | # split binary, create keys 47 | crc = :erlang.crc32(fdb_value) 48 | 49 | chunks = binary_chunk_by(fdb_value, max_single_value_size, []) 50 | 51 | n = length(chunks) 52 | 53 | multikey_kvs = 54 | for( 55 | {chunk, idx} <- Enum.with_index(chunks), 56 | do: {pack_key(kv_codec, codec_metadata_tuple(n, idx, crc)), chunk} 57 | ) 58 | 59 | # Write metadata to the DB. This ensures the "primary write key" 60 | # is always updated (crc), which is required for watches to work as expected. 61 | meta_fdb_value = 62 | InternalMetadata.new(metadata_key(), codec_metadata_tuple(n, -1, crc)) 63 | |> Pack.to_fdb_value() 64 | 65 | {true, [{fdb_key, meta_fdb_value} | multikey_kvs]} 66 | else 67 | {false, [{fdb_key, fdb_value}]} 68 | end 69 | end 70 | 71 | def binary_chunk_by(<<>>, _size, acc), do: Enum.reverse(acc) 72 | 73 | def binary_chunk_by(bin, size, acc) do 74 | case bin do 75 | <> -> 76 | binary_chunk_by(rest, size, [chunk | acc]) 77 | 78 | chunk -> 79 | Enum.reverse([chunk | acc]) 80 | end 81 | end 82 | 83 | defp codec_metadata_tuple(n, i, crc) do 84 | {n, i, crc} 85 | end 86 | 87 | def with_packed_key(kv_codec = %{packed: packed}) when not is_nil(packed), do: kv_codec 88 | 89 | def with_packed_key(kv_codec) do 90 | %{kv_codec | packed: pack_key(kv_codec, nil)} 91 | end 92 | 93 | def with_unpacked_tuple(kv_codec = %{tuple: tuple}, _tenant) when not is_nil(tuple), 94 | do: kv_codec 95 | 96 | def with_unpacked_tuple(kv_codec, tenant) do 97 | %{kv_codec | tuple: Tenant.unpack(tenant, kv_codec.packed)} 98 | end 99 | 100 | def pack_key(kv_codec, t) do 101 | %__MODULE__{vs?: vs?} = kv_codec 102 | 103 | tuple = 104 | kv_codec.tuple 105 | |> add_codec_metadata(t) 106 | 107 | if vs?, do: :erlfdb_tuple.pack_vs(tuple), else: :erlfdb_tuple.pack(tuple) 108 | end 109 | 110 | def set_new_kvs(tx, %__MODULE__{vs?: true}, kvs) do 111 | for {k, v} <- kvs do 112 | 
:erlfdb.set_versionstamped_key(tx, k, v) 113 | end 114 | end 115 | 116 | def set_new_kvs(tx, %__MODULE__{vs?: false}, kvs) do 117 | for {k, v} <- kvs do 118 | :erlfdb.set(tx, k, v) 119 | end 120 | end 121 | 122 | def range(kv_codec) do 123 | %__MODULE__{vs?: vs?} = kv_codec 124 | tuple = add_codec_metadata(kv_codec.tuple, nil) 125 | start_key = if vs?, do: :erlfdb_tuple.pack_vs(tuple), else: :erlfdb_tuple.pack(tuple) 126 | {start_key, start_key <> <<0xFF>>} 127 | end 128 | 129 | # add_codec_metadata({:a, :tuple}, nil) -> {:a, :tuple} 130 | # add_codec_metadata({:a, :tuple}, {0}) -> {:a, :tuple, {0}} 131 | defp add_codec_metadata(tuple, nil), do: tuple 132 | 133 | defp add_codec_metadata(tuple, t) do 134 | Tuple.insert_at(tuple, tuple_size(tuple), t) 135 | end 136 | end 137 | -------------------------------------------------------------------------------- /docs/design/metadata.md: -------------------------------------------------------------------------------- 1 | # Metadata 2 | 3 | EctoFDB stores metadata alongside your Ecto schema data in order to maintain 4 | correct data integrity on the indexes. In doing so, each EctoFDB client 5 | knows which indexes are needed to be written to, even if the application code 6 | is on an older version, which can be the case when deploying updates to a 7 | distributed system. 8 | 9 | Since this metadata is required to be available in every transaction, EctoFDB 10 | must *allow for* the retrieval of the metadata in every transaction. However, 11 | we wish to avoid that retrieval from actually happening as much as possible. 12 | Otherwise, those particular keys become very "hot" and limit the scalability 13 | of the system. Therefore, we maintain a local cache of the metadata for each 14 | (tenant, schema) tuple. 15 | 16 | This document describes the design considerations for the metadata and the 17 | approaches we use to ensure the cache is guaranteed to be valid. 
18 | 19 | ## Metadata Content 20 | 21 | We currently keep track of 2 types of metadata: indexes and partial indexes. 22 | 23 | ### Indexes 24 | 25 | This is a list of all 'ready' indexes for a particular Schema. An index is 26 | considered 'ready' when it has been created and is ready to be used. 27 | 28 | At query time, this list of indexes is inspected to determine the index to use 29 | to provide the best possible query performance. 30 | 31 | At insert and update time, the list is used to set or clear the index keys 32 | according to the specified `EctoFoundationDB.Indexer`. 33 | 34 | ### Partial Indexes 35 | 36 | This is a list of all indexes that are currently undergoing a migration, 37 | usually for index creation. The creation of an index for a particular 38 | (tenant, schema) tuple can take arbitrarily long, and so any queries that 39 | arrive in the meantime must be handled accordingly. The list of partial 40 | indexes informs EctoFDB of how to keep data integrity for any concurrent 41 | creation of `Default` indexes. 42 | 43 | ## Metadata Cache 44 | 45 | The cache uses 2 stages for invalidation. 46 | 47 | * **Stage 1:** The global FoundationDB metadata version key 48 | * **Stage 2:** A version key for each (tenant, schema) tuple, and a special 49 | "claim key" for any partial indexes 50 | 51 | #### Metadata Version Key 52 | 53 | This is a key that has special treatment in the implementation of 54 | FoundationDB transactions. The 55 | [FDB Design Doc | Metadata Version](https://github.com/apple/foundationdb/blob/deda04b8453ecbc6411cc7ac41efb3213e18343f/design/metadata-version.md) 56 | provides a detailed explanation of how this key is implemented. 57 | 58 | For the purposes of this document, it's important to understand that the metadata version key is truly global to the entire keyspace. 
Since we allow 59 | tenants to migrate independently, this necessarily means that the migration for 60 | a single tenant will invalidate Stage 1 of the cache for **all tenants** in 61 | the database. 62 | 63 | There is no cost to reading the global metadata version key, since it's 64 | always sent along with other necessary transactional data. This means that 65 | at steady state, your transactions only need to do the base minimum FDB 66 | operations, and are able to avoid any hot keys. 67 | 68 | If the global version is found to have changed, we do not yet invalidate the 69 | cache. Instead, we move onto Stage 2 of the cache invalidation process. In 70 | doing so, we limit the impact of another tenant's migration. 71 | 72 | #### Schema Migration Version Key 73 | 74 | This is a key that is managed by EctoFDB itself, and does not have special 75 | treatment by FDB. It always contains an integer value representing the largest 76 | version number for complete migrations that are specified in your 77 | `EctoFoundationDB.Migrator`. 78 | 79 | When the global key is found to have changed in Stage 1 of the cache 80 | invalidation, we perform a 'get' on this key. Then we delay the wait until 81 | after some of the transaction work has been completed. Thus, the cache 82 | is *optimistic* that the cached value is still valid. Only at the end of the 83 | transaction do we wait for and compare the version value. If it's not equal, 84 | then the cache is invalidated and the transaction is retried. 85 | 86 | #### Claim Key 87 | 88 | This key is used for tracking metadata that is currently undergoing a 89 | migration. It's managed in the same way as the schema migration version key. 90 | Its content includes a cursor that defines the progress of the migration. 
91 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/layer/tx_insert.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Layer.TxInsert do 2 | @moduledoc false 3 | alias EctoFoundationDB.Exception.Unsupported 4 | alias EctoFoundationDB.Future 5 | alias EctoFoundationDB.Indexer 6 | alias EctoFoundationDB.Layer.DecodedKV 7 | alias EctoFoundationDB.Layer.Pack 8 | alias EctoFoundationDB.Layer.PrimaryKVCodec 9 | alias EctoFoundationDB.Layer.Tx 10 | 11 | defstruct [:tenant, :schema, :source, :metadata, :write_primary, :options] 12 | 13 | def new(tenant, schema, source, metadata, write_primary, options) do 14 | %__MODULE__{ 15 | tenant: tenant, 16 | schema: schema, 17 | source: source, 18 | metadata: metadata, 19 | write_primary: write_primary, 20 | options: options 21 | } 22 | end 23 | 24 | def insert_one( 25 | acc, 26 | tx, 27 | {{pk_field, pk}, future, data_object}, 28 | read_before_write 29 | ) do 30 | %__MODULE__{ 31 | tenant: tenant, 32 | source: source 33 | } = acc 34 | 35 | kv_codec = Pack.primary_codec(tenant, source, pk) 36 | read_before_write = if kv_codec.vs?, do: false, else: read_before_write 37 | data_object = [{pk_field, pk} | Keyword.delete(data_object, pk_field)] 38 | kv = %DecodedKV{codec: kv_codec, data_object: data_object} 39 | 40 | if read_before_write do 41 | future = Tx.async_get(tenant, tx, kv_codec, future) 42 | Future.apply(future, &do_set(acc, tx, kv, &1)) 43 | else 44 | # We assume that the data doesn't exist. This speeds up data loading 45 | # but can result in inconsistent indexes if objects do exist in 46 | # the database that are being blindly overwritten. 
47 | Future.set_result(future, do_set(acc, tx, kv, nil)) 48 | end 49 | end 50 | 51 | def do_set(acc, tx, new_kv, nil) do 52 | %__MODULE__{ 53 | tenant: tenant, 54 | schema: schema, 55 | metadata: metadata, 56 | write_primary: write_primary, 57 | options: options 58 | } = acc 59 | 60 | %DecodedKV{codec: kv_codec, data_object: data_object} = new_kv 61 | 62 | {_, kvs} = PrimaryKVCodec.encode(kv_codec, Pack.to_fdb_value(data_object), options) 63 | 64 | if write_primary do 65 | PrimaryKVCodec.set_new_kvs(tx, kv_codec, kvs) 66 | end 67 | 68 | kv_codec = PrimaryKVCodec.with_packed_key(kv_codec) 69 | 70 | Indexer.set(tenant, tx, metadata, schema, {kv_codec, data_object}) 71 | :ok 72 | end 73 | 74 | def do_set(acc, tx, new_kv, existing_kv) do 75 | %__MODULE__{ 76 | tenant: tenant, 77 | schema: schema, 78 | metadata: metadata, 79 | write_primary: write_primary, 80 | options: options 81 | } = acc 82 | 83 | %DecodedKV{data_object: data_object = [{pk_field, pk} | _]} = new_kv 84 | 85 | case options[:on_conflict] do 86 | :nothing -> 87 | nil 88 | 89 | :replace_all -> 90 | Tx.update_data_object( 91 | tenant, 92 | tx, 93 | schema, 94 | pk_field, 95 | {existing_kv, [set: data_object]}, 96 | metadata, 97 | write_primary, 98 | options 99 | ) 100 | 101 | :ok 102 | 103 | {:replace_all_except, fields} -> 104 | Tx.update_data_object( 105 | tenant, 106 | tx, 107 | schema, 108 | pk_field, 109 | {existing_kv, [set: Keyword.drop(data_object, fields)]}, 110 | metadata, 111 | write_primary, 112 | options 113 | ) 114 | 115 | :ok 116 | 117 | {:replace, fields} -> 118 | Tx.update_data_object( 119 | tenant, 120 | tx, 121 | schema, 122 | pk_field, 123 | {existing_kv, [set: Keyword.take(data_object, fields)]}, 124 | metadata, 125 | write_primary, 126 | options 127 | ) 128 | 129 | :ok 130 | 131 | val when is_nil(val) or val == :raise -> 132 | raise Unsupported, "Key exists: #{inspect(schema)} #{inspect(pk)}" 133 | 134 | unsupported_on_conflict -> 135 | raise Unsupported, """ 136 | The :on_conflict 
option provided is not supported by the FoundationDB Adapter. 137 | 138 | You provided #{inspect(unsupported_on_conflict)}. 139 | 140 | Instead, use one of :raise, :nothing, :replace_all, {:replace_all_except, fields}, or {:replace, fields} 141 | """ 142 | end 143 | end 144 | end 145 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationdb.MixProject do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :ecto_foundationdb, 7 | version: "0.5.0", 8 | description: "FoundationDB adapter for Ecto", 9 | elixir: "~> 1.15", 10 | start_permanent: Mix.env() == :prod, 11 | deps: deps(), 12 | elixirc_paths: elixirc_paths(Mix.env()), 13 | aliases: aliases(), 14 | dialyzer: [ 15 | ignore_warnings: ".dialyzer_ignore.exs" 16 | ], 17 | package: package(), 18 | 19 | # Docs 20 | name: "Ecto.Adapters.FoundationDB", 21 | docs: docs() 22 | ] 23 | end 24 | 25 | defp package() do 26 | [ 27 | licenses: ["Apache-2.0"], 28 | links: %{ 29 | "GitHub" => "https://github.com/foundationdb-beam/ecto_foundationdb" 30 | } 31 | ] 32 | end 33 | 34 | defp docs do 35 | [ 36 | main: "Ecto.Adapters.FoundationDB", 37 | source_url: "https://github.com/foundationdb-beam/ecto_foundationdb", 38 | filter_modules: 39 | ~r/^Elixir.Ecto.Adapters.FoundationDB|EctoFoundationDB(.CLI|.Database|.Exception.Unsupported|.Exception.IncorrectTenancy|.Future|.Index|.Indexer|.Layer|.Migrator|.Options|.QueryPlan|.Sandbox|.Tenant|.Tenant.DirectoryTenant|.Tenant.ManagedTenant|.Versionstamp|.Indexer.SchemaMetadata)?$/, 40 | extras: [ 41 | "CHANGELOG.md", 42 | "docs/getting_started/introduction.livemd", 43 | "docs/getting_started/watches.livemd", 44 | "docs/getting_started/collection_syncing.livemd", 45 | "docs/developer_guides/testing.md", 46 | "docs/developer_guides/operators_manual.md", 47 | "docs/design/metadata.md" 48 | ], 49 | groups_for_extras: [ 50 | "Getting Started": 
~r/getting_started/, 51 | "Developer Guides": ~r/developer_guides/, 52 | Design: ~r/design/ 53 | ], 54 | before_closing_head_tag: &docs_before_closing_head_tag/1, 55 | before_closing_body_tag: &docs_before_closing_body_tag/1 56 | ] 57 | end 58 | 59 | defp elixirc_paths(:test), do: ["lib", "test/support"] 60 | defp elixirc_paths(_), do: ["lib"] 61 | 62 | # Run "mix help compile.app" to learn about applications. 63 | def application do 64 | [ 65 | extra_applications: extra_applications(Mix.env()) 66 | ] 67 | end 68 | 69 | defp extra_applications(:test), do: [:logger, :runtime_tools] 70 | defp extra_applications(_), do: [:logger] 71 | 72 | # Run "mix help deps" to learn about dependencies. 73 | defp deps do 74 | [ 75 | {:erlfdb, "~> 0.3"}, 76 | {:ecto, "~> 3.12"}, 77 | {:jason, "~> 1.4"}, 78 | {:credo, "~> 1.6", only: [:dev, :test, :docs]}, 79 | {:dialyxir, "~> 1.4", only: [:dev, :test], runtime: false}, 80 | {:ex_doc, "~> 0.16", only: :dev, runtime: false}, 81 | {:benchee, "~> 1.0", only: :bench} 82 | ] 83 | end 84 | 85 | defp aliases do 86 | [ 87 | lint: [ 88 | "format --check-formatted", 89 | "deps.unlock --check-unused", 90 | "credo --all --strict", 91 | "dialyzer --format short" 92 | ] 93 | ] 94 | end 95 | 96 | defp docs_before_closing_head_tag(:html), do: docs_mermaid_js() 97 | defp docs_before_closing_head_tag(:epub), do: "" 98 | 99 | defp docs_before_closing_body_tag(:html), do: "" 100 | defp docs_before_closing_body_tag(:epub), do: "" 101 | 102 | defp docs_mermaid_js() do 103 | """ 104 | 125 | 126 | """ 127 | end 128 | end 129 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/layer.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Layer do 2 | @moduledoc """ 3 | The Ecto FoundationDB Adapter implements a [Layer](https://apple.github.io/foundationdb/layer-concept.html) 4 | on the underlying key-value store provided by FoundationDB. 
prefixed with a short binary string as allocated by `:erlfdb_directory`. This tenant prefixing is the most critical 15 | element of the keyspace because it's the mechanism that guarantees tenants cannot cross their boundaries. **The rest of 16 | this documentation is written assuming you're using the `DirectoryTenant`.**
The key is not packed into the tenant's keyspace 35 | :erlfdb.set(db, :erlfdb_tuple.pack({"hello"}), "world") 36 | :erlfdb.set(db, "hello", "world") 37 | ``` 38 | 39 | A value (the binary stored at each key) is either 40 | * some other keys (in the case of Default indexes) or 41 | * Erlang term data encoded with `:erlang.term_to_binary/1` 42 | 43 | ## Primary Write and Read 44 | 45 | Your Ecto Schema has a primary key field, which is usually a string or an integer. This primary 46 | key uniquely identifies an object of your schema within the tenant in which it lives. 47 | 48 | ```elixir 49 | defmodule EctoFoundationDB.Schemas.User do 50 | use Ecto.Schema 51 | schema "users" do 52 | field(:name, :string) 53 | field(:department, :string) 54 | timestamps() 55 | end 56 | end 57 | ``` 58 | 59 | In this example, a User has an `:id` and a `:name`. Also remember that the User is defined within 60 | a tenant which provides a scope under which the User lives. For example, a typical tenant 61 | would be the organization the User belongs to. Since the User is in this tenant, we do not need 62 | to provide an identifier for this organization on the User object itself. 63 | 64 | "Primary Write" refers to the insertion of the User struct into the FoundationDB key-value store 65 | under a single key that uniquely identifies the User. This key includes the `:id` value. 66 | 67 | Your struct data is stored as a `Keyword` encoded with `:erlang.term_to_binary/1`. 68 | 69 | Note: The Primary Write can be skipped by providing the `write_primary: false` option on the `@schema_context`. 70 | See below for more. 71 | 72 | ## Default Indexes 73 | 74 | When a Default index is created via a migration, the Ecto FoundationDB Adapter writes a set of 75 | keys and values to facilitate lookups based on the indexed field. 76 | 77 | ## Advanced Options: `write_primary: false`, `mapped?: false` 78 | 79 | If you choose to use `write_primary: false` on your schema, this skips the Primary Write. 
The consequences of this are as follows:
| state = args |> Map.put(:count, 0) |> Map.put(:tenant, tenant) 42 | {sk, ek} = Tenant.range(tenant, {}) 43 | {:ok, [claim_key(tenant)], {sk, ek}, state} 44 | end 45 | 46 | @impl true 47 | def done?(state, tx) do 48 | val = 49 | tx 50 | |> :erlfdb.get(done_key(state.tenant)) 51 | |> :erlfdb.wait() 52 | 53 | {val != :not_found, state} 54 | end 55 | 56 | @impl true 57 | def next(state = %{count: count}, tx, {start_key, end_key}) do 58 | if count > state.throw_after, do: throw(:test_error) 59 | 60 | kvs = 61 | tx 62 | |> :erlfdb.get_range(start_key, end_key, limit: @limit) 63 | |> :erlfdb.wait() 64 | 65 | # Pretend that we're writing to a bunch of keys, as we would be doing for index creation 66 | Enum.each(kvs, fn {k, _} -> 67 | :erlfdb.add_write_conflict_key(tx, Tenant.pack(state.tenant, {"test_" <> k})) 68 | end) 69 | 70 | emit = 71 | if length(kvs) < @limit do 72 | # raises exception if the key already exists, similar to an `Ecto.Repo` insert 73 | :not_found = :erlfdb.wait(:erlfdb.get(tx, done_key(state.tenant))) 74 | :erlfdb.set(tx, done_key(state.tenant), "done") 75 | [:done] 76 | else 77 | [] 78 | end 79 | 80 | start_key = 81 | case Enum.reverse(kvs) do 82 | [{key, _} | _] -> 83 | key 84 | 85 | [] -> 86 | end_key 87 | end 88 | 89 | after_tx = fn -> :ok end 90 | {after_tx, emit, {start_key, end_key}, %{state | count: count + length(kvs)}} 91 | end 92 | end 93 | 94 | defmodule EctoFoundationDBProgressiveJobTest do 95 | alias EctoFoundationDBProgressiveJobTest.TestJob 96 | 97 | alias Ecto.Adapters.FoundationDB 98 | 99 | alias EctoFoundationDB.Layer.Pack 100 | alias EctoFoundationDB.Tenant 101 | 102 | use Ecto.Integration.MigrationsCase, async: true 103 | 104 | @n_seed 1000 105 | 106 | # When n_tasks == 1, the job is run normally, happy path 107 | # When n_tasks > 1, the running job fails halfway through, and another job (exactly one) takes over 108 | defp async_jobs(tenant, n_tasks, n_seed) do 109 | TestJob.reset(tenant) 110 | 111 | fun = fn 112 | id = 1 when 
n_tasks != 1 -> 113 | try do 114 | TestJob.transactional(tenant, %{id: id, throw_after: 100}) 115 | catch 116 | :test_error -> 117 | :test_error 118 | end 119 | 120 | id -> 121 | # The sleep ensures that id=1 claims the work first 122 | :timer.sleep(20) 123 | TestJob.transactional(tenant, %{id: id, throw_after: n_seed + 1}) 124 | end 125 | 126 | 1..n_tasks 127 | |> Task.async_stream(fun, 128 | ordered: false, 129 | max_concurrency: System.schedulers_online() * 2, 130 | timeout: :infinity 131 | ) 132 | |> Enum.to_list() 133 | |> Enum.filter(fn 134 | {:ok, :test_error} -> false 135 | {:ok, []} -> false 136 | _ -> true 137 | end) 138 | end 139 | 140 | test "progressive job contract", context do 141 | tenant = context[:tenant] 142 | 143 | FoundationDB.transactional(tenant, fn tx -> 144 | key = Tenant.pack(tenant, {Ecto.UUID.generate()}) 145 | for i <- 1..@n_seed, do: :erlfdb.set(tx, key, Pack.to_fdb_value(i)) 146 | end) 147 | 148 | assert [{:ok, [:done]}] == async_jobs(tenant, 1, @n_seed) 149 | assert [{:ok, [:done]}] == async_jobs(tenant, 20, @n_seed) 150 | end 151 | end 152 | -------------------------------------------------------------------------------- /test/ecto/integration/schema_metadata_test.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDBSchemaMetadataTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias EctoFoundationDB.Future 6 | alias EctoFoundationDB.Indexer.SchemaMetadata 7 | alias EctoFoundationDB.Schemas.Event 8 | alias EctoFoundationDB.Schemas.User 9 | alias EctoFoundationDB.Tenant 10 | 11 | defp idcuc(tenant, schema) do 12 | TestRepo.transactional(tenant, fn -> 13 | [ 14 | SchemaMetadata.async_inserts(schema), 15 | SchemaMetadata.async_deletes(schema), 16 | SchemaMetadata.async_collection(schema), 17 | SchemaMetadata.async_updates(schema), 18 | SchemaMetadata.async_changes(schema) 19 | ] 20 | |> TestRepo.await() 21 | end) 22 | end 23 | 
24 | test "counters", context do 25 | tenant = context[:tenant] 26 | assert [0, 0, 0, 0, 0] = idcuc(tenant, User) 27 | 28 | user = %User{} = TestRepo.insert!(%User{name: "Alice"}, prefix: tenant) 29 | 30 | assert [1, 0, 1, 0, 1] = idcuc(tenant, User) 31 | 32 | {:ok, _} = TestRepo.update(User.changeset(user, %{name: "Bob"}), prefix: tenant) 33 | 34 | assert [1, 0, 1, 1, 2] = idcuc(tenant, User) 35 | 36 | TestRepo.delete!(user, prefix: tenant) 37 | 38 | assert [1, 1, 2, 1, 3] = idcuc(tenant, User) 39 | 40 | future = 41 | TestRepo.transactional(tenant, fn -> 42 | SchemaMetadata.watch_collection(User) 43 | end) 44 | 45 | TestRepo.transactional(tenant, fn -> 46 | user = TestRepo.insert!(%User{name: "Alice2"}) 47 | TestRepo.delete!(user) 48 | end) 49 | 50 | assert _ = TestRepo.await(future) 51 | end 52 | 53 | test "labeled watch", context do 54 | tenant = context[:tenant] 55 | 56 | # init 57 | {users, futures} = 58 | TestRepo.transactional(tenant, fn -> 59 | users = TestRepo.all(User) 60 | future = SchemaMetadata.watch_collection(User, label: :users) 61 | {users, [future]} 62 | end) 63 | 64 | assigns = %{users: users} 65 | 66 | assert [] = users 67 | 68 | [watch_future] = futures 69 | watch_ref = Future.ref(watch_future) 70 | 71 | # insert and receive the new collection 72 | TestRepo.insert!(%User{name: "Alice"}, prefix: tenant) 73 | 74 | {assigns, futures} = 75 | receive_ready(assigns, futures, [watch_ref], prefix: tenant, watch?: true) 76 | 77 | assert [_] = futures 78 | refute watch_future == hd(futures) 79 | 80 | [watch_future] = futures 81 | watch_ref = Future.ref(watch_future) 82 | 83 | assert %{users: [user = %{name: "Alice"}]} = assigns 84 | assert %Tenant{} = hd(assigns.users).__meta__.prefix 85 | 86 | # delete and receive the new collection 87 | TestRepo.delete!(user, prefix: tenant) 88 | 89 | {assigns, futures} = 90 | receive_ready(assigns, futures, [watch_ref], prefix: tenant, watch?: true) 91 | 92 | assert [_] = futures 93 | 94 | assert %{users: []} = 
assigns 95 | end 96 | 97 | test "labeled query watch", context do 98 | tenant = context[:tenant] 99 | 100 | import Ecto.Query 101 | 102 | query = from(e in Event, where: e.date >= ^~D[2025-07-15]) 103 | 104 | insert_event = fn date -> 105 | f = 106 | TestRepo.transactional(tenant, fn -> 107 | TestRepo.async_insert_all!(Event, [ 108 | %Event{date: date, user_id: "foo", time: ~T[00:00:00.000000]} 109 | ]) 110 | end) 111 | 112 | [_] = TestRepo.await(f) 113 | end 114 | 115 | insert_event.(~D[2025-07-14]) 116 | 117 | # init 118 | {events, futures} = 119 | TestRepo.transactional(tenant, fn -> 120 | events = TestRepo.all(query) 121 | future = SchemaMetadata.watch_collection(Event, label: :events, query: query) 122 | {events, [future]} 123 | end) 124 | 125 | assigns = %{events: events} 126 | 127 | assert [] = events 128 | 129 | [watch_future] = futures 130 | watch_ref = Future.ref(watch_future) 131 | 132 | # insert and receive the new collection 133 | insert_event.(~D[2025-07-16]) 134 | 135 | {assigns, futures} = 136 | receive_ready(assigns, futures, [watch_ref], prefix: tenant, watch?: false) 137 | 138 | assert [] = futures 139 | 140 | assert %{events: [%{user_id: "foo"}]} = assigns 141 | end 142 | 143 | defp receive_ready(assigns, futures, [watch_ref], opts) do 144 | receive do 145 | {^watch_ref, :ready} when is_reference(watch_ref) -> 146 | {ready_assigns, futures} = 147 | TestRepo.assign_ready(futures, [watch_ref], opts) 148 | 149 | assert [_] = ready_assigns 150 | assert is_list(ready_assigns) 151 | 152 | {Map.merge(assigns, Enum.into(ready_assigns, %{})), futures} 153 | after 154 | 100 -> 155 | raise "Future result not received within 100 msec" 156 | end 157 | end 158 | end 159 | -------------------------------------------------------------------------------- /test/ecto/integration/versionstamp_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Ecto.Integration.VersionstampTest do 2 | use Ecto.Integration.Case, 
async: true 3 | 4 | alias Ecto.Adapters.FoundationDB 5 | 6 | alias EctoFoundationDB.Exception.Unsupported 7 | alias EctoFoundationDB.Future 8 | alias EctoFoundationDB.Schemas.QueueItem 9 | alias EctoFoundationDB.Versionstamp 10 | 11 | alias Ecto.Integration.TestRepo 12 | 13 | describe "non-autogenerated primary keys" do 14 | test "nil pk raises", context do 15 | tenant = context[:tenant] 16 | 17 | assert_raise Unsupported, 18 | ~r/does not support inserting records with nil primary keys/, 19 | fn -> 20 | %QueueItem{data: "foo"} 21 | |> FoundationDB.usetenant(tenant) 22 | |> TestRepo.insert() 23 | end 24 | end 25 | 26 | test "internal transaction versionstamp", context do 27 | tenant = context[:tenant] 28 | 29 | {:ok, event} = 30 | %QueueItem{ 31 | id: Versionstamp.next(), 32 | data: "foo" 33 | } 34 | |> FoundationDB.usetenant(tenant) 35 | |> TestRepo.insert() 36 | 37 | assert_raise Unsupported, 38 | ~r/we disallow converting an incomplete versionstamp to an integer/, 39 | fn -> 40 | Versionstamp.to_integer(event.id) 41 | end 42 | end 43 | 44 | test "explicit transaction versionstamp", context do 45 | tenant = context[:tenant] 46 | 47 | {event, vs_future} = 48 | TestRepo.transactional( 49 | tenant, 50 | fn tx -> 51 | assert_raise Unsupported, 52 | ~r/you must use `EctoFoundationDB.Versionstamp.next\/1`/, 53 | fn -> 54 | Versionstamp.next() 55 | end 56 | 57 | {:ok, event} = 58 | %QueueItem{ 59 | id: Versionstamp.next(tx), 60 | data: "foo" 61 | } 62 | |> TestRepo.insert() 63 | 64 | vs_future = Versionstamp.get(tx) 65 | {event, vs_future} 66 | end 67 | ) 68 | 69 | vs = Future.result(vs_future) 70 | event = %{event | id: Versionstamp.resolve(event.id, vs)} 71 | 72 | assert is_integer(Versionstamp.to_integer(event.id)) 73 | end 74 | 75 | test "Repo.async_insert_all!", context do 76 | tenant = context[:tenant] 77 | 78 | future = 79 | TestRepo.transactional( 80 | tenant, 81 | fn tx -> 82 | TestRepo.async_insert_all!(QueueItem, [ 83 | %QueueItem{ 84 | id: 
Versionstamp.next(tx), 85 | data: "bar" 86 | }, 87 | %QueueItem{ 88 | id: Versionstamp.next(tx), 89 | data: "foo" 90 | } 91 | ]) 92 | end 93 | ) 94 | 95 | [event1, event2] = TestRepo.await(future) 96 | 97 | refute is_nil(event1.__meta__.prefix) 98 | refute is_nil(event2.__meta__.prefix) 99 | assert is_integer(Versionstamp.to_integer(event1.id)) 100 | assert is_integer(Versionstamp.to_integer(event2.id)) 101 | end 102 | end 103 | 104 | test "basic queue operations", context do 105 | tenant = context[:tenant] 106 | 107 | # push 108 | future = 109 | TestRepo.transactional(tenant, fn -> 110 | TestRepo.async_insert_all!(QueueItem, [ 111 | %QueueItem{data: "test_a"}, 112 | %QueueItem{data: "test_b"} 113 | ]) 114 | end) 115 | 116 | [%QueueItem{id: id_a}, %QueueItem{id: id_b}] = TestRepo.await(future) 117 | 118 | # top 119 | assert [%QueueItem{id: ^id_a}] = TestRepo.all(QueueItem, limit: 1, prefix: tenant) 120 | 121 | assert %QueueItem{} = TestRepo.get!(QueueItem, id_a, prefix: tenant) 122 | 123 | # pop (and top comes for free) 124 | top = 125 | TestRepo.transactional(tenant, fn -> 126 | [top] = TestRepo.all(QueueItem, limit: 1) 127 | TestRepo.delete!(%QueueItem{id: top.id}) 128 | end) 129 | 130 | assert %QueueItem{id: ^id_a} = top 131 | 132 | # next top 133 | assert [%QueueItem{id: ^id_b}] = TestRepo.all(QueueItem, limit: 1, prefix: tenant) 134 | end 135 | 136 | test "versionstamp schema with index", context do 137 | tenant = context[:tenant] 138 | 139 | future = 140 | TestRepo.transactional(tenant, fn -> 141 | TestRepo.async_insert_all!(QueueItem, [ 142 | %QueueItem{author: "Alice", data: "test_a"}, 143 | %QueueItem{author: "Bob", data: "test_b"} 144 | ]) 145 | end) 146 | 147 | [%QueueItem{id: id_a}, %QueueItem{id: id_b}] = TestRepo.await(future) 148 | 149 | assert %QueueItem{id: ^id_a} = TestRepo.get_by!(QueueItem, [author: "Alice"], prefix: tenant) 150 | assert %QueueItem{id: ^id_b} = TestRepo.get_by!(QueueItem, [author: "Bob"], prefix: tenant) 151 | end 152 | end 153 
| -------------------------------------------------------------------------------- /test/ecto/integration/upsert_test.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoIntegrationUpsertTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | alias Ecto.Integration.TestRepo 5 | alias EctoFoundationDB.Exception.Unsupported 6 | alias EctoFoundationDB.Schemas.User 7 | import Ecto.Query 8 | 9 | test "on_conflict: :raise", context do 10 | tenant = context[:tenant] 11 | 12 | user = TestRepo.insert!(%User{name: "John"}, prefix: tenant) 13 | 14 | assert_raise(Unsupported, ~r/Key exists/, fn -> 15 | TestRepo.insert!(%User{user | name: "NotJohn"}, prefix: tenant, on_conflict: :raise) 16 | end) 17 | end 18 | 19 | test "on_conflict: :nothing", context do 20 | tenant = context[:tenant] 21 | 22 | user = TestRepo.insert!(%User{name: "John"}, prefix: tenant) 23 | 24 | TestRepo.insert!(%User{user | name: "NotJohn"}, prefix: tenant, on_conflict: :nothing) 25 | 26 | assert %User{name: "John"} = TestRepo.get(User, user.id, prefix: tenant) 27 | end 28 | 29 | test "on_conflict: :replace_all", context do 30 | tenant = context[:tenant] 31 | 32 | user_a = 33 | TestRepo.insert!(%User{name: "John", inserted_at: ~N[2024-08-31 19:56:55]}, prefix: tenant) 34 | 35 | TestRepo.insert!(%User{id: user_a.id, name: "NotJohn"}, 36 | prefix: tenant, 37 | on_conflict: :replace_all 38 | ) 39 | 40 | user_b = TestRepo.get(User, user_a.id, prefix: tenant) 41 | 42 | assert user_a.name != user_b.name 43 | assert user_a.inserted_at != user_b.inserted_at 44 | end 45 | 46 | test "on_conflict: {:replace_all_except, fields}", context do 47 | tenant = context[:tenant] 48 | 49 | user_a = 50 | TestRepo.insert!( 51 | %User{ 52 | name: "John", 53 | inserted_at: ~N[2024-08-31 19:56:55], 54 | updated_at: ~N[2024-08-31 19:56:55] 55 | }, 56 | prefix: tenant 57 | ) 58 | 59 | TestRepo.insert!(%User{id: user_a.id, name: "NotJohn"}, 60 | prefix: tenant, 61 | on_conflict: 
{:replace_all_except, [:inserted_at]} 62 | ) 63 | 64 | user_b = TestRepo.get(User, user_a.id, prefix: tenant) 65 | 66 | assert user_a.name != user_b.name 67 | assert user_a.inserted_at == user_b.inserted_at 68 | assert user_a.updated_at != user_b.updated_at 69 | end 70 | 71 | test "on_conflict: {:replace, fields}", context do 72 | tenant = context[:tenant] 73 | 74 | user_a = 75 | TestRepo.insert!( 76 | %User{ 77 | name: "John", 78 | inserted_at: ~N[2024-08-31 19:56:55], 79 | updated_at: ~N[2024-08-31 19:56:55] 80 | }, 81 | prefix: tenant 82 | ) 83 | 84 | TestRepo.insert!(%User{id: user_a.id, name: "NotJohn"}, 85 | prefix: tenant, 86 | on_conflict: {:replace, [:name]} 87 | ) 88 | 89 | user_b = TestRepo.get(User, user_a.id, prefix: tenant) 90 | 91 | assert user_a.name != user_b.name 92 | assert user_a.inserted_at == user_b.inserted_at 93 | assert user_a.updated_at == user_b.updated_at 94 | end 95 | 96 | test "on_conflict: keyword list", context do 97 | tenant = context[:tenant] 98 | user = TestRepo.insert!(%User{name: "John"}, prefix: tenant) 99 | 100 | assert_raise( 101 | Unsupported, 102 | ~r/not supported/, 103 | fn -> 104 | TestRepo.insert!(%User{id: user.id, name: "NotJohn"}, 105 | prefix: tenant, 106 | on_conflict: [set: [name: "foo"]] 107 | ) 108 | end 109 | ) 110 | end 111 | 112 | test "on_conflict: Query", context do 113 | tenant = context[:tenant] 114 | user = TestRepo.insert!(%User{name: "John"}, prefix: tenant) 115 | 116 | assert_raise( 117 | Unsupported, 118 | ~r/not supported/, 119 | fn -> 120 | TestRepo.insert!(%User{id: user.id, name: "NotJohn"}, 121 | prefix: tenant, 122 | on_conflict: from(u in User, update: [set: [name: "foo"]]) 123 | ) 124 | end 125 | ) 126 | end 127 | 128 | test "conflict_target: []", context do 129 | tenant = context[:tenant] 130 | 131 | user_a = 132 | TestRepo.insert!(%User{name: "John", inserted_at: ~N[2024-08-31 19:56:55]}, prefix: tenant) 133 | 134 | # In using `conflict_target: []`, we pretend that the data doesn't exist. 
This speeds 135 | # up data loading but can result in inconsistent indexes if objects do exist in 136 | # the database that are being blindly overwritten. It should be used with extreme caution. 137 | # 138 | # `TestRepo.insert!` can be used here but instead we use `TestRepo.async_insert_all!` so that we 139 | # can exercise that path 140 | f = 141 | TestRepo.transactional(tenant, fn -> 142 | TestRepo.async_insert_all!(User, [%User{id: user_a.id, name: "NotJohn"}], 143 | conflict_target: [] 144 | ) 145 | end) 146 | 147 | [_user_b] = TestRepo.await(f) 148 | 149 | user_b = TestRepo.get(User, user_a.id, prefix: tenant) 150 | 151 | assert user_a.name != user_b.name 152 | assert user_a.inserted_at != user_b.inserted_at 153 | end 154 | end 155 | -------------------------------------------------------------------------------- /test/ecto/integration/pipeline_test.exs: -------------------------------------------------------------------------------- 1 | defmodule EctoIntegrationPipelineTest do 2 | use Ecto.Integration.Case, async: true 3 | 4 | import Ecto.Query 5 | 6 | alias Ecto.Integration.TestRepo 7 | alias EctoFoundationDB.Schemas.User 8 | 9 | test "pipelining", context do 10 | tenant = context[:tenant] 11 | 12 | ts = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second) 13 | template = %{name: "", inserted_at: ts, updated_at: ts} 14 | 15 | {2, nil} = 16 | TestRepo.transactional( 17 | tenant, 18 | fn -> 19 | users = [%{template | name: "John"}, %{template | name: "James"}] 20 | TestRepo.insert_all(User, users) 21 | end 22 | ) 23 | 24 | john = TestRepo.get_by!(User, [name: "John"], prefix: tenant) 25 | james = TestRepo.get_by!(User, [name: "James"], prefix: tenant) 26 | 27 | [john, james] = 28 | TestRepo.transactional( 29 | tenant, 30 | fn -> 31 | futures = 32 | [f1, f2] = [ 33 | TestRepo.async_get(User, john.id), 34 | TestRepo.async_get(User, james.id) 35 | ] 36 | 37 | assert nil != f1.ref 38 | assert nil != f2.ref 39 | assert nil == f1.result 40 | assert nil == 
f2.result 41 | 42 | TestRepo.await(futures) 43 | end 44 | ) 45 | 46 | assert john.name == "John" 47 | assert james.name == "James" 48 | 49 | [john, james] = 50 | TestRepo.transactional( 51 | tenant, 52 | fn -> 53 | futures = 54 | [f1, f2] = [ 55 | TestRepo.async_get_by(User, name: "John"), 56 | TestRepo.async_get_by(User, name: "James") 57 | ] 58 | 59 | assert nil != f1.ref 60 | assert nil != f2.ref 61 | assert nil == f1.result 62 | assert nil == f2.result 63 | 64 | TestRepo.await(futures) 65 | end 66 | ) 67 | 68 | assert john.name == "John" 69 | assert james.name == "James" 70 | 71 | [all_john, all_james] = 72 | TestRepo.transactional( 73 | tenant, 74 | fn -> 75 | futures = 76 | [f1, f2] = [ 77 | TestRepo.async_all(from(u in User, where: u.name == ^"John")), 78 | TestRepo.async_all(from(u in User, where: u.name == ^"James")) 79 | ] 80 | 81 | assert nil != f1.ref 82 | assert nil != f2.ref 83 | assert nil == f1.result 84 | assert nil == f2.result 85 | 86 | TestRepo.await(futures) 87 | end 88 | ) 89 | 90 | assert hd(all_john).name == "John" 91 | assert hd(all_james).name == "James" 92 | 93 | [john, james] = 94 | TestRepo.transactional( 95 | tenant, 96 | fn -> 97 | futures = 98 | [f1, f2] = [ 99 | TestRepo.async_one(from(u in User, where: u.name == ^"John")), 100 | TestRepo.async_one(from(u in User, where: u.name == ^"James")) 101 | ] 102 | 103 | assert nil != f1.ref 104 | assert nil != f2.ref 105 | assert nil == f1.result 106 | assert nil == f2.result 107 | 108 | TestRepo.await(futures) 109 | end 110 | ) 111 | 112 | assert john.name == "John" 113 | assert james.name == "James" 114 | end 115 | 116 | test "safe insert without conflict_target", context do 117 | # The implementation of this test is identical to Repo.insert_all, 118 | # but we include it here as a nontrivial example of several EctoFDB 119 | # features working together. 
120 | # 121 | # - Transactions: FDB transactions are ACID and with serializable isolation 122 | # - Pipelining: async_get is used to efficiently check for the existence of 123 | # the data. 124 | # - Upsert with conflict_target: conflict_target is ignored (with `[]`) 125 | # because we've manually confirmed the data does not exist in the DB 126 | # 127 | 128 | tenant = context[:tenant] 129 | 130 | # Here is the data we wish to load into the DB 131 | users = [ 132 | %User{id: Ecto.UUID.autogenerate(), name: "John"}, 133 | %User{id: Ecto.UUID.autogenerate(), name: "James"} 134 | ] 135 | 136 | # Here is the nontrival transaction that we are testing 137 | load_fn = fn -> 138 | TestRepo.transactional( 139 | tenant, 140 | fn -> 141 | nils = 142 | for(u <- users, do: TestRepo.async_get(User, u.id)) 143 | |> TestRepo.await() 144 | 145 | if Enum.all?(nils, &is_nil/1) do 146 | for(u <- users, do: TestRepo.insert!(u, conflict_target: [])) 147 | else 148 | raise "Conflict" 149 | end 150 | end 151 | ) 152 | end 153 | 154 | # The first time we call load_fn, it inserts the data 155 | [john, james] = load_fn.() 156 | 157 | assert john.name == "John" 158 | assert james.name == "James" 159 | 160 | # The second time, it detects the conflict 161 | assert_raise(RuntimeError, ~r/Conflict/, load_fn) 162 | end 163 | end 164 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "benchee": {:hex, :benchee, "1.3.1", "c786e6a76321121a44229dde3988fc772bca73ea75170a73fd5f4ddf1af95ccf", [:mix], [{:deep_merge, "~> 1.0", [hex: :deep_merge, repo: "hexpm", optional: false]}, {:statistex, "~> 1.0", [hex: :statistex, repo: "hexpm", optional: false]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "76224c58ea1d0391c8309a8ecbfe27d71062878f59bd41a390266bf4ac1cc56d"}, 3 | "bunt": {:hex, :bunt, "1.0.0", 
"081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"}, 4 | "credo": {:hex, :credo, "1.7.7", "771445037228f763f9b2afd612b6aa2fd8e28432a95dbbc60d8e03ce71ba4446", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "8bc87496c9aaacdc3f90f01b7b0582467b69b4bd2441fe8aae3109d843cc2f2e"}, 5 | "decimal": {:hex, :decimal, "2.3.0", "3ad6255aa77b4a3c4f818171b12d237500e63525c2fd056699967a3e7ea20f62", [:mix], [], "hexpm", "a4d66355cb29cb47c3cf30e71329e58361cfcb37c34235ef3bf1d7bf3773aeac"}, 6 | "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, 7 | "dialyxir": {:hex, :dialyxir, "1.4.3", "edd0124f358f0b9e95bfe53a9fcf806d615d8f838e2202a9f430d59566b6b53b", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "bf2cfb75cd5c5006bec30141b131663299c661a864ec7fbbc72dfa557487a986"}, 8 | "earmark_parser": {:hex, :earmark_parser, "1.4.39", "424642f8335b05bb9eb611aa1564c148a8ee35c9c8a8bba6e129d51a3e3c6769", [:mix], [], "hexpm", "06553a88d1f1846da9ef066b87b57c6f605552cfbe40d20bd8d59cc6bde41944"}, 9 | "ecto": {:hex, :ecto, "3.12.2", "bae2094f038e9664ce5f089e5f3b6132a535d8b018bd280a485c2f33df5c0ce1", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "492e67c70f3a71c6afe80d946d3ced52ecc57c53c9829791bfff1830ff5a1f0c"}, 10 | "erlex": {:hex, :erlex, "0.2.6", "c7987d15e899c7a2f34f5420d2a2ea0d659682c06ac607572df55a43753aa12e", [:mix], 
[], "hexpm", "2ed2e25711feb44d52b17d2780eabf998452f6efda104877a3881c2f8c0c0c75"}, 11 | "erlfdb": {:hex, :erlfdb, "0.3.1", "5156b77f8eb97654f78409202432fbdf4eb3243f2b1b156ad016f1efb971fa7e", [:rebar3], [], "hexpm", "ff0f74893de322c119a0176fad7ed841b9d155821c55d1e9ad280e5223bfbe6a"}, 12 | "ex_doc": {:hex, :ex_doc, "0.34.2", "13eedf3844ccdce25cfd837b99bea9ad92c4e511233199440488d217c92571e8", [:mix], [{:earmark_parser, "~> 1.4.39", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "5ce5f16b41208a50106afed3de6a2ed34f4acfd65715b82a0b84b49d995f95c1"}, 13 | "file_system": {:hex, :file_system, "1.0.0", "b689cc7dcee665f774de94b5a832e578bd7963c8e637ef940cd44327db7de2cd", [:mix], [], "hexpm", "6752092d66aec5a10e662aefeed8ddb9531d79db0bc145bb8c40325ca1d8536d"}, 14 | "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, 15 | "makeup": {:hex, :makeup, "1.1.1", "fa0bc768698053b2b3869fa8a62616501ff9d11a562f3ce39580d60860c3a55e", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "5dc62fbdd0de44de194898b6710692490be74baa02d9d108bc29f007783b0b48"}, 16 | "makeup_elixir": {:hex, :makeup_elixir, "0.16.2", "627e84b8e8bf22e60a2579dad15067c755531fea049ae26ef1020cad58fe9578", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", 
"41193978704763f6bbe6cc2758b84909e62984c7752b3784bd3c218bb341706b"}, 17 | "makeup_erlang": {:hex, :makeup_erlang, "0.1.5", "e0ff5a7c708dda34311f7522a8758e23bfcd7d8d8068dc312b5eb41c6fd76eba", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "94d2e986428585a21516d7d7149781480013c56e30c6a233534bedf38867a59a"}, 18 | "nimble_parsec": {:hex, :nimble_parsec, "1.4.0", "51f9b613ea62cfa97b25ccc2c1b4216e81df970acd8e16e8d1bdc58fef21370d", [:mix], [], "hexpm", "9c565862810fb383e9838c1dd2d7d2c437b3d13b267414ba6af33e50d2d1cf28"}, 19 | "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, 20 | "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, 21 | } 22 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/layer/primary_kv_codec/stream_decoder.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Layer.PrimaryKVCodec.StreamDecoder do 2 | @moduledoc false 3 | alias EctoFoundationDB.Layer.DecodedKV 4 | alias EctoFoundationDB.Layer.InternalMetadata 5 | alias EctoFoundationDB.Layer.Pack 6 | alias EctoFoundationDB.Layer.PrimaryKVCodec 7 | alias EctoFoundationDB.Tenant 8 | alias EctoFoundationDB.Versionstamp 9 | 10 | def stream_decode(kvs, tenant, opts) do 11 | Stream.transform( 12 | kvs, 13 | fn -> stream_decode_start(tenant, opts) end, 14 | &stream_decode_reducer/2, 15 | &stream_decode_last/1, 16 | &stream_decode_after/1 17 | ) 18 | end 19 | 20 | defp stream_decode_start(tenant, opts) do 21 | emit_db_key? 
= Keyword.get(opts, :emit_db_key?, false) 22 | 23 | %{ 24 | db_key: nil, 25 | key_tuple: nil, 26 | values: [], 27 | meta: nil, 28 | tenant: tenant, 29 | emit_db_key?: emit_db_key? 30 | } 31 | end 32 | 33 | defp stream_decode_reducer( 34 | {k, v}, 35 | acc = %{meta: nil, tenant: tenant, emit_db_key?: emit_db_key?} 36 | ) do 37 | # v is either a standard ecto object or metadata for a multikey object 38 | # To discover which one, we must convert it from the binary. 39 | # 40 | # If this step crashes, it means that we've unexpectedly encountered an individual 41 | # multikey key-value without having previously found the metadata to guide us to decode it 42 | v = Pack.from_fdb_value(v) 43 | 44 | metadata_key = PrimaryKVCodec.metadata_key() 45 | 46 | case InternalMetadata.fetch(v) do 47 | {:ok, {^metadata_key, meta}} -> 48 | # Found a multikey object, so we start processing it 49 | key_tuple = Tenant.unpack(tenant, k) 50 | {[], %{acc | db_key: k, key_tuple: key_tuple, values: [], meta: meta}} 51 | 52 | {:ok, metadata} -> 53 | raise ArgumentError, """ 54 | EctoFoundationDB encountered metadata #{metadata}. We don't know how to process this. 55 | 56 | Data: #{inspect(v)} 57 | """ 58 | 59 | :error -> 60 | key_tuple = Tenant.unpack(tenant, k) 61 | data_object = extract_complete_vs(key_tuple, v) 62 | 63 | item = %DecodedKV{ 64 | codec: Pack.primary_write_key_to_codec(tenant, k), 65 | data_object: data_object 66 | } 67 | 68 | {emit(emit_db_key?, k, item), %{acc | db_key: nil, key_tuple: nil, values: [], meta: nil}} 69 | end 70 | end 71 | 72 | defp stream_decode_reducer( 73 | {k, v}, 74 | acc = %{ 75 | db_key: orig_key, 76 | key_tuple: key_tuple, 77 | values: values, 78 | meta: {n, i, crc}, 79 | tenant: tenant, 80 | emit_db_key?: emit_db_key? 
81 | } 82 | ) do 83 | split_key_tuple = Tenant.unpack(tenant, k) 84 | 85 | case parse_codec_metadata_tuple(split_key_tuple) do 86 | {true, {^n, i2, ^crc}} when i2 == i + 1 and n == i2 + 1 -> 87 | fdb_value = :erlang.iolist_to_binary(Enum.reverse([v | values])) 88 | 89 | case :erlang.crc32(fdb_value) do 90 | ^crc -> 91 | data_object = Pack.from_fdb_value(fdb_value) 92 | data_object = extract_complete_vs(key_tuple, data_object) 93 | 94 | item = 95 | %DecodedKV{ 96 | codec: Pack.primary_write_key_to_codec(tenant, key_tuple), 97 | data_object: data_object, 98 | multikey?: true 99 | } 100 | 101 | {emit(emit_db_key?, orig_key, item), %{acc | key_tuple: nil, values: [], meta: nil}} 102 | 103 | other_crc -> 104 | raise """ 105 | Metadata error. Encountered: CRC #{other_crc}, Expected: CRC #{crc}, 106 | """ 107 | end 108 | 109 | {true, meta = {^n, i2, ^crc}} when i2 == i + 1 and i2 < n -> 110 | {[], %{acc | values: [v | values], meta: meta}} 111 | 112 | other -> 113 | raise """ 114 | Metadata error. 
Previous: #{inspect({n, i, crc})}, Encountered: #{inspect(other)} 115 | """ 116 | end 117 | end 118 | 119 | defp stream_decode_last(acc), do: {[], acc} 120 | 121 | defp stream_decode_after(_acc), do: :ok 122 | 123 | defp extract_complete_vs(key_tuple, data_object) do 124 | [{pk_field, stored_pk} | data_object_rest] = data_object 125 | 126 | # When an incomplete versionstamp is stored in the value, we need to retrieve the pk from the key 127 | if Versionstamp.incomplete?(stored_pk) do 128 | [{pk_field, Pack.get_vs_from_key_tuple(key_tuple)} | data_object_rest] 129 | else 130 | data_object 131 | end 132 | end 133 | 134 | defp parse_codec_metadata_tuple(key_tuple) do 135 | last_element = elem(key_tuple, tuple_size(key_tuple) - 1) 136 | 137 | case last_element do 138 | meta = {_n, _i, _crc} -> 139 | # the only nested tuple we use is for this codec, so if the last 140 | # element is a tuple, we know it's our tuple 141 | {true, meta} 142 | 143 | _ -> 144 | false 145 | end 146 | end 147 | 148 | defp emit(false, _key, item), do: [item] 149 | defp emit(true, key, item), do: [{key, item}] 150 | end 151 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/tenant/backend.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Tenant.Backend do 2 | @moduledoc false 3 | alias EctoFoundationDB.Database 4 | alias EctoFoundationDB.Layer.Pack 5 | alias EctoFoundationDB.Options 6 | alias EctoFoundationDB.Tenant 7 | 8 | @type txobj() :: :erlfdb.database() | :erlfdb.tenant() 9 | @type ref() :: any() 10 | @type db_object() :: any() 11 | @type opened() :: any() 12 | @type meta() :: any() 13 | @type tenant_name() :: :erlfdb.tenant_name() 14 | 15 | @callback txobj(db :: :erlfdb.database(), opened :: opened(), meta :: meta()) :: txobj() 16 | @callback ref(opened :: opened(), meta :: meta()) :: ref() 17 | @callback make_meta(opened :: opened()) :: meta() 18 | @callback list(db ::
:erlfdb.database(), options :: Options.t()) :: list(db_object()) 19 | @callback create(db :: :erlfdb.database(), tenant_name :: tenant_name(), options :: Options.t()) :: 20 | :ok | {:error, :tenant_already_exists} 21 | @callback delete(db :: :erlfdb.database(), tenant_name :: tenant_name(), options :: Options.t()) :: 22 | :ok 23 | @callback get(db :: :erlfdb.database(), tenant_name :: tenant_name(), options :: Options.t()) :: 24 | {:ok, db_object()} | {:error, :tenant_does_not_exist} 25 | @callback open(db :: :erlfdb.database(), tenant_name :: tenant_name(), options :: Options.t()) :: 26 | opened() 27 | @callback all_data_ranges(meta :: meta()) :: list(tuple()) 28 | @callback get_name(id :: String.t(), options :: Options.t()) :: String.t() 29 | @callback get_id(db_object :: db_object(), options :: Options.t()) :: String.t() 30 | @callback extend_tuple(tuple :: tuple(), meta :: meta()) :: tuple() 31 | @callback extract_tuple(tuple :: tuple(), meta :: meta()) :: tuple() 32 | 33 | @spec db_open!(Database.t(), Tenant.id(), Options.t()) :: Tenant.t() 34 | def db_open!(db, id, options) do 35 | :ok = ensure_created(db, id, options) 36 | db_open(db, id, options) 37 | end 38 | 39 | @doc """ 40 | If the tenant doesn't exist, create it. Otherwise, no-op. 41 | """ 42 | @spec ensure_created(Database.t(), Tenant.id(), Options.t()) :: :ok 43 | def ensure_created(db, id, options) do 44 | case exists?(db, id, options) do 45 | true -> :ok 46 | false -> create(db, id, options) 47 | end 48 | end 49 | 50 | @doc """ 51 | Returns true if the tenant exists in the database. False otherwise. 
52 | """ 53 | @spec exists?(Database.t(), Tenant.id(), Options.t()) :: boolean() 54 | def exists?(db, id, options) do 55 | case get(db, id, options) do 56 | {:ok, _} -> true 57 | {:error, :tenant_does_not_exist} -> false 58 | end 59 | end 60 | 61 | @spec db_open(Database.t(), Tenant.id(), Options.t()) :: Tenant.t() 62 | def db_open(db, id, options) do 63 | module = get_module(options) 64 | tenant_name = module.get_name(id, options) 65 | opened = module.open(db, tenant_name, options) 66 | meta = module.make_meta(opened) 67 | ref = module.ref(opened, meta) 68 | 69 | %Tenant{ 70 | id: id, 71 | backend: meta.__struct__, 72 | ref: ref, 73 | txobj: module.txobj(db, opened, meta), 74 | meta: meta, 75 | options: [ 76 | metadata_cache: Options.get(options, :metadata_cache) 77 | ] 78 | } 79 | end 80 | 81 | @spec list(Database.t(), Options.t()) :: [Tenant.id()] 82 | def list(db, options) do 83 | module = get_module(options) 84 | 85 | list = module.list(db, options) 86 | 87 | for db_object <- list do 88 | module.get_id(db_object, options) 89 | end 90 | end 91 | 92 | @spec create(Database.t(), Tenant.id(), Options.t()) :: :ok 93 | def create(db, id, options) do 94 | module = get_module(options) 95 | tenant_name = module.get_name(id, options) 96 | 97 | module.create(db, tenant_name, options) 98 | end 99 | 100 | @spec clear(Database.t(), Tenant.id(), Options.t()) :: :ok 101 | def clear(db, id, options) do 102 | tenant = db_open(db, id, options) 103 | 104 | ranges = 105 | get_module(options).all_data_ranges(tenant.meta) 106 | 107 | :erlfdb.transactional(Tenant.txobj(tenant), fn tx -> 108 | for {start_key, end_key} <- ranges, do: :erlfdb.clear_range(tx, start_key, end_key) 109 | end) 110 | 111 | :ok 112 | end 113 | 114 | @spec empty(Database.t(), Tenant.id(), Options.t()) :: :ok 115 | def empty(db, id, options) do 116 | tenant = db_open(db, id, options) 117 | 118 | {start_key, end_key} = 119 | Pack.adapter_repo_range(tenant) 120 | 121 | :erlfdb.transactional(Tenant.txobj(tenant), 
fn tx -> 122 | :erlfdb.clear_range(tx, start_key, end_key) 123 | end) 124 | 125 | :ok 126 | end 127 | 128 | @spec delete(Database.t(), Tenant.id(), Options.t()) :: :ok | {:error, atom()} 129 | def delete(db, id, options) do 130 | module = get_module(options) 131 | tenant_name = module.get_name(id, options) 132 | 133 | module.delete(db, tenant_name, options) 134 | end 135 | 136 | def get(db, id, options) do 137 | module = get_module(options) 138 | tenant_name = module.get_name(id, options) 139 | module.get(db, tenant_name, options) 140 | end 141 | 142 | defp get_module(options) do 143 | Options.get(options, :tenant_backend) 144 | end 145 | 146 | def set_option(tenant, key, value) do 147 | %Tenant{options: options} = tenant 148 | %Tenant{tenant | options: Keyword.merge(options, [{key, value}])} 149 | end 150 | end 151 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/tenant/directory_tenant.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Tenant.DirectoryTenant do 2 | @moduledoc """ 3 | The default backend for EctoFDB multitenancy. 4 | 5 | It uses the Directory Layer defined by `:erlfdb_directory` to 6 | partition tenant keyspaces from each other. There is no guarantee 7 | provided by FDB at the transaction-level, so care must be taken to 8 | always pack keys appropriately. Standard use of Ecto.Repo functions 9 | is guaranteed to respect tenant boundaries. 
10 | """ 11 | defstruct [:ref, :node, :prefix] 12 | 13 | alias EctoFoundationDB.Options 14 | alias EctoFoundationDB.Tenant.Backend 15 | 16 | @behaviour Backend 17 | 18 | @type t() :: %__MODULE__{} 19 | 20 | @impl true 21 | def txobj(db, _opened, _meta), do: db 22 | @impl true 23 | def ref(_opened, %__MODULE__{ref: ref}), do: ref 24 | @impl true 25 | def make_meta(node), do: %__MODULE__{ref: make_ref(), node: node, prefix: prefix(node)} 26 | @impl true 27 | def get_name(id, _options), do: id 28 | @impl true 29 | def list(db, options), do: :erlfdb_directory.list(db, tenant_node(db, options)) 30 | 31 | @impl true 32 | def create(db, "", options) do 33 | tenant_node(db, options) 34 | :ok 35 | end 36 | 37 | def create(db, tenant_name, options) do 38 | :erlfdb_directory.create(db, tenant_node(db, options), tenant_name) 39 | :ok 40 | rescue 41 | e in ErlangError -> 42 | case e do 43 | %ErlangError{ 44 | original: {:erlfdb_directory, {:create_error, :path_exists, _path}} 45 | } -> 46 | {:error, :tenant_already_exists} 47 | end 48 | end 49 | 50 | @impl true 51 | def delete(db, "", options) do 52 | tenant_dir_name = "#{Options.get(options, :storage_id)}" 53 | :persistent_term.erase({__MODULE__, tenant_dir_name}) 54 | 55 | try do 56 | :erlfdb_directory.remove(db, root_node(), tenant_dir_name) 57 | rescue 58 | e in ErlangError -> 59 | case e do 60 | %ErlangError{ 61 | original: {:erlfdb_directory, {:delete_error, :path_missing, _path}} 62 | } -> 63 | :ok 64 | end 65 | end 66 | end 67 | 68 | def delete(db, tenant_name, options) do 69 | :erlfdb_directory.remove(db, tenant_node(db, options), tenant_name) 70 | rescue 71 | e in ErlangError -> 72 | case e do 73 | %ErlangError{ 74 | original: {:erlfdb_directory, {:delete_error, :path_missing, _path}} 75 | } -> 76 | :ok 77 | end 78 | end 79 | 80 | @impl true 81 | def get(db, "", options) do 82 | tenant_dir_name = "#{Options.get(options, :storage_id)}" 83 | 84 | if :erlfdb_directory.exists(db, root_node(), tenant_dir_name) do 85 | 
{:ok, tenant_node(db, options)} 86 | else 87 | {:error, :tenant_does_not_exist} 88 | end 89 | end 90 | 91 | def get(db, tenant_name, options) do 92 | if :erlfdb_directory.exists(db, tenant_node(db, options), tenant_name) do 93 | {:ok, open(db, tenant_name, options)} 94 | else 95 | {:error, :tenant_does_not_exist} 96 | end 97 | end 98 | 99 | @impl true 100 | def open(db, "", options), do: tenant_node(db, options) 101 | 102 | def open(db, tenant_name, options), 103 | do: :erlfdb_directory.open(db, tenant_node(db, options), tenant_name) 104 | 105 | @impl true 106 | def all_data_ranges(meta) do 107 | prefix = prefix(meta.node) 108 | 109 | [ 110 | # subspace keys (how this adapter encourages the user to write custom data) 111 | {prefix, :erlfdb_key.strinc(prefix)}, 112 | 113 | # tupled keys (how this adapter writes Ecto data) 114 | :erlfdb_tuple.range({prefix}) 115 | ] 116 | end 117 | 118 | @impl true 119 | def get_id({{:utf8, id}, _node}, _options), do: id 120 | 121 | @impl true 122 | def extend_tuple(x, meta), do: add_tuple_head(x, meta.prefix) 123 | 124 | @impl true 125 | def extract_tuple(tuple, _meta), do: delete_tuple_head(tuple) 126 | 127 | defp add_tuple_head(tuple, head) when is_tuple(tuple) do 128 | :erlang.insert_element(1, tuple, head) 129 | end 130 | 131 | defp add_tuple_head(list, head) when is_list(list) do 132 | [head | list] 133 | |> :erlang.list_to_tuple() 134 | end 135 | 136 | defp add_tuple_head(function, head) when is_function(function) do 137 | function.(1) 138 | |> add_tuple_head(head) 139 | end 140 | 141 | defp delete_tuple_head(tuple) do 142 | :erlang.delete_element(1, tuple) 143 | end 144 | 145 | defp tenant_node(db, options) do 146 | tenant_dir_name = "#{Options.get(options, :storage_id)}" 147 | 148 | case :persistent_term.get({__MODULE__, tenant_dir_name}, nil) do 149 | nil -> 150 | tenant_dir = :erlfdb_directory.create_or_open(db, root_node(), tenant_dir_name) 151 | :persistent_term.put({__MODULE__, tenant_dir_name}, tenant_dir) 152 | 
tenant_dir 153 | 154 | tenant_dir -> 155 | tenant_dir 156 | end 157 | end 158 | 159 | defp root_node() do 160 | case :persistent_term.get({__MODULE__, :root}, nil) do 161 | nil -> 162 | root = :erlfdb_directory.root(node_prefix: <<0xFE>>, content_prefix: <<>>) 163 | :persistent_term.put({__MODULE__, :root}, root) 164 | root 165 | 166 | root -> 167 | root 168 | end 169 | end 170 | 171 | defp prefix(node) do 172 | subspace = :erlfdb_directory.get_subspace(node) 173 | :erlfdb_subspace.key(subspace) 174 | end 175 | end 176 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/indexer.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Indexer do 2 | @moduledoc """ 3 | Implement this behaviour to create a custom index. 4 | 5 | Each Indexer has access to read, write, and **clear** any and all data in the database. 6 | A faulty implementation may lead to data loss or corruption. 7 | """ 8 | alias EctoFoundationDB.Index 9 | alias EctoFoundationDB.Layer.Metadata 10 | alias EctoFoundationDB.Layer.PrimaryKVCodec 11 | alias EctoFoundationDB.QueryPlan 12 | alias EctoFoundationDB.Tenant 13 | 14 | @callback create_range(Tenant.t(), Index.t()) :: {:erlfdb.key(), :erlfdb.key()} 15 | @callback drop_ranges(Tenant.t(), Index.t()) :: 16 | list(:erlfdb.key()) | list({:erlfdb.key(), :erlfdb.key()}) 17 | @callback create( 18 | Tenant.t(), 19 | :erlfdb.transaction(), 20 | Index.t(), 21 | Ecto.Schema.t(), 22 | tuple(), 23 | integer() 24 | ) :: 25 | {integer(), {:erlfdb.key(), :erlfdb.key()}} 26 | @callback set(Tenant.t(), :erlfdb.transaction(), Index.t(), Ecto.Schema.t(), tuple()) :: :ok 27 | @callback clear(Tenant.t(), :erlfdb.transaction(), Index.t(), Ecto.Schema.t(), tuple()) :: :ok 28 | @callback update( 29 | Tenant.t(), 30 | :erlfdb.transaction(), 31 | Index.t(), 32 | Ecto.Schema.t(), 33 | tuple(), 34 | Keyword.t() 35 | ) :: :ok 36 | @callback range(Index.t(), 
QueryPlan.t(), Keyword.t()) :: tuple() 37 | @callback unpack(Index.t(), QueryPlan.t(), tuple()) :: tuple() 38 | @optional_callbacks update: 6, unpack: 3 39 | 40 | def create_range(tenant, idx), 41 | do: idx[:indexer].create_range(tenant, idx) 42 | 43 | def drop_ranges(tenant, idx), 44 | do: idx[:indexer].drop_ranges(tenant, idx) 45 | 46 | def create(tenant, tx, idx, schema, range, limit), 47 | do: idx[:indexer].create(tenant, tx, idx, schema, range, limit) 48 | 49 | def set(tenant, tx, metadata, schema, kv = {kv_codec, _}) do 50 | %Metadata{indexes: idxs, partial_indexes: partial_idxs} = metadata 51 | idxs = idxs ++ filter_partials(partial_idxs, kv_codec.packed, []) 52 | 53 | for idx <- idxs, 54 | do: idx[:indexer].set(tenant, tx, idx, schema, kv) 55 | end 56 | 57 | def clear(tenant, tx, metadata, schema, kv = {kv_codec, _}) do 58 | %Metadata{indexes: idxs, partial_indexes: partial_idxs} = metadata 59 | idxs = idxs ++ filter_partials(partial_idxs, kv_codec.packed, []) 60 | 61 | for idx <- idxs, 62 | do: idx[:indexer].clear(tenant, tx, idx, schema, kv) 63 | end 64 | 65 | def update(tenant, tx, metadata, schema, kv = {kv_codec, _}, updates) do 66 | %Metadata{indexes: idxs, partial_indexes: partial_idxs} = metadata 67 | idxs = idxs ++ filter_partials(partial_idxs, kv_codec.packed, []) 68 | 69 | for idx <- idxs do 70 | apply( 71 | idx[:indexer], 72 | :update, 73 | [tenant, tx, idx, schema, kv, updates], 74 | &_update/6 75 | ) 76 | end 77 | end 78 | 79 | def range(idx, plan, options), 80 | do: idx[:indexer].range(idx, plan, options) 81 | 82 | def unpack(idx, plan, fdb_kv), 83 | do: apply(idx[:indexer], :unpack, [idx, plan, fdb_kv], &_unpack/3) 84 | 85 | ## Default behavior for standard key-value response 86 | defp _unpack(_idx, plan, {fdb_key, fdb_value}) do 87 | [kv] = 88 | [{fdb_key, fdb_value}] 89 | |> PrimaryKVCodec.stream_decode(plan.tenant) 90 | |> Enum.to_list() 91 | 92 | kv 93 | end 94 | 95 | # Default behavior for get_mapped_range response 96 | defp 
_unpack(_idx, _plan, {{_pkey, _pvalue}, {_skeybegin, _skeyend}, []}), 97 | do: nil 98 | 99 | defp _unpack(_idx, plan, {{_pkey, _pvalue}, {_skeybegin, _skeyend}, fdb_kvs}) do 100 | [kv] = 101 | fdb_kvs 102 | |> PrimaryKVCodec.stream_decode(plan.tenant) 103 | |> Enum.to_list() 104 | 105 | kv 106 | end 107 | 108 | defp _update(tenant, tx, idx, schema, kv, updates) do 109 | if Keyword.get(idx[:options], :mapped?, true) do 110 | index_fields = idx[:fields] 111 | set_data = updates[:set] || [] 112 | clear_data = updates[:clear] || [] 113 | 114 | x = 115 | MapSet.intersection( 116 | MapSet.new(clear_data ++ Keyword.keys(set_data)), 117 | MapSet.new(index_fields) 118 | ) 119 | 120 | if MapSet.size(x) == 0 do 121 | :ok 122 | else 123 | __update(tenant, tx, idx, schema, kv, updates) 124 | end 125 | else 126 | __update(tenant, tx, idx, schema, kv, updates) 127 | end 128 | end 129 | 130 | defp __update(tenant, tx, idx, schema, kv = {kv_codec, v}, updates) do 131 | idx[:indexer].clear(tenant, tx, idx, schema, kv) 132 | 133 | kv = 134 | {kv_codec, 135 | v 136 | |> Keyword.merge(updates[:set] || []) 137 | |> Keyword.drop(updates[:clear] || [])} 138 | 139 | idx[:indexer].set(tenant, tx, idx, schema, kv) 140 | end 141 | 142 | defp apply(module, fun, args, default_fun) do 143 | if function_exported?(module, fun, length(args)) do 144 | apply(module, fun, args) 145 | else 146 | apply(default_fun, args) 147 | end 148 | end 149 | 150 | defp filter_partials([], _k, acc) do 151 | Enum.reverse(acc) 152 | end 153 | 154 | defp filter_partials( 155 | [{partial_idx, {start_key, cursor_key, _end_key}} | partial_idxs], 156 | fdb_key, 157 | acc 158 | ) do 159 | if fdb_key >= start_key and fdb_key < cursor_key do 160 | filter_partials(partial_idxs, fdb_key, [partial_idx | acc]) 161 | else 162 | filter_partials(partial_idxs, fdb_key, acc) 163 | end 164 | end 165 | end 166 | -------------------------------------------------------------------------------- /CHANGELOG.md: 
-------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## v0.5.1 (TBD) 4 | 5 | ### Enhancements 6 | 7 | * Improved support for `order_by` + `limit` queries. 8 | * Caller may provide query to run on assign_ready for SchemaMetadata watches. 9 | 10 | ## v0.5.0 (2025-07-08) 11 | 12 | ### Enhancements 13 | 14 | * `EctoFoundationDB.CLI` [[doc](operators_manual.md)]: Functions for an operator to use to safely manage 15 | data migrations that cannot be done automatically. 16 | * Index Metadata [[doc](metadata.html)] now makes use of FoundationDB's `\xff/metadataVersion` key, 17 | which allows the client to cache metadata and maintain transactional isolation 18 | without having to wait on any keys. Also, the cache is shared across all open tenants of the same id on a given node. 19 | * `EctoFoundationDB.Versionstamp` [[doc](Ecto.Adapters.FoundationDB.html#module-versionstamps-autoincrement)]: Added the ability to insert objects with a monotonically increasing integer id, via FoundationDB's versionstamp. 20 | * `SchemaMetadata` [[doc](Ecto.Adapters.FoundationDB.html#module-schema-metadata)]: This is a new built-in Indexer that allows your app to watch and sync collection of objects in a tenant. 21 | * Added ability to drop an index. 22 | 23 | ### Breaking changes 24 | 25 | * Key construction has changed, making databases created on <= 0.4.x incompatible with >=0.5. Specifically, a binary, atom, or 26 | number primary key is now encoded in the FDB key with the Tuple layer. All other types remain encoded with term_to_binary. 27 | If you need help upgrading your database, please put in a GitHub Issue. We strive for a stable v1.0. 28 | 29 | ### Bug fixes 30 | 31 | * Fixed a bug where index creation was failing for multikey objects 32 | * (#57) Fixed a bug where index management was failing while a new index was being created 33 | 34 | ### Deprecations 35 | 36 | * Ecto has deprecated `Repo.transaction` in favor of `Repo.transact`. 
Since this decision doesn't align with FoundationDB's view of 37 | transactions, we have chosen to deprecate `Repo.transaction` in favor of `Repo.transactional`. This terminology better aligns with 38 | `:erlfdb` and provides a distinction from RDBMS transactions, and allows us to avoid future deprecations. 39 | 40 | ### New documentation 41 | 42 | * [Guide for Operators](operators_manual.html): Describes how to use the `EctoFoundationDB.CLI` functions to rename a field while guaranteeing that all 43 | concurrent queries in your distributed application are successful. 44 | * [Metadata Design](metadata.html): Describes how index metadata is managed and cached in EctoFDB. 45 | * [Sync Engine Part I - Single Object](watches.livemd): Revamped Livebook that demonstrates how to create a Sync Engine for a single object (for syncing reads) 46 | * [Sync Engine Part II - Collections](collection_syncing.livemd): New Livebook that demonstrates a Sync Engine for a collection of objects in a tenant (still for reads) 47 | 48 | ## v0.4.0 (2025-01-16) 49 | 50 | ### Enhancements 51 | 52 | * [Large Structs](Ecto.Adapters.FoundationDB.html#module-advanced-options): If your struct encodes to a size larger than 100,000 Bytes, it will now be split across several FDB key-values automatically. 53 | Previously, EctoFDB did not attempt to detect this and allowed FoundationDB to throw error code 2103: "value_too_large - Value length exceeds limit". 54 | See documentation for configuration options. 55 | * `EctoFoundationDB.Sandbox` now uses `m:erlfdb_sandbox`. Sandbox directory name is now `.erlfdb_sandbox`. Directories named `.erlfdb` should be removed. 56 | 57 | ### Bug fixes 58 | 59 | * Upgrade erlfdb to v0.2.2 60 | 61 | ## v0.3.1 (2024-10-24) 62 | 63 | ### Bug fixes 64 | 65 | * Fixed consistency issue with index updates. Previously, the old index key was still queryable. 66 | * Fixed write amplification issue when updating struct's non-indexed fields.
67 | 68 | ### New Documentation 69 | 70 | * Added `fdb_api_counting_text.exs` which tests and documents the `:erlfdb` operations that our Layer is expected to make. 71 | 72 | ## v0.3.0 (2024-10-20) 73 | 74 | ### \*\* Major breaking changes \*\* 75 | 76 | Databases that have been created using a prior version of EctoFoundationDB will be broken on 77 | EctoFDB v0.3 and above. Please start a new database with EctoFDB v0.3. If you currently have 78 | a database on v0.2 or earlier, please [submit an issue](https://github.com/foundationdb-beam/ecto_foundationdb/issues) 79 | to discuss the upgrade path. 80 | 81 | ### Enhancements 82 | 83 | * [Watches](Ecto.Adapters.FoundationDB.html#module-watches): Support for FDB Watches, which is like a database-driven PubSub on an Ecto struct. 84 | * [Directory Tenants](EctoFoundationDB.Tenant.html): A new default backend for Multitenancy that is production-ready. Managed tenants have been moved to "Experimental" status. 85 | * `@schema_context usetenant: true` is no longer required. 86 | * The `:open_db` option now defines a 1-arity function that accepts the Repo module. 87 | 88 | ### New Documentation 89 | 90 | * [Livebook | Watches in LiveView](watches.livemd) 91 | 92 | ## v0.2.1 (2024-09-23) 93 | 94 | ### Bug fixes 95 | 96 | * Upgrade erlfdb to 0.2.1, allowing Livebook Desktop to discover fdbcli location in /usr/local/bin 97 | 98 | ## v0.2.0 (2024-09-21) 99 | 100 | ### Bug fixes 101 | 102 | ### Enhancements 103 | 104 | * [Upserts](Ecto.Adapters.FoundationDB.html#module-upserts): Support for Ecto options `:on_conflict` and `:conflict_target` 105 | * [Pipelining](Ecto.Adapters.FoundationDB.html#module-pipelining): New Repo functions for async/await within a transaction. 106 | 107 | ### New Documentation 108 | 109 | * [testing.md](testing.html): Document to describe how to set up a Sandbox 110 | * [CHANGELOG.md](changelog.html): This file! 

## v0.1.2 (2024-08-31)

### Bug fixes

* Upgrade erlfdb

## v0.1.1 (2024-08-25)

### Enhancements

* Upgrade to Ecto 3.12

### New documentation

* [Livebook | Getting Started](introduction.livemd): How to get started with EctoFoundationDB.

## v0.1.0 (2024-04-07)

### Features

* Multitenancy
* Basic CRUD operations
* Indexes

-------------------------------------------------------------------------------- /lib/ecto/adapters/foundationdb/ecto_adapter_schema.ex: --------------------------------------------------------------------------------
defmodule Ecto.Adapters.FoundationDB.EctoAdapterSchema do
  @moduledoc false

  # Implements the `Ecto.Adapter.Schema` behaviour for the FoundationDB
  # adapter: struct-level insert/update/delete plus the adapter-specific
  # `watch/4`. Every operation requires a tenant in the schema prefix;
  # see `assert_tenancy!/1` at the bottom of this module.
  @behaviour Ecto.Adapter.Schema

  alias EctoFoundationDB.Exception.IncorrectTenancy
  alias EctoFoundationDB.Exception.Unsupported
  alias EctoFoundationDB.Future
  alias EctoFoundationDB.Layer.Fields
  alias EctoFoundationDB.Layer.Metadata
  alias EctoFoundationDB.Layer.Tx
  alias EctoFoundationDB.Schema
  alias EctoFoundationDB.Tenant

  @impl Ecto.Adapter.Schema
  def autogenerate(:binary_id), do: Ecto.UUID.generate()

  def autogenerate(type),
    do: raise("FoundationDB Adapter does not support autogenerating #{type}")

  @impl Ecto.Adapter.Schema
  def insert_all(
        adapter_meta,
        schema_meta,
        _header,
        entries,
        _on_conflict,
        _returning,
        _placeholders,
        options
      ) do
    %{source: source, schema: schema, prefix: tenant, context: context} =
      assert_tenancy!(schema_meta)

    # The primary key field is a property of the schema, not of any single
    # entry, so resolve it once here rather than once per entry.
    pk_field = Fields.get_pk_field!(schema)

    entries =
      Enum.map(entries, fn data_object ->
        pk = data_object[pk_field]

        if is_nil(pk) do
          raise Unsupported, """
          FoundationDB Adapter does not support inserting records with nil primary keys.
          """
        end

        # Each entry carries its own Future so results can be awaited (or not)
        # correctly depending on whether we're already inside a transaction.
        future = Future.before_transactional()
        {{pk_field, pk}, future, data_object}
      end)

    num_ins =
      Metadata.transactional(tenant, adapter_meta, source, fn tx, metadata ->
        Tx.insert_all(
          tenant,
          tx,
          {schema, source, context},
          entries,
          metadata,
          options
        )
      end)

    {num_ins, nil}
  end

  @impl Ecto.Adapter.Schema
  def insert(adapter_meta, schema_meta, data_object, on_conflict, returning, options) do
    # Single-struct insert is implemented on top of insert_all/8 with a
    # one-element list. The adapter never returns generated fields here.
    {_count, nil} =
      insert_all(
        adapter_meta,
        schema_meta,
        nil,
        [data_object],
        on_conflict,
        returning,
        [],
        options
      )

    {:ok, []}
  end

  @impl Ecto.Adapter.Schema
  def update(
        adapter_meta,
        schema_meta,
        update_data,
        filters,
        _returning,
        options
      ) do
    %{source: source, schema: schema, prefix: tenant, context: context} =
      assert_tenancy!(schema_meta)

    pk_field = Fields.get_pk_field!(schema)
    pk = filters[pk_field]
    future = Future.before_transactional()

    res =
      Metadata.transactional(tenant, adapter_meta, source, fn tx, metadata ->
        Tx.update_pks(
          tenant,
          tx,
          {schema, source, context},
          pk_field,
          [{pk, future}],
          update_data,
          metadata,
          options
        )
      end)

    # Tx.update_pks returns the number of records updated; for a single-pk
    # update that is 1 (success) or 0 (record missing => stale).
    case res do
      1 ->
        {:ok, []}

      0 ->
        {:error, :stale}
    end
  end

  @impl Ecto.Adapter.Schema
  def delete(
        adapter_meta,
        schema_meta,
        filters,
        _returning,
        _options
      ) do
    %{source: source, schema: schema, prefix: tenant, context: context} =
      assert_tenancy!(schema_meta)

    pk_field = Fields.get_pk_field!(schema)
    pk = filters[pk_field]
    future = Future.before_transactional()

    res =
      Metadata.transactional(tenant, adapter_meta, source, fn tx, metadata ->
        Tx.delete_pks(
          tenant,
          tx,
          {schema, source, context},
          [{pk, future}],
          metadata
        )
      end)

    # Same convention as update/6: 1 deleted record on success, 0 => stale.
    case res do
      1 ->
        {:ok, []}

      0 ->
        {:error, :stale}
    end
  end

  # Watching a nil struct is a no-op (e.g. after a record was deleted).
  def watch(_module, _repo, nil, {_adapter_meta, _options}) do
    nil
  end

  def watch(module, repo, struct, {adapter_meta, options}) do
    # This is not an Ecto callback, so we have to construct our own schema_meta
    schema_meta = %{
      schema: struct.__struct__,
      source: Ecto.get_meta(struct, :source),
      prefix: Keyword.get(options, :prefix, Ecto.get_meta(struct, :prefix))
    }

    %{schema: schema, source: source, context: context, prefix: tenant} =
      assert_tenancy!(schema_meta)

    pk_field = Fields.get_pk_field!(schema)
    pk = Map.get(struct, pk_field)

    Tx.transactional(tenant, fn tx ->
      future_ref = Tx.watch(tenant, tx, {schema, source, context}, {pk_field, pk}, options)

      # See EctoAdapterAssigns for the other half of this implementation.
      # The deferred handler re-arms the watch when the assign is refreshed.
      Future.new_deferred(future_ref, fn _ ->
        {schema, {:pk, pk}, options,
         &watch(module, repo, &1, {adapter_meta, Keyword.merge(options, &2)})}
      end)
    end)
  end

  # Resolves the schema context and guarantees a usable tenant in the prefix.
  # Raises IncorrectTenancy when the caller provided no tenant.
  defp assert_tenancy!(schema_meta = %{source: source, schema: schema, prefix: tenant}) do
    schema_meta = Map.put(schema_meta, :context, Schema.get_context!(source, schema))

    case Tx.safe?(tenant) do
      {false, :missing_tenant} ->
        raise IncorrectTenancy, """
        FoundationDB Adapter is expecting the struct for schema \
        #{inspect(schema)} to include a tenant in the prefix metadata, \
        but a nil prefix was provided.

        Call `Ecto.Adapters.FoundationDB.usetenant(struct, tenant)` before inserting.

        Or use the option `prefix: tenant` on the call to your Repo.
197 | """ 198 | 199 | {true, tenant = %Tenant{}} -> 200 | Map.put(schema_meta, :prefix, tenant) 201 | end 202 | end 203 | end 204 | -------------------------------------------------------------------------------- /lib/ecto_foundationdb/future.ex: -------------------------------------------------------------------------------- 1 | defmodule EctoFoundationDB.Future do 2 | @moduledoc """ 3 | Opaque struct that represents an unresolved result set from some FoundationDB 4 | query operation. 5 | 6 | If you have received a Future from an EctoFDB Repo API, please consult the documentation 7 | for that API for dealing with the Future. The functions here are intended to be for 8 | internal use only. 9 | """ 10 | alias EctoFoundationDB.Layer.Tx 11 | defstruct [:ref, :tx, :erlfdb_future, :result, :handler, :must_wait?] 12 | 13 | @token :__ectofdbfuture__ 14 | 15 | def token(), do: @token 16 | 17 | # Before entering a transactional that uses this Future module, we must know whether or not 18 | # we are required to wait on the Future upon leaving the transactional (`must_wait?`). 19 | def before_transactional() do 20 | %__MODULE__{ 21 | ref: nil, 22 | handler: &Function.identity/1, 23 | must_wait?: not Tx.in_tx?() 24 | } 25 | end 26 | 27 | # Upon leaving a transactional that uses this Future module, `leaving_transactional` must 28 | # be called so that any Futures have a change to wait on their results if necessary. 29 | def leaving_transactional(fut = %__MODULE__{must_wait?: true}) do 30 | %__MODULE__{ 31 | fut 32 | | tx: nil, 33 | erlfdb_future: nil, 34 | result: result(fut), 35 | handler: &Function.identity/1 36 | } 37 | end 38 | 39 | def leaving_transactional(fut = %__MODULE__{must_wait?: false}) do 40 | fut 41 | end 42 | 43 | def new() do 44 | %__MODULE__{handler: &Function.identity/1, must_wait?: true} 45 | end 46 | 47 | def ref(%__MODULE__{ref: ref}), do: ref 48 | 49 | @doc """ 50 | Creates a Future that will be resolved outside of the transaction in which it was created. 
51 | 52 | Used for: 53 | 54 | - watch 55 | - versionstamp 56 | """ 57 | def new_deferred(erlfdb_future, handler \\ &Function.identity/1) do 58 | # The future for a watch and versionstamp is fulfilled outside of a transaction, so there is no tx val 59 | %__MODULE__{ 60 | tx: :deferred, 61 | ref: get_ref(erlfdb_future), 62 | erlfdb_future: erlfdb_future, 63 | handler: handler, 64 | must_wait?: false 65 | } 66 | end 67 | 68 | def set(fut, tx, erlfdb_future, f \\ &Function.identity/1) do 69 | ref = get_ref(erlfdb_future) 70 | %__MODULE__{handler: g} = fut 71 | %__MODULE__{fut | ref: ref, tx: tx, erlfdb_future: erlfdb_future, handler: &f.(g.(&1))} 72 | end 73 | 74 | def set_result(fut, result) do 75 | %__MODULE__{handler: f} = fut 76 | 77 | %__MODULE__{ 78 | fut 79 | | tx: nil, 80 | erlfdb_future: nil, 81 | result: f.(result), 82 | handler: &Function.identity/1 83 | } 84 | end 85 | 86 | def result(fut = %__MODULE__{erlfdb_future: nil, handler: f}) do 87 | f.(fut.result) 88 | end 89 | 90 | def result(fut = %__MODULE__{tx: :deferred}) do 91 | %__MODULE__{erlfdb_future: erlfdb_future, handler: handler} = fut 92 | res = :erlfdb.wait(erlfdb_future) 93 | handler.(res) 94 | end 95 | 96 | def result(fut) do 97 | %__MODULE__{tx: tx, erlfdb_future: erlfdb_future, handler: handler} = fut 98 | 99 | # Since we only have a single future, we can use :erlfdb.wait/1 as long as it's 100 | # not a :fold_future. 
Doing so is good for bookkeeping (fdb_api_counting_test) 101 | res = 102 | case elem(erlfdb_future, 0) do 103 | :fold_future -> 104 | [res] = :erlfdb.wait_for_all_interleaving(tx, [erlfdb_future]) 105 | res 106 | 107 | _ -> 108 | :erlfdb.wait(erlfdb_future) 109 | end 110 | 111 | handler.(res) 112 | end 113 | 114 | def await_all(futs) do 115 | futs 116 | |> await_stream() 117 | |> Enum.to_list() 118 | end 119 | 120 | # Future: If there is a wrapping transaction with an `async_*` qualifier, the wait happens here 121 | def await_stream(futs) do 122 | futs = Enum.to_list(futs) 123 | 124 | # important to maintain order of the input futures 125 | reffed_futures = 126 | for %__MODULE__{ref: ref, erlfdb_future: erlfdb_future} <- futs, 127 | not is_nil(erlfdb_future), 128 | do: {ref, erlfdb_future} 129 | 130 | results = 131 | if length(reffed_futures) > 0 do 132 | [%__MODULE__{tx: tx} | _] = futs 133 | {refs, erlfdb_futures} = Enum.unzip(reffed_futures) 134 | Enum.zip(refs, :erlfdb.wait_for_all_interleaving(tx, erlfdb_futures)) |> Enum.into(%{}) 135 | else 136 | %{} 137 | end 138 | 139 | Stream.map( 140 | futs, 141 | fn fut = %__MODULE__{ref: ref, result: result, handler: f} -> 142 | case Map.get(results, ref, nil) do 143 | nil -> 144 | %__MODULE__{ 145 | fut 146 | | tx: nil, 147 | erlfdb_future: nil, 148 | result: f.(result), 149 | handler: &Function.identity/1 150 | } 151 | 152 | new_result -> 153 | %__MODULE__{ 154 | fut 155 | | tx: nil, 156 | erlfdb_future: nil, 157 | result: f.(new_result), 158 | handler: &Function.identity/1 159 | } 160 | end 161 | end 162 | ) 163 | end 164 | 165 | def apply(fut = %__MODULE__{erlfdb_future: nil}, f) do 166 | %__MODULE__{handler: g, result: result} = fut 167 | %__MODULE__{result: f.(g.(result)), handler: &Function.identity/1} 168 | end 169 | 170 | def apply(fut, f) do 171 | %__MODULE__{handler: g} = fut 172 | %__MODULE__{fut | handler: &f.(g.(&1))} 173 | end 174 | 175 | def find_ready(futs, ready_ref) do 176 | # Must only be called if 
you've received a {ready_ref, :ready} 177 | # message in your mailbox. 178 | 179 | find_ready(futs, ready_ref, []) 180 | end 181 | 182 | defp find_ready([], _ready_ref, acc), do: {nil, Enum.reverse(acc)} 183 | 184 | defp find_ready([h | t], ready_ref, acc) do 185 | %__MODULE__{erlfdb_future: erlfdb_future} = h 186 | 187 | if match_ref?(erlfdb_future, ready_ref) do 188 | {h, Enum.reverse(acc) ++ t} 189 | else 190 | find_ready(t, ready_ref, [h | acc]) 191 | end 192 | end 193 | 194 | defp match_ref?({:erlfdb_future, ref1, _}, ref2) when ref1 === ref2, do: true 195 | defp match_ref?({:fold_future, _, erlfdb_future}, ref2), do: match_ref?(erlfdb_future, ref2) 196 | defp match_ref?(_, _), do: false 197 | 198 | defp get_ref({:erlfdb_future, ref, _}), do: ref 199 | defp get_ref({:fold_future, _, {:erlfdb_future, ref, _}}), do: ref 200 | defp get_ref(_), do: :erlang.error(:badarg) 201 | end 202 | -------------------------------------------------------------------------------- /docs/getting_started/introduction.livemd: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | ```elixir 4 | Mix.install([ 5 | {:ecto_foundationdb, "~> 0.3"} 6 | ]) 7 | ``` 8 | 9 | ## Setup 10 | 11 | Hello! This guide simulates what your experience might be when developing an application with EctoFoundationDB. Specifically, it focuses on the mechanism that EctoFoundationDB uses to create and manage indexes. 12 | 13 | It assumes the reader is familiar with general Ecto features. 14 | 15 | Before we get started, a couple of important points about executing these commands on your system. 16 | 17 | > If you received an error on the `Mix.install` setup, please make sure you have both `foundationdb-server` and `foundationdb-clients` packages installed on your system. Also, ensure that your Livebook PATH environment variable includes the directory containing the `fdbcli` binary. 
18 | 19 | > This LiveBook expects your system to have a running instance of FoundationDB, and it writes and deletes data from it. If your system's `/etc/foundationdb/fdb.cluster` is pointing to a real database, do not execute these commands! 20 | 21 | With that out of the way, we'll start off with creating your Repo module. 22 | 23 | ```elixir 24 | defmodule MyApp.Repo do 25 | use Ecto.Repo, otp_app: :my_app, adapter: Ecto.Adapters.FoundationDB 26 | 27 | use EctoFoundationDB.Migrator 28 | 29 | @impl true 30 | def migrations() do 31 | [ 32 | # {0, IndexesMigration} 33 | ] 34 | end 35 | end 36 | ``` 37 | 38 | Notice that the line with `IndexesMigration` is commented out. We'll come back to this later. 39 | 40 | ## Developing your app 41 | 42 | This next step simulates your app's startup. Normally, you would have a project defining `:my_app` and the Repo would be included in your supervision tree. In this Guide, we're starting the Repo as an isolated resource. 43 | 44 | ```elixir 45 | {:ok, _} = Ecto.Adapters.FoundationDB.ensure_all_started(MyApp.Repo.config(), :temporary) 46 | MyApp.Repo.start_link(log: false) 47 | ``` 48 | 49 | Next, we define an `Ecto.Schema` for events that are coming from a temperature sensor. This is a pretty standard Schema module. 50 | 51 | ```elixir 52 | defmodule TemperatureEvent do 53 | use Ecto.Schema 54 | 55 | @primary_key {:id, :binary_id, autogenerate: true} 56 | 57 | schema "temperature_events" do 58 | field(:recorded_at, :naive_datetime_usec) 59 | field(:kelvin, :float) 60 | field(:site, :string) 61 | timestamps() 62 | end 63 | end 64 | ``` 65 | 66 | We're going to create a module that will help us insert some `TemperatureEvents`. 
67 | 68 | ```elixir 69 | defmodule Sensor do 70 | alias Ecto.Adapters.FoundationDB 71 | 72 | def record(n, tenant) do 73 | MyApp.Repo.transactional(tenant, 74 | fn -> 75 | for _ <- 1..n, do: record(nil) 76 | end) 77 | end 78 | 79 | def record(tenant) do 80 | %TemperatureEvent{ 81 | site: "surface", 82 | kelvin: 373.15 + :rand.normal(0, 5), 83 | recorded_at: NaiveDateTime.utc_now() 84 | } 85 | |> FoundationDB.usetenant(tenant) 86 | |> MyApp.Repo.insert!() 87 | end 88 | end 89 | ``` 90 | 91 | Now, we create and open a new Tenant to store our `TemperatureEvents`. 92 | 93 | ```elixir 94 | alias EctoFoundationDB.Tenant 95 | 96 | tenant = Tenant.open!(MyApp.Repo, "experiment-42c") 97 | ``` 98 | 99 | We're ready to record an event from our temperature sensor! Feel free to Reevaluate this block several times. You'll record 4 new events each time. 100 | 101 | ```elixir 102 | for _ <- 1..4, do: Sensor.record(tenant) 103 | ``` 104 | 105 | We can list all the events from the Tenant. This uses a single FoundationDB Transaction. 106 | 107 | ```elixir 108 | MyApp.Repo.all(TemperatureEvent, prefix: tenant) 109 | ``` 110 | 111 | If there's a large number of events, you can stream them instead of reading them all at once. This uses multiple FoundationDB Transactions. 112 | 113 | ```elixir 114 | MyApp.Repo.stream(TemperatureEvent, prefix: tenant) 115 | |> Enum.to_list() 116 | |> length() 117 | ``` 118 | 119 | Next, we'd like to read all events from `"surface"`. If you're executing this LiveBook in order, you'll receive an exception on this step. 120 | 121 | ```elixir 122 | import Ecto.Query 123 | 124 | query = from(e in TemperatureEvent, where: e.site == ^"surface") 125 | MyApp.Repo.all(query, prefix: tenant) 126 | ``` 127 | 128 | Did you get an exception? If so, scroll back up to the `defmodule MyApp.Repo` block in the Setup section, un-comment the line with `IndexesMigration`, and Reevaluate that block. Then come back and continue from here. 
You don't need to Reevaluate other blocks above this text. 129 | 130 | 👋 131 | 132 | Welcome back! You've instructed the Repo to load a migration next time we open a Tenant. But we still need to define that Migration. The block below defines two indexes. 133 | 134 | ```elixir 135 | defmodule IndexesMigration do 136 | use EctoFoundationDB.Migration 137 | 138 | @impl true 139 | def change() do 140 | [ 141 | create(index(TemperatureEvent, [:site])), 142 | create(index(TemperatureEvent, [:recorded_at])) 143 | ] 144 | end 145 | end 146 | ``` 147 | 148 | Now, we re-open the Tenant. **Something very important happens here.** 149 | 150 | This block simulates you restarting your app, and your client reconnecting. We'll just simply call `open!/2` again. 151 | 152 | ```elixir 153 | tenant = Tenant.open!(MyApp.Repo, "experiment-42c") 154 | ``` 155 | 156 | Great! If you made it to this step, then the Migration has executed automatically, and the indexes are ready to be used. 157 | 158 | ## Querying your data 159 | 160 | This next block has the same query as the one that threw an exception earlier. This time, you should retrieve the expected events. 161 | 162 | ```elixir 163 | import Ecto.Query 164 | query = from(e in TemperatureEvent, where: e.site == ^"surface") 165 | MyApp.Repo.all(query, prefix: tenant) 166 | ``` 167 | 168 | We can also use the timestamp index that we created in a new query. 169 | 170 | ```elixir 171 | now = NaiveDateTime.utc_now() 172 | past = NaiveDateTime.add(now, -1200, :second) 173 | 174 | query = 175 | from(e in TemperatureEvent, 176 | where: e.recorded_at >= ^past and e.recorded_at < ^now 177 | ) 178 | 179 | MyApp.Repo.all(query, prefix: tenant) 180 | ``` 181 | 182 | Finally, just for fun, let's insert 10,000 `TemperatureEvent`s! 

```elixir
num = 10000
batch = 100

{t, :ok} =
  :timer.tc(fn ->
    Stream.duplicate(batch, div(num, batch))
    |> Task.async_stream(
      Sensor,
      :record,
      [tenant],
      max_concurrency: System.schedulers_online() * 8,
      ordered: false,
      timeout: 30000
    )
    |> Stream.run()
  end)

IO.puts("Done in #{t / 1000} msec")
```

## Cleaning up

And if you'd like to tidy up, you can easily delete all the data.

```elixir
# Note: destructive!
MyApp.Repo.delete_all(TemperatureEvent, prefix: tenant)
```

```elixir
# Note: destructive!
Tenant.clear_delete!(MyApp.Repo, "experiment-42c")
```
-------------------------------------------------------------------------------- /lib/ecto_foundationdb/query_plan.ex: --------------------------------------------------------------------------------
defmodule EctoFoundationDB.QueryPlan do
  @moduledoc "See `Ecto.Adapters.FoundationDB`"

  # Translates Ecto query `wheres`/`updates` ASTs into a flat plan of
  # constraints (None | Equal | Between) that the FDB layer can execute.
  alias EctoFoundationDB.Exception.Unsupported
  alias EctoFoundationDB.Layer.Fields
  alias EctoFoundationDB.QueryPlan.Between
  alias EctoFoundationDB.QueryPlan.Equal
  alias EctoFoundationDB.QueryPlan.None

  defstruct [:tenant, :source, :schema, :context, :constraints, :updates, :layer_data, :ordering]

  @type t() :: %__MODULE__{}

  defmodule None do
    @moduledoc false
    # Full-table scan: no usable constraint.
    defstruct [:is_pk?]
  end

  defmodule Equal do
    @moduledoc false
    # Equality constraint on a single field.
    defstruct [:field, :is_pk?, :param]
  end

  defmodule Between do
    @moduledoc false
    # Range constraint; a nil param on either side means unbounded.
    defstruct [
      :field,
      :is_pk?,
      :param_left,
      :param_right,
      :inclusive_left?,
      :inclusive_right?
    ]
  end

  # Builds a plan for a primary-key range scan from id_s to id_e.
  # A nil bound is treated as unbounded and forced inclusive on that side.
  def all_range(tenant, source, schema, context, id_s, id_e, options) do
    %__MODULE__{
      tenant: tenant,
      source: source,
      schema: schema,
      context: context,
      constraints: [
        %Between{
          field: :_,
          is_pk?: true,
          param_left: id_s,
          param_right: id_e,
          inclusive_left?: is_nil(id_s) || Keyword.get(options, :inclusive_left?, true),
          inclusive_right?: is_nil(id_e) || Keyword.get(options, :inclusive_right?, false)
        }
      ],
      updates: [],
      layer_data: %{},
      ordering: []
    }
  end

  # Builds a plan from Ecto query AST fragments. Constraints are sorted so
  # Equal constraints precede Between constraints (index lookup order).
  def get(tenant, source, schema, context, wheres, updates, params, ordering) do
    constraints =
      case walk_ast(wheres, schema, params, []) do
        [] ->
          [%None{is_pk?: true}]

        list ->
          list
      end

    %__MODULE__{
      tenant: tenant,
      source: source,
      schema: schema,
      context: context,
      constraints:
        Enum.sort(constraints, fn
          %Equal{}, %Between{} -> true
          _, _ -> false
        end),
      updates: resolve_updates(updates, params),
      layer_data: %{},
      ordering: ordering
    }
  end

  def walk_ast([], _schema, _params, constraints) do
    Enum.reverse(constraints)
  end

  def walk_ast(
        [%Ecto.Query.BooleanExpr{op: :and, expr: {:and, [], [lhs, rhs]}} | rest],
        schema,
        params,
        constraints
      ) do
    # Try to recognize the whole `lhs and rhs` pair as one Between constraint
    # first; otherwise recurse into both sides independently.
    case get_op({lhs, rhs}, schema, params) do
      nil ->
        constraints_lhs =
          walk_ast([%Ecto.Query.BooleanExpr{op: :and, expr: lhs}], schema, params, [])

        constraints_rhs =
          walk_ast([%Ecto.Query.BooleanExpr{op: :and, expr: rhs}], schema, params, [])

        constraints = constraints_rhs ++ constraints_lhs ++ constraints
        walk_ast(rest, schema, params, constraints)

      op ->
        walk_ast(rest, schema, params, [op | constraints])
    end
  end

  def walk_ast(
        wheres = [%Ecto.Query.BooleanExpr{expr: expr} | rest],
        schema,
        params,
        constraints
      ) do
    case get_op(expr, schema, params) do
      nil ->
        raise Unsupported, """
        FoundationDB Adapter has not implemented support for your query

        #{inspect(wheres)}
        """

      op ->
        walk_ast(rest, schema, params, [op | constraints])
    end
  end

  def walk_ast(wheres, _schema, _params, _constraints) do
    raise Unsupported, """
    FoundationDB Adapter has not implemented support for your query

    #{inspect(wheres)}
    """
  end

  # field == ^param
  def get_op(
        {:==, [], [{{:., [], [{:&, [], [0]}, where_field]}, [], []}, where_param]},
        schema,
        params
      ) do
    %Equal{
      field: where_field,
      is_pk?: pk?(schema, where_field),
      param: get_pinned_param(params, where_param)
    }
  end

  # (field > / >= ^left) and (field < / <= ^right) on the same field
  def get_op(
        {
          {op_left, [],
           [
             {{:., [], [{:&, [], [0]}, where_field_gt]}, [], []},
             where_param_left
           ]},
          {op_right, [],
           [
             {{:., [], [{:&, [], [0]}, where_field_lt]}, [], []},
             where_param_right
           ]}
        },
        schema,
        params
      )
      when where_field_gt == where_field_lt and
             (op_left == :> or op_left == :>=) and
             (op_right == :< or op_right == :<=) do
    %Between{
      field: where_field_gt,
      is_pk?: pk?(schema, where_field_gt),
      param_left: get_pinned_param(params, where_param_left),
      param_right: get_pinned_param(params, where_param_right),
      inclusive_left?: op_left == :>=,
      inclusive_right?: op_right == :<=
    }
  end

  # field > / >= ^param (right side unbounded)
  def get_op(
        {op, [], [{{:., [], [{:&, [], [0]}, where_field]}, [], []}, where_param]},
        schema,
        params
      )
      when op in ~w[> >=]a do
    %Between{
      field: where_field,
      is_pk?: pk?(schema, where_field),
      param_left: get_pinned_param(params, where_param),
      param_right: nil,
      inclusive_left?: op == :>=,
      inclusive_right?: true
    }
  end

  # field < / <= ^param (left side unbounded)
  def get_op(
        {op, [], [{{:., [], [{:&, [], [0]}, where_field]}, [], []}, where_param]},
        schema,
        params
      )
      when op in ~w[< <=]a do
    %Between{
      field: where_field,
      is_pk?: pk?(schema, where_field),
      param_left: nil,
      param_right: get_pinned_param(params, where_param),
      inclusive_left?: true,
      inclusive_right?: op == :<=
    }
  end

  def get_op(_, _schema, _params) do
    nil
  end

  def get_pinned_param(params, {:^, [], [pos]}) do
    Enum.at(params, pos)
  end

  def get_pinned_param(_params, val) do
    val
  end

  defp resolve_updates([%Ecto.Query.QueryExpr{expr: [set: pins]}], params) do
    field_vals =
      for {field, {:^, [], [param_pos]}} <- pins do
        {field, Enum.at(params, param_pos)}
      end

    [set: field_vals]
  end

  defp resolve_updates([%Ecto.Query.QueryExpr{expr: [_ | _]}], _params) do
    raise Unsupported, """
    FoundationDB Adapter does not support your update operation.
    """
  end

  defp resolve_updates([], _params) do
    []
  end

  defp pk?(nil, _param), do: nil

  defp pk?(schema, param) do
    Fields.get_pk_field!(schema) == param
  end
end