├── .formatter.exs ├── .github ├── dependabot.yml └── workflows │ └── elixir.yml ├── .gitignore ├── CHANGELOG.md ├── LICENSE.md ├── README.md ├── lib ├── groot.ex └── groot │ ├── application.ex │ ├── clock_sync.ex │ ├── register.ex │ └── storage.ex ├── mix.exs ├── mix.lock └── test ├── groot_test.exs └── test_helper.exs /.formatter.exs: -------------------------------------------------------------------------------- 1 | # Used by "mix format" 2 | [ 3 | inputs: ["{mix,.formatter}.exs"] 4 | ] 5 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: mix 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | time: "10:00" 8 | open-pull-requests-limit: 10 9 | ignore: 10 | - dependency-name: ex_doc 11 | versions: 12 | - 0.24.0 13 | - 0.24.1 14 | - 0.24.2 15 | -------------------------------------------------------------------------------- /.github/workflows/elixir.yml: -------------------------------------------------------------------------------- 1 | name: Elixir CI 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | env: 10 | MIX_ENV: test 11 | 12 | jobs: 13 | deps: 14 | name: Install Dependencies 15 | runs-on: ubuntu-latest 16 | strategy: 17 | matrix: 18 | elixir: [1.11] 19 | otp: [23.3] 20 | steps: 21 | - name: checkout 22 | uses: actions/checkout@v2 23 | with: 24 | fetch-depth: 0 25 | - name: setup 26 | uses: erlef/setup-beam@v1 27 | with: 28 | elixir-version: ${{ matrix.elixir }} 29 | otp-version: ${{ matrix.otp }} 30 | - name: Retrieve Cached Dependencies 31 | uses: actions/cache@v2 32 | id: mix-cache 33 | with: 34 | path: | 35 | deps 36 | _build 37 | priv/plts 38 | key: ${{ runner.os }}-${{ matrix.otp }}-${{ matrix.elixir }}-${{ hashFiles('mix.lock') }} 39 | - name: Install deps 40 | if: steps.mix-cache.outputs.cache-hit != 'true' 41 | run: | 42 | mkdir -p priv/plts 43 | mix local.rebar --force 44 | mix local.hex --force 45 | mix deps.get 46 | mix deps.compile 47 | mix dialyzer --plt 48 | 49 | analyze: 50 | name: Analysis 51 | needs: deps 52 | runs-on: ubuntu-latest 53 | strategy: 54 | matrix: 55 | elixir: [1.11] 56 | otp: [23.3] 57 | steps: 58 | - uses: actions/checkout@v2 59 | with: 60 | fetch-depth: 0 61 | - name: Setup elixir 62 | uses: erlef/setup-beam@v1 63 | with: 64 | elixir-version: ${{ matrix.elixir }} 65 | otp-version: ${{ matrix.otp }} 66 | - name: Retrieve Cached Dependencies 67 | uses: actions/cache@v2 68 | id: mix-cache 69 | with: 70 | path: | 71 | deps 72 | _build 73 | priv/plts 74 | key: ${{ runner.os }}-${{ matrix.otp }}-${{ matrix.elixir }}-${{ hashFiles('mix.lock') }} 75 | - name: Run Dialyzer 76 | run: mix dialyzer --no-check --halt-exit-status 77 | 78 | tests: 79 | name: Tests 80 | needs: deps 81 | runs-on: ubuntu-latest 82 | strategy: 83 | matrix: 84 | elixir: [1.11] 85 | otp: [23.3] 86 | steps: 87 | - uses: actions/checkout@v2 88 | with: 89 | fetch-depth: 0 90 | - uses: erlef/setup-beam@v1 91 | with: 92 | elixir-version: ${{ matrix.elixir }} 93 | otp-version: ${{ matrix.otp }} 94 | - name: Retrieve Cached Dependencies 95 | uses: actions/cache@v2 96 | id: mix-cache 97 | with: 98 | path: | 99 | deps 100 | _build 101 | priv/plts 102 | key: ${{ runner.os }}-${{ matrix.otp }}-${{ matrix.elixir }}-${{ hashFiles('mix.lock') }} 103 | - name: Start EPMD 104 | run: epmd -daemon 105 | - name: Run Tests 106 | run: mix test 107 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover/ 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps/ 9 | 10 | # Where third-party dependencies like ExDoc output generated docs. 11 | /doc/ 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Ignore package tarball (built via "mix hex.build"). 23 | groot-*.tar 24 | 25 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## 0.1.1 4 | 5 | * [a94744e](https://github.com/keathley/groot/commit/a94744e3a27049ff92f86155886b443a150c7060) Update README.md - Chris Keathley 6 | * [c1cde45](https://github.com/keathley/groot/commit/c1cde45a7088564f0377365cc202838425c86cf8) Improve more docs - Chris Keathley 7 | 8 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Christopher Keathley 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Groot 2 | 3 | I am Groot. 4 | 5 | ## Overview 6 | 7 | Groot provides a distributed KV store for ephemeral data. It utilizes LWW-register 8 | CRDTs and Hybrid-Logical clocks to ensure availability and eventual 9 | consistency. For technical information on Groot's implementation, please 10 | refer to the [docs](https://hexdocs.pm/groot). 11 | 12 | ## Installation 13 | 14 | ```elixir 15 | def deps do 16 | [ 17 | {:groot, "~> 0.1"} 18 | ] 19 | end 20 | ``` 21 | 22 | ## Usage 23 | 24 | ```elixir 25 | # Changes are propagated to other nodes. 26 | :ok = Groot.set(:key, "value") 27 | 28 | # Read existing values. "Gets" are always done from a local ETS table.
29 | "value" = Groot.get(:key) 30 | ``` 31 | 32 | `set` operations will be replicated to all connected nodes. If new nodes join, or if a node rejoins the cluster after a network partition, then the other nodes in the cluster will replicate all of their registers to the new node. 33 | 34 | ## Caveats 35 | 36 | Groot relies on distributed Erlang. All of the data stored in Groot is 37 | ephemeral and is *not* maintained between node restarts. 38 | 39 | Because we're using CRDTs to propagate changes, a change made on one node may take time to spread to the other nodes. It's safe to run the same operation on multiple nodes. Groot always chooses the register with the latest HLC. 40 | 41 | Groot replicates all registers to all nodes. If you attempt to store thousands of keys in Groot, you'll probably have a bad time. 42 | 43 | ## Should I use this? 44 | 45 | If you need to store and replicate a relatively small number of transient 46 | values, then Groot may be a good solution for you. If you need anything beyond those features, Groot is probably a bad fit. 47 | 48 | Here are some examples of good use cases: 49 | 50 | * Feature Flags - [rollout](https://github.com/keathley/rollout) is an example of this. 51 | * Runtime configuration changes 52 | * User session state 53 | * Generic caching 54 | -------------------------------------------------------------------------------- /lib/groot.ex: -------------------------------------------------------------------------------- 1 | defmodule Groot do 2 | @moduledoc """ 3 | Groot provides an eventually consistent, ephemeral KV store. It relies on 4 | distributed Erlang and uses LWW-registers and Hybrid-logical clocks 5 | to ensure maximum availability. Groot utilizes ETS for efficient reading. 6 | 7 | ## Usage 8 | 9 | ```elixir 10 | # Changes are propagated to other nodes. 11 | :ok = Groot.set(:key, "value") 12 | 13 | # Read existing values 14 | "value" = Groot.get(:key) 15 | ``` 16 | 17 | Updates will replicate to all connected nodes. If a new node joins, or if a node 18 | rejoins the cluster after a network partition, then the other nodes in the 19 | cluster will replicate all of their registers to the new node. 20 | 21 | ## Consistency 22 | 23 | Groot uses LWW register CRDTs for storing values. Each register includes a 24 | hybrid logical clock (HLC). Ordering of events is determined by comparing HLCs. 25 | If a network partition occurs, nodes on either side of the partition will 26 | continue to accept `set` and `get` operations. Once the partition heals, all 27 | registers will be replicated to all nodes. If there are any conflicts, the 28 | register with the largest HLC will be chosen. 29 | 30 | Groot may lose writes under specific failure scenarios. For instance, if 31 | there is a network partition between 2 nodes, neither node will be able to 32 | replicate to the other. If either node crashes after accepting a write, that 33 | write will be lost. 34 | 35 | ## Data limitations 36 | 37 | Groot replicates all keys to all connected nodes. Thus there may be performance 38 | issues if you attempt to store hundreds or thousands of keys. This issue may 39 | be fixed in a future release. 40 | """ 41 | 42 | alias Groot.Storage 43 | 44 | @doc """ 45 | Gets a register's value. If the register is not found it returns `nil`. 46 | """ 47 | @spec get(term()) :: term() | nil 48 | def get(key) do 49 | Storage.get(key) 50 | end 51 | 52 | @doc """ 53 | Sets the value for a register.
54 | """ 55 | @spec set(term(), term()) :: :ok 56 | def set(key, value) do 57 | Storage.set(key, value) 58 | end 59 | end 60 | 61 | -------------------------------------------------------------------------------- /lib/groot/application.ex: -------------------------------------------------------------------------------- 1 | defmodule Groot.Application do 2 | @moduledoc false 3 | 4 | use Application 5 | 6 | def start(_type, _args) do 7 | node_id = gen_node_id() 8 | 9 | children = [ 10 | {HLClock, name: Groot.Clock, node_id: node_id}, 11 | {Groot.ClockSync, [sync_interval: 3_000, clock: Groot.Clock]}, 12 | {Groot.Storage, []} 13 | ] 14 | 15 | opts = [strategy: :one_for_one, name: Groot.Supervisor] 16 | Supervisor.start_link(children, opts) 17 | end 18 | 19 | defp gen_node_id do 20 | 8 21 | |> :crypto.strong_rand_bytes() 22 | |> :crypto.bytes_to_integer() 23 | end 24 | end 25 | -------------------------------------------------------------------------------- /lib/groot/clock_sync.ex: -------------------------------------------------------------------------------- 1 | defmodule Groot.ClockSync do 2 | @moduledoc false 3 | # This module regularly sends our local HLC to a random node in our cluster. 4 | # Each node in the cluster does this periodically in order to passively 5 | # keep HLCs in close proximity to each other. This synchronization is naive 6 | # but it works fine for small cluster sizes. On large clusters it would be 7 | # better to use views similar to HyParView to ensure messages converge efficiently. 8 | 9 | use GenServer 10 | 11 | alias __MODULE__ 12 | 13 | def start_link(args) do 14 | GenServer.start_link(__MODULE__, args, name: __MODULE__) 15 | end 16 | 17 | def sync_remote_clock(server, hlc) do 18 | GenServer.cast(server, {:sync_remote_clock, hlc}) 19 | end 20 | 21 | def init(args) do 22 | data = %{ 23 | sync_interval: Keyword.fetch!(args, :sync_interval), 24 | clock: Keyword.fetch!(args, :clock), 25 | } 26 | 27 | schedule_sync(data) 28 | 29 | {:ok, data} 30 | end 31 | 32 | def handle_cast({:sync_remote_clock, hlc}, data) do 33 | HLClock.recv_timestamp(data.clock, hlc) 34 | {:noreply, data} 35 | end 36 | 37 | def handle_info(:sync, data) do 38 | case Node.list() do 39 | [] -> 40 | schedule_sync(data) 41 | {:noreply, data} 42 | 43 | nodes -> 44 | node = Enum.random(nodes) 45 | {:ok, hlc} = HLClock.send_timestamp(data.clock) 46 | ClockSync.sync_remote_clock({ClockSync, node}, hlc) 47 | schedule_sync(data) 48 | {:noreply, data} 49 | end 50 | end 51 | 52 | def handle_info(_msg, data) do 53 | {:noreply, data} 54 | end 55 | 56 | defp schedule_sync(%{sync_interval: interval}) do 57 | Process.send_after(self(), :sync, interval) 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /lib/groot/register.ex: -------------------------------------------------------------------------------- 1 | defmodule Groot.Register do 2 | @moduledoc false 3 | # LWW Register.
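# A register is a map holding a :key, a :value, and the :hlc (hybrid logical
# clock timestamp) at which that value was written. When two copies of the
# same register conflict, `latest/2` below keeps the copy with the larger
# HLC, which is what makes this a last-write-wins (LWW) register.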
4 | 5 | # Creates a new register 6 | def new(key, val) do 7 | {:ok, hlc} = HLClock.send_timestamp(Groot.Clock) 8 | 9 | %{key: key, value: val, hlc: hlc} 10 | end 11 | 12 | # Updates the value and creates a new HLC for our register 13 | def update(register, val) do 14 | {:ok, hlc} = HLClock.send_timestamp(Groot.Clock) 15 | 16 | %{register | value: val, hlc: hlc} 17 | end 18 | 19 | # Finds the "latest" register by comparing HLCs 20 | def latest(reg1, reg2) do 21 | [reg1, reg2] 22 | |> Enum.reject(&is_nil/1) 23 | |> Enum.sort_by(& &1, fn a, b -> !HLClock.before?(a.hlc, b.hlc) end) 24 | |> Enum.at(0) 25 | end 26 | end 27 | 28 | -------------------------------------------------------------------------------- /lib/groot/storage.ex: -------------------------------------------------------------------------------- 1 | defmodule Groot.Storage do 2 | @moduledoc false 3 | # This module provides a GenServer for maintaining registers. It monitors 4 | # node connections in order to propagate existing registers. 5 | # TODO: This GenServer is a bottleneck on the system. We should really try 6 | # to resolve this and move more work into the calling process in the future. 7 | 8 | use GenServer 9 | 10 | alias Groot.Register 11 | 12 | def start_link(args) do 13 | GenServer.start_link(__MODULE__, args, name: __MODULE__) 14 | end 15 | 16 | # Looks up the value for the key in ETS. Returns nil if the key is not found. 17 | def get(key) do 18 | case :ets.lookup(__MODULE__, key) do 19 | [] -> 20 | nil 21 | 22 | [{^key, value}] -> 23 | value 24 | end 25 | end 26 | 27 | # The main API for setting a key's value 28 | def set(key, value) do 29 | GenServer.call(__MODULE__, {:set, key, value}) 30 | end 31 | 32 | # Deletes all keys in the currently connected cluster. This is only 33 | # intended to be used in development and test 34 | def delete_all() do 35 | GenServer.multi_call(__MODULE__, :delete_all) 36 | end 37 | 38 | def init(_args) do 39 | :net_kernel.monitor_nodes(true) 40 | tab = __MODULE__ = :ets.new(__MODULE__, [:named_table, :set, :protected]) 41 | registers = %{} 42 | schedule_sync_timeout() 43 | 44 | {:ok, %{table: tab, registers: registers}} 45 | end 46 | 47 | def handle_call({:set, key, value}, _from, data) do 48 | registers = Map.update(data.registers, key, Register.new(key, value), fn reg -> 49 | Register.update(reg, value) 50 | end) 51 | :ets.insert(data.table, {key, registers[key].value}) 52 | GenServer.abcast(__MODULE__, {:update_register, registers[key]}) 53 | 54 | {:reply, :ok, %{data | registers: registers}} 55 | end 56 | 57 | def handle_call(:delete_all, _from, data) do 58 | registers = %{} 59 | :ets.delete_all_objects(data.table) 60 | 61 | {:reply, :ok, %{data | registers: registers}} 62 | end 63 | 64 | def handle_cast({:update_register, reg}, data) do 65 | registers = Map.update(data.registers, reg.key, reg, fn existing_reg -> 66 | Register.latest(reg, existing_reg) 67 | end) 68 | :ets.insert(data.table, {reg.key, registers[reg.key].value}) 69 | {:noreply, %{data | registers: registers}} 70 | end 71 | 72 | def handle_cast({:update_registers, registers}, data) do 73 | new_registers = merge(data.registers, registers) 74 | 75 | for {key, reg} <- new_registers do 76 | :ets.insert(data.table, {key, reg.value}) 77 | end 78 | 79 | {:noreply, %{data | registers: new_registers}} 80 | end 81 | 82 | def handle_info(msg, data) do 83 | case msg do 84 | {:nodeup, node} -> 85 | GenServer.cast({__MODULE__, node}, {:update_registers, data.registers}) 86 | {:noreply, data} 87 | 88 | :sync_timeout -> 89 | GenServer.abcast(__MODULE__,
{:update_registers, data.registers}) 90 | schedule_sync_timeout() 91 | {:noreply, data} 92 | 93 | _msg -> 94 | {:noreply, data} 95 | end 96 | end 97 | 98 | defp schedule_sync_timeout do 99 | # Wait between 11 and 20 seconds before doing another sync 100 | next_timeout = (:rand.uniform(10) * 1000) + 10_000 101 | Process.send_after(self(), :sync_timeout, next_timeout) 102 | end 103 | 104 | defp merge(r1, r2) do 105 | keys = 106 | [Map.keys(r1), Map.keys(r2)] 107 | |> List.flatten() 108 | |> Enum.uniq 109 | 110 | keys 111 | |> Enum.map(fn key -> {key, Register.latest(r1[key], r2[key])} end) 112 | |> Enum.into(%{}) 113 | end 114 | end 115 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Groot.MixProject do 2 | use Mix.Project 3 | 4 | @version "0.1.2" 5 | 6 | def project do 7 | [ 8 | app: :groot, 9 | version: @version, 10 | elixir: "~> 1.9", 11 | start_permanent: Mix.env() == :prod, 12 | deps: deps(), 13 | aliases: aliases(), 14 | 15 | description: description(), 16 | package: package(), 17 | name: "Groot", 18 | source_url: "https://github.com/keathley/groot", 19 | docs: docs(), 20 | ] 21 | end 22 | 23 | # Run "mix help compile.app" to learn about applications. 24 | def application do 25 | [ 26 | extra_applications: [:logger], 27 | mod: {Groot.Application, []} 28 | ] 29 | end 30 | 31 | # Run "mix help deps" to learn about dependencies. 32 | defp deps do 33 | [ 34 | {:hlclock, "~> 1.0"}, 35 | 36 | {:dialyxir, "~> 1.1", only: [:dev, :test], runtime: false}, 37 | {:local_cluster, "~> 1.0", only: [:dev, :test]}, 38 | {:schism, "~> 1.0", only: [:dev, :test]}, 39 | {:ex_doc, ">= 0.0.0", only: :dev}, 40 | ] 41 | end 42 | 43 | def aliases do 44 | [ 45 | test: ["test --no-start"] 46 | ] 47 | end 48 | 49 | def description do 50 | """ 51 | Groot is a distributed KV store built on distributed Erlang, LWW Register 52 | CRDTs, and Hybrid Logical Clocks.
53 | """ 54 | end 55 | 56 | def package do 57 | [ 58 | name: "groot", 59 | licenses: ["MIT"], 60 | links: %{"GitHub" => "https://github.com/keathley/groot"}, 61 | ] 62 | end 63 | 64 | def docs do 65 | [ 66 | source_ref: "v#{@version}", 67 | source_url: "https://github.com/keathley/groot", 68 | main: "Groot", 69 | ] 70 | end 71 | end 72 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "dialyxir": {:hex, :dialyxir, "1.1.0", "c5aab0d6e71e5522e77beff7ba9e08f8e02bad90dfbeffae60eaf0cb47e29488", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "07ea8e49c45f15264ebe6d5b93799d4dd56a44036cf42d0ad9c960bc266c0b9a"}, 3 | "earmark": {:hex, :earmark, "1.4.4", "4821b8d05cda507189d51f2caeef370cf1e18ca5d7dfb7d31e9cafe6688106a4", [:mix], [], "hexpm", "1f93aba7340574847c0f609da787f0d79efcab51b044bb6e242cae5aca9d264d"}, 4 | "earmark_parser": {:hex, :earmark_parser, "1.4.29", "149d50dcb3a93d9f3d6f3ecf18c918fb5a2d3c001b5d3305c926cddfbd33355b", [:mix], [], "hexpm", "4902af1b3eb139016aed210888748db8070b8125c2342ce3dcae4f38dcc63503"}, 5 | "erlex": {:hex, :erlex, "0.2.6", "c7987d15e899c7a2f34f5420d2a2ea0d659682c06ac607572df55a43753aa12e", [:mix], [], "hexpm", "2ed2e25711feb44d52b17d2780eabf998452f6efda104877a3881c2f8c0c0c75"}, 6 | "ex_doc": {:hex, :ex_doc, "0.29.1", "b1c652fa5f92ee9cf15c75271168027f92039b3877094290a75abcaac82a9f77", [:mix], [{:earmark_parser, "~> 1.4.19", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", "b7745fa6374a36daf484e2a2012274950e084815b936b1319aeebcf7809574f6"}, 7 | "global_flags": {:hex, :global_flags, "1.0.0", "ee6b864979a1fb38d1fbc67838565644baf632212bce864adca21042df036433", [:rebar3], [], "hexpm", "85d944cecd0f8f96b20ce70b5b16ebccedfcd25e744376b131e89ce61ba93176"}, 8 | "hlclock": {:hex, :hlclock, "1.0.0", "7a72fc7a20a9382499216227edf97a8b118e21fc3fcad0e81b8d10c616ce1431", [:mix], [], "hexpm", "d3f994336a7fcbc68bf08b14b2101b61e57bef82c032a6e05c1cdc753612c941"}, 9 | "local_cluster": {:hex, :local_cluster, "1.2.1", "8eab3b8a387680f0872eacfb1a8bd5a91cb1d4d61256eec6a655b07ac7030c73", [:mix], [{:global_flags, "~> 1.0", [hex: :global_flags, repo: "hexpm", optional: false]}], "hexpm", "aae80c9bc92c911cb0be085fdeea2a9f5b88f81b6bec2ff1fec244bb0acc232c"}, 10 | "makeup": {:hex, :makeup, "1.1.0", "6b67c8bc2882a6b6a445859952a602afc1a41c2e08379ca057c0f525366fc3ca", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "0a45ed501f4a8897f580eabf99a2e5234ea3e75a4373c8a52824f6e873be57a6"}, 11 | "makeup_elixir": {:hex, :makeup_elixir, "0.16.0", "f8c570a0d33f8039513fbccaf7108c5d750f47d8defd44088371191b76492b0b", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "28b2cbdc13960a46ae9a8858c4bebdec3c9a6d7b4b9e7f4ed1502f8159f338e7"}, 12 | "makeup_erlang": {:hex, :makeup_erlang, "0.1.1", "3fcb7f09eb9d98dc4d208f49cc955a34218fc41ff6b84df7c75b3e6e533cc65f", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "174d0809e98a4ef0b3309256cbf97101c6ec01c4ab0b23e926a9e17df2077cbb"}, 13 | "nimble_parsec": {:hex, :nimble_parsec, "1.2.3", 
"244836e6e3f1200c7f30cb56733fd808744eca61fd182f731eac4af635cc6d0b", [:mix], [], "hexpm", "c8d789e39b9131acf7b99291e93dae60ab48ef14a7ee9d58c6964f59efb570b0"}, 14 | "schism": {:hex, :schism, "1.0.1", "b700883b4023b06faa5ab4add3aba5706877feb0a3dcfe8127b5dfeefe2513a5", [:mix], [], "hexpm", "23080d2e0b4490eb2e207c8fee71d34bc0e58cc4f0f6879ca06b8fabe0c531ca"}, 15 | } 16 | -------------------------------------------------------------------------------- /test/groot_test.exs: -------------------------------------------------------------------------------- 1 | defmodule GrootTest do 2 | use ExUnit.Case 3 | doctest Groot 4 | 5 | setup_all do 6 | Application.ensure_all_started(:groot) 7 | nodes = LocalCluster.start_nodes("groot", 2) 8 | 9 | {:ok, nodes: nodes} 10 | end 11 | 12 | setup do 13 | Groot.Storage.delete_all() 14 | 15 | :ok 16 | end 17 | 18 | test "registers are replicated to connected nodes", %{nodes: nodes} do 19 | [n1, n2] = nodes 20 | 21 | Groot.set(:key, "value") 22 | 23 | eventually(fn -> 24 | assert :rpc.call(n1, Groot, :get, [:key]) == "value" 25 | assert :rpc.call(n2, Groot, :get, [:key]) == "value" 26 | end) 27 | end 28 | 29 | test "disconnected nodes are caught up when they reconnect", %{nodes: nodes} do 30 | [n1, n2] = nodes 31 | 32 | Schism.partition([n1]) 33 | 34 | :rpc.call(n2, Groot, :set, [:key, "value"]) 35 | 36 | eventually(fn -> 37 | assert Groot.get(:key) == "value" 38 | assert :rpc.call(n1, Groot, :get, [:key]) == nil 39 | assert :rpc.call(n2, Groot, :get, [:key]) == "value" 40 | end) 41 | 42 | Schism.heal([n1, n2]) 43 | 44 | eventually(fn -> 45 | assert Groot.get(:key) == "value" 46 | assert :rpc.call(n1, Groot, :get, [:key]) == "value" 47 | assert :rpc.call(n2, Groot, :get, [:key]) == "value" 48 | end) 49 | end 50 | 51 | test "sending a register from the past is discarded", %{nodes: nodes} do 52 | [n1, n2] = nodes 53 | 54 | Schism.partition([n1]) 55 | 56 | :rpc.call(n2, Groot, :set, [:key, "first"]) 57 | 58 | eventually(fn -> 59 | assert Groot.get(:key) == "first" 60 | assert :rpc.call(n1, Groot, :get, [:key]) == nil 61 | assert :rpc.call(n2, Groot, :get, [:key]) == "first" 62 | end) 63 | 64 | :rpc.call(n1, Groot, :set, [:key, "second"]) 65 | 66 | eventually(fn -> 67 | assert Groot.get(:key) == "second" 68 | assert :rpc.call(n1, Groot, :get, [:key]) == "second" 69 | assert :rpc.call(n2, Groot, :get, [:key]) == "first" 70 | end) 71 | 72 | Schism.heal([n1, n2]) 73 | 74 | eventually(fn -> 75 | assert Groot.get(:key) == "second" 76 | assert :rpc.call(n1, Groot, :get, [:key]) == "second" 77 | assert :rpc.call(n2, Groot, :get, [:key]) == "second" 78 | end) 79 | end 80 | 81 | @tag :skip 82 | test "crashing processes does not result in lost data" do 83 | flunk "Not Implemented" 84 | end 85 | 86 | def eventually(f, retries \\ 0) do 87 | f.() 88 | rescue 89 | err -> 90 | if retries >= 10 do 91 | reraise err, __STACKTRACE__ 92 | else 93 | :timer.sleep(500) 94 | eventually(f, retries + 1) 95 | end 96 | catch 97 | _exit, _term -> 98 | :timer.sleep(500) 99 | eventually(f, retries + 1) 100 | end 101 | end 102 | 103 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | LocalCluster.start() 2 | ExUnit.start() 3 | --------------------------------------------------------------------------------