├── .formatter.exs ├── .github └── workflows │ └── test.yml ├── .gitignore ├── LICENSE ├── README.md ├── VERSION ├── config └── config.exs ├── example ├── .formatter.exs ├── .gitignore ├── README.md ├── lib │ ├── example.ex │ └── example │ │ ├── application.ex │ │ ├── swarm.ex │ │ └── worker.ex ├── mix.exs ├── mix.lock ├── start.sh └── test │ ├── example_test.exs │ └── test_helper.exs ├── lib ├── sworm.ex └── sworm │ ├── application.ex │ ├── delegate.ex │ ├── directory_manager.ex │ ├── macro.ex │ ├── main.ex │ ├── manager.ex │ ├── supervisor.ex │ └── util.ex ├── mix.exs ├── mix.lock └── test ├── cluster_handoff_test.exs ├── cluster_test.exs ├── support ├── helpers.ex ├── sworm_case.ex └── test_handoff_sworm.ex ├── sworm ├── directory_manager_test.exs ├── error_test.exs ├── macro_test.exs └── shutdown_test.exs ├── sworm_test.exs └── test_helper.exs /.formatter.exs: -------------------------------------------------------------------------------- 1 | # Used by "mix format" 2 | [ 3 | inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] 4 | ] 5 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Elixir CI 2 | 3 | on: 4 | push: 5 | branches: ["main"] 6 | pull_request: 7 | branches: ["main"] 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | build: 14 | name: OTP ${{matrix.otp}} / Elixir ${{matrix.elixir}} 15 | env: 16 | MIX_ENV: test 17 | strategy: 18 | matrix: 19 | otp: ["24.0"] 20 | elixir: ["1.14"] 21 | runs-on: ubuntu-latest 22 | steps: 23 | - uses: actions/checkout@v3 24 | - uses: erlef/setup-elixir@v1 25 | with: 26 | otp-version: ${{matrix.otp}} 27 | elixir-version: ${{matrix.elixir}} 28 | - name: Restore dependencies cache 29 | uses: actions/cache@v3 30 | with: 31 | path: deps 32 | key: ${{ runner.os }}-mix-${{ hashFiles('**/mix.lock') }} 33 | restore-keys: ${{ runner.os }}-mix- 34 | - name: Install dependencies 35 | run: mix deps.get 36 | - name: Run tests 37 | run: mix test 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover/ 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps/ 9 | 10 | # Where third-party dependencies like ExDoc output generated docs. 11 | /doc/ 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Ignore package tarball (built via "mix hex.build"). 
23 | sworm-*.tar 24 | 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2019 Arjan Scherpenisse 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining 6 | a copy of this software and associated documentation files (the 7 | "Software"), to deal in the Software without restriction, including 8 | without limitation the rights to use, copy, modify, merge, publish, 9 | distribute, sublicense, and/or sell copies of the Software, and to 10 | permit persons to whom the Software is furnished to do so, subject to 11 | the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be 14 | included in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Sworm 2 | 3 | [![Build status](https://github.com/arjan/sworm/actions/workflows/test.yml/badge.svg)](https://github.com/arjan/sworm/actions/workflows/test.yml) 4 | [![Hex pm](https://img.shields.io/hexpm/v/sworm.svg?style=flat)](https://hex.pm/packages/sworm) 5 | [![Hex Docs](https://img.shields.io/badge/hex-docs-lightgreen.svg)](https://hexdocs.pm/sworm/) 6 | [![Total Download](https://img.shields.io/hexpm/dt/sworm.svg)](https://hex.pm/packages/sworm) 7 | [![License](https://img.shields.io/hexpm/l/sworm.svg)](https://github.com/arjan/sworm/blob/master/LICENSE) 8 | [![Last Updated](https://img.shields.io/github/last-commit/arjan/sworm.svg)](https://github.com/arjan/sworm/commits/master) 9 | 10 | A combination of a global, distributed process registry and 11 | supervisor, rolled into one, friendly API. 12 | 13 | This library aims to be a drop-in replacement for 14 | [Swarm](https://github.com/bitwalker/swarm), but it is built on top of 15 | [Horde](https://github.com/derekkraan/horde). 16 | 17 | ## Usage 18 | 19 | Sworms can be defined using a macro and then added to your supervision 20 | tree. To replicate Swarm, create the following module: 21 | 22 | ```elixir 23 | defmodule Swarm do 24 | use Sworm 25 | end 26 | ``` 27 | 28 | You are not entirely done yet! Unlike the original Swarm, which has a 29 | "singleton" process tree, you will need to add each `Sworm` to your 30 | own application's supervision tree: 31 | 32 | ```elixir 33 | children = [ 34 | Swarm, 35 | ... 36 | ] 37 | ``` 38 | 39 | Now you can call `Swarm.registered()`, `Swarm.register_name` etc like you're used to. 40 | 41 | ## Architecture 42 | 43 | Sworm combines Horde's DynamicSupervisor and Registry modules to 44 | reproduce the Swarm library. 45 | 46 | To be able to register an aribtrary `{m, f, a}` specification with 47 | Sworm, it spawns a _delegate process_ and uses this process as the 48 | primary process for name registration and supervision. 
This delegate 49 | process then spawns and links the actual process as specified in the 50 | MFA. 51 | 52 | This way, any MFA can be used with Sworm like it can with Swarm, and 53 | the spawned process does not need to be aware of it, because the delegate process handles 54 | name registration, process shutdown on name conflicts, and process handoff. 55 | 56 | ## Node affinity / node deny-/allowlisting 57 | 58 | Contrary to Swarm, Sworm does not have a deny- or allowlisting 59 | mechanism. By design, each Sworm in the cluster only distributes 60 | processes among those nodes that explicitly have that particular sworm 61 | started in their supervision tree. 62 | 63 | Sworm maintains a cluster-global directory CRDT of registered Sworms, 64 | keeping track of which type(s) of Sworm run on which node. 65 | 66 | This ensures that processes are only started through Sworm on nodes 67 | that the sworm itself is also running on, instead of assuming that the 68 | cluster is homogeneous and processes can run on each node, like Swarm 69 | does. 70 | 71 | For even more fine-grained control over process placement, you can pass in a 72 | custom [`:distribution_strategy`][dist] option at compile time, like this: 73 | 74 | ```elixir 75 | defmodule MyTemporaryProcesses do 76 | use Sworm, distribution_strategy: Horde.UniformQuorumDistribution 77 | end 78 | ``` 79 | 80 | The default distribution strategy is [Horde.UniformDistribution][dist_horde]. 81 | 82 | [dist]: https://hexdocs.pm/horde/Horde.DistributionStrategy.html#t:t/0 83 | [dist_horde]: https://hexdocs.pm/horde/Horde.UniformDistribution.html 84 | 85 | ## Child restart strategy 86 | 87 | By default, the restart strategy in the [child 88 | specification][childspec] of the supervision tree is set to 89 | `:transient`. To change this, declare the `restart:` option in your 90 | Sworm module like this: 91 | 92 | ```elixir 93 | defmodule MyTemporaryProcesses do 94 | use Sworm, restart: :temporary 95 | end 96 | ``` 97 | 98 | [childspec]: https://hexdocs.pm/elixir/Supervisor.html#module-child-specification 99 | 100 | ## CRDT options 101 | 102 | To override Horde's [`:delta_crdt_options`][crdt], pass them in the `use` statement: 103 | 104 | ```elixir 105 | defmodule MyTemporaryProcesses do 106 | use Sworm, delta_crdt_options: [sync_interval: 100] 107 | end 108 | ``` 109 | 110 | These CRDT options are used for both the internal Registry and the DynamicSupervisor CRDTs. 111 | 112 | [crdt]: https://hexdocs.pm/delta_crdt/0.6.4/DeltaCrdt.html#t:crdt_option/0 113 | 114 | ## Process state handoff 115 | 116 | Each individual Sworm can be configured to perform a state handoff to 117 | carry over the state of its processes. 118 | 119 | When a node shuts down, Sworm will move the 120 | processes running on that node onto one of the other nodes of the 121 | cluster. By default, these processes are started with a clean slate, 122 | i.e., the state of the process is lost. But when the Sworm is 123 | configured to perform process handoffs, the processes in the sworm are 124 | given some time to hand off their state into the cluster, so that the 125 | state can be restored right after the process is started again on 126 | another node. 127 | 128 | > Process handoff in Sworm works differently from the Swarm library.
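
A process that has nothing worth transferring can also opt out of an individual handoff by replying `:ignore` instead of a state message (both messages are explained below); `Sworm.Delegate` accepts this reply and simply skips the state transfer. A minimal sketch, assuming a sworm named `MyProcesses` as in the examples below:

```elixir
def handle_info({MyProcesses, :begin_handoff, delegate, ref}, state) do
  # nothing to transfer; let the delegate shut down without saving state
  send(delegate, {ref, :ignore})
  {:noreply, state}
end
```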
129 | 130 | Process handoff must be explicitly enabled per sworm: 131 | 132 | ```elixir 133 | defmodule MyProcesses do 134 | use Sworm, handoff: true 135 | end 136 | ``` 137 | 138 | Or, in `config.exs`: 139 | 140 | ```elixir 141 | config :sworm, MyProcesses, handoff: true 142 | ``` 143 | 144 | When a handoff occurs, the process that is about to exit, receives the 145 | following message: 146 | 147 | {MyProcesses, :begin_handoff, delegate, ref} 148 | 149 | If it wants to pass on its internal state it needs to send the 150 | delegate a corresponding ack: 151 | 152 | send(delegate, {ref, :handoff_state, some_state}) 153 | 154 | Now, on the other node, the new process will be started in the normal 155 | way, however, right after it is started it will receive the 156 | `:end_handoff` signal: 157 | 158 | {MyProcesses, :end_handoff, some_state} 159 | 160 | It can then restore its state to the state that was sent by its 161 | predecessor. 162 | 163 | The most basic implementation in a genserver process of this flow is this: 164 | 165 | ```elixir 166 | def handle_info({MyProcesses, :begin_handoff, delegate, ref}, state) do 167 | send(delegate, {ref, :handoff_state, state}) 168 | {:noreply, state} 169 | end 170 | 171 | def handle_info({MyProcesses, :end_handoff, state}, _state) do 172 | {:noreply, state} 173 | end 174 | ``` 175 | 176 | ## Installation 177 | 178 | If [available in Hex](https://hex.pm/docs/publish), the package can be installed 179 | by adding `sworm` to your list of dependencies in `mix.exs`: 180 | 181 | ```elixir 182 | def deps do 183 | [ 184 | {:sworm, "~> 0.1"} 185 | ] 186 | end 187 | ``` 188 | 189 | Documentation can be generated with [ExDoc](https://github.com/elixir-lang/ex_doc) 190 | and published on [HexDocs](https://hexdocs.pm). Once published, the docs can 191 | be found at [https://hexdocs.pm/sworm](https://hexdocs.pm/sworm). 192 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 0.5.13 -------------------------------------------------------------------------------- /config/config.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | if Mix.env() == :test do 4 | config :logger, level: :warn 5 | 6 | # config :logger, 7 | # format: "$message\n", 8 | # level: String.to_atom(System.get_env("LOG_LEVEL") || "info"), 9 | # handle_otp_reports: true, 10 | # handle_sasl_reports: true 11 | end 12 | -------------------------------------------------------------------------------- /example/.formatter.exs: -------------------------------------------------------------------------------- 1 | # Used by "mix format" 2 | [ 3 | inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] 4 | ] 5 | -------------------------------------------------------------------------------- /example/.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover/ 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps/ 9 | 10 | # Where third-party dependencies like ExDoc output generated docs. 11 | /doc/ 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 
17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Ignore package tarball (built via "mix hex.build"). 23 | example-*.tar 24 | 25 | -------------------------------------------------------------------------------- /example/README.md: -------------------------------------------------------------------------------- 1 | # Sworm Example 2 | 3 | This example creates an `Example.Swarm` and adds it to the application's supervision tree. 4 | 5 | From now on, you can call `Swarm` functions as usual on this module: 6 | 7 | ``` 8 | iex> Example.Swarm.register_name("foo", Example.Worker, :start_link, []) 9 | {:ok, #PID<0.603.0>} 10 | iex> Example.Swarm.register_name("bar", Example.Worker, :start_link, []) 11 | {:ok, #PID<17229.814.0>} 12 | iex> Example.Swarm.registered 13 | [ 14 | {"foo", #PID<0.603.0>}, 15 | {"bar", #PID<17229.814.0>} 16 | ] 17 | ``` 18 | 19 | ## Running in a cluster 20 | 21 | The `start.sh` script lets you start a test cluster: 22 | 23 | `./start.sh a` -> starts `a@127.0.0.1` node, etc. 24 | 25 | You still need to manually connect the nodes using something like `Node.ping :"b@127.0.0.1"`. 26 | 27 | The Example.Swarm automatically distributes itself over the cluster, 28 | so when using `register_name` you see the processes on all nodes, and 29 | you see local and remote processes mixed when calling 30 | `Example.Swarm.registered()`. 31 | -------------------------------------------------------------------------------- /example/lib/example.ex: -------------------------------------------------------------------------------- 1 | defmodule Example do 2 | defmodule TestServer do 3 | require Logger 4 | 5 | use GenServer 6 | 7 | def start_link() do 8 | GenServer.start_link(__MODULE__, []) 9 | end 10 | 11 | def init(a) do 12 | {:ok, a} 13 | end 14 | 15 | def handle_info({Example.Swarm, :begin_handoff, delegate, ref}, state) do 16 | Logger.info("begin handoff") 17 | 18 | send(delegate, {ref, :handoff_state, "asdf"}) 19 | {:noreply, state} 20 | end 21 | end 22 | 23 | def process(name) do 24 | Example.Swarm.register_name(name, TestServer, :start_link, []) 25 | end 26 | 27 | defmodule Counter do 28 | require Logger 29 | 30 | use GenServer 31 | 32 | def start_link() do 33 | GenServer.start_link(__MODULE__, []) 34 | end 35 | 36 | def init(_a) do 37 | :timer.send_interval(1000, :count) 38 | {:ok, 0} 39 | end 40 | 41 | def handle_info({Example.Swarm, :begin_handoff, delegate, ref}, state) do 42 | IO.puts("Begin handoff: #{state}") 43 | send(delegate, {ref, :handoff_state, state}) 44 | {:noreply, state} 45 | end 46 | 47 | def handle_info({Example.Swarm, :end_handoff, state}, _state) do 48 | IO.puts("End handoff: #{state}") 49 | {:noreply, state} 50 | end 51 | 52 | def handle_info(:count, state) do 53 | IO.puts("Count: #{state}") 54 | {:noreply, state + 1} 55 | end 56 | end 57 | 58 | def counter(name) do 59 | Example.Swarm.register_name(name, Counter, :start_link, []) 60 | end 61 | 62 | def c do 63 | counter(:a) 64 | end 65 | 66 | def many do 67 | for n <- 1..10, do: counter(n) 68 | end 69 | end 70 | -------------------------------------------------------------------------------- /example/lib/example/application.ex: -------------------------------------------------------------------------------- 1 | defmodule Example.Application do 2 | # See https://hexdocs.pm/elixir/Application.html 3 | # for more information on OTP Applications 4 | @moduledoc false 5 | 6 | use Application 7 | 8 | def start(_type, _args) do 9 | # List all child processes to be supervised 10 |
children = [ 11 | Example.Swarm 12 | ] 13 | 14 | spawn(fn -> 15 | Process.sleep(2000) 16 | Node.ping(:"a@127.0.0.1") 17 | Node.ping(:"b@127.0.0.1") 18 | Node.ping(:"c@127.0.0.1") 19 | end) 20 | 21 | # See https://hexdocs.pm/elixir/Supervisor.html 22 | # for other strategies and supported options 23 | opts = [strategy: :one_for_one, name: Example.Supervisor] 24 | Supervisor.start_link(children, opts) 25 | end 26 | end 27 | -------------------------------------------------------------------------------- /example/lib/example/swarm.ex: -------------------------------------------------------------------------------- 1 | defmodule Example.Swarm do 2 | use Sworm, handoff: true, restart: :temporary 3 | end 4 | -------------------------------------------------------------------------------- /example/lib/example/worker.ex: -------------------------------------------------------------------------------- 1 | defmodule Example.Worker do 2 | use GenServer 3 | 4 | def mass_start(name, n \\ 10) do 5 | for _ <- 1..n do 6 | Task.async(fn -> 7 | Example.Swarm.register_name(name, __MODULE__, :start_link, []) 8 | end) 9 | end 10 | |> Task.await_many() 11 | end 12 | 13 | def start_link() do 14 | GenServer.start_link(__MODULE__, []) 15 | end 16 | 17 | def handle_call(:state, _from, state) do 18 | {:reply, state, state} 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /example/mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Example.MixProject do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :example, 7 | version: "0.1.0", 8 | elixir: "~> 1.8", 9 | start_permanent: Mix.env() == :prod, 10 | deps: deps() 11 | ] 12 | end 13 | 14 | # Run "mix help compile.app" to learn about applications. 15 | def application do 16 | [ 17 | extra_applications: [:logger], 18 | mod: {Example.Application, []} 19 | ] 20 | end 21 | 22 | # Run "mix help deps" to learn about dependencies. 
23 | defp deps do 24 | [ 25 | {:sworm, path: ".."} 26 | ] 27 | end 28 | end 29 | -------------------------------------------------------------------------------- /example/mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "delta_crdt": {:hex, :delta_crdt, "0.6.4", "79d235eef82a58bb0cb668bc5b9558d2e65325ccb46b74045f20b36fd41671da", [:mix], [{:merkle_map, "~> 0.2.0", [hex: :merkle_map, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "4a81f579c06aeeb625db54c6c109859a38aa00d837e3e7f8ac27b40cea34885a"}, 3 | "horde": {:hex, :horde, "0.8.7", "e51ab8e0e5bc7dcd0caa85d84b144cccfde97994bd865d822c7e489746b87e7f", [:mix], [{:delta_crdt, "~> 0.6.2", [hex: :delta_crdt, repo: "hexpm", optional: false]}, {:libring, "~> 1.4", [hex: :libring, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_poller, "~> 0.5.0 or ~> 1.0", [hex: :telemetry_poller, repo: "hexpm", optional: false]}], "hexpm", "835aede887d777542f85e0a88293c18113abcc1356006050ec216da16aa5e0e3"}, 4 | "libring": {:hex, :libring, "1.6.0", "d5dca4bcb1765f862ab59f175b403e356dec493f565670e0bacc4b35e109ce0d", [:mix], [], "hexpm", "5e91ece396af4bce99953d49ee0b02f698cd38326d93cd068361038167484319"}, 5 | "merkle_map": {:hex, :merkle_map, "0.2.1", "01a88c87a6b9fb594c67c17ebaf047ee55ffa34e74297aa583ed87148006c4c8", [:mix], [], "hexpm", "fed4d143a5c8166eee4fa2b49564f3c4eace9cb252f0a82c1613bba905b2d04d"}, 6 | "murmur": {:hex, :murmur, "1.0.1", "a6e6bced2dd0d666090a9cf3e73699f3b9176bbcf32d35b0f022f137667608e3", [:mix], [], "hexpm"}, 7 | "telemetry": {:hex, :telemetry, "1.1.0", "a589817034a27eab11144ad24d5c0f9fab1f58173274b1e9bae7074af9cbee51", [:rebar3], [], "hexpm", "b727b2a1f75614774cff2d7565b64d0dfa5bd52ba517f16543e6fc7efcc0df48"}, 8 | "telemetry_poller": {:hex, :telemetry_poller, "1.0.0", "db91bb424e07f2bb6e73926fcafbfcbcb295f0193e0a00e825e589a0a47e8453", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "b3a24eafd66c3f42da30fc3ca7dda1e9d546c12250a2d60d7b81d264fbec4f6e"}, 9 | "xxhash": {:hex, :xxhash, "0.2.1", "ab0893a8124f3c11116c57e500485dc5f67817d1d4c44f0fff41f3fd3c590607", [:mix], [], "hexpm"}, 10 | } 11 | -------------------------------------------------------------------------------- /example/start.sh: -------------------------------------------------------------------------------- 1 | NODE=$1 2 | iex --cookie cookiexyz --name $NODE@127.0.0.1 -S mix 3 | -------------------------------------------------------------------------------- /example/test/example_test.exs: -------------------------------------------------------------------------------- 1 | defmodule ExampleTest do 2 | use ExUnit.Case 3 | doctest Example 4 | 5 | test "greets the world" do 6 | assert Example.hello() == :world 7 | end 8 | end 9 | -------------------------------------------------------------------------------- /example/test/test_helper.exs: -------------------------------------------------------------------------------- 1 | ExUnit.start() 2 | -------------------------------------------------------------------------------- /lib/sworm.ex: -------------------------------------------------------------------------------- 1 | defmodule Sworm do 2 | @moduledoc """ 3 | Sworm takes the accessible API from 4 | [Swarm](https://github.com/bitwalker/swarm), and combines it with 5 | the robustness of 
[Horde](https://github.com/derekkraan/horde). 6 | 7 | It strives to be a combination of a global, distributed process 8 | registry and supervisor, accessible through a friendly API. 9 | 10 | ## Usage 11 | 12 | The concept behind Sworm is that there can be multiple, distinct 13 | "sworms" living inside a cluster of BEAM nodes. To define a Sworm, 14 | you define a module like this: 15 | 16 | defmodule MyProcesses do 17 | use Sworm 18 | end 19 | 20 | Now, the `MyProcesses` module must be added to your application's supervison tree. 21 | 22 | When you now start the application, you can use the functions from 23 | the `Sworm` module inside your `MyProcesses` module: 24 | 25 | {:ok, pid} = MyProcesses.register_name("my worker", MyWorker, :start_link, [arg1, arg2]) 26 | 27 | 28 | """ 29 | 30 | alias Sworm.Main 31 | 32 | @doc """ 33 | Create a child specification for adding a new Sworm to the supervisor tree. 34 | """ 35 | @spec child_spec(sworm :: atom(), opts :: [term()]) :: Supervisor.child_spec() 36 | defdelegate child_spec(sworm, opts \\ []), to: Main 37 | 38 | @doc """ 39 | Start and link a Sworm in a standalone fashion. 40 | 41 | > You almost will never need this function, as it is more usual to 42 | > start a Sworm directly in a supervisor tree, using the provided 43 | > child_spec function. 44 | """ 45 | @spec start_link(sworm :: atom(), opts :: [term()]) :: {:ok, pid()} 46 | defdelegate start_link(sworm, opts \\ []), to: Main 47 | 48 | @doc """ 49 | Register a name in the given Sworm. This function takes a 50 | module/function/args triplet, and starts the process, registers the 51 | pid with the given name, and handles cluster topology changes by 52 | restarting the process on its new node using the given MFA. 53 | 54 | Processes that are started this way are added to the Sworm's dynamic 55 | Horde supervisor, distributed over the members of the Horde 56 | according to its cluster strategy, and restarted when they crash. 57 | 58 | When the node on which the process is spawned exits, the processes 59 | are restarted on one of the other nodes in the cluster. 60 | """ 61 | @spec register_name( 62 | sworm :: atom(), 63 | name :: term(), 64 | module :: atom(), 65 | function :: atom(), 66 | args :: [term] 67 | ) :: {:ok, pid} | {:error, term} 68 | defdelegate register_name(sworm, name, m, f, a), to: Main 69 | 70 | @doc """ 71 | Registers the given name to the given process. Names 72 | registered this way will not be shifted when the cluster 73 | topology changes, and are not restarted by Sworm. 74 | 75 | If no pid is given, `self()` is used for the registration. 76 | """ 77 | @spec register_name(sworm :: atom(), name :: term(), pid :: pid()) :: :yes | :no 78 | defdelegate register_name(sworm, name, pid \\ self()), to: Main 79 | 80 | @doc """ 81 | Either finds the named process in the sworm or registers it using 82 | the ``register/4`` function. 83 | """ 84 | @spec whereis_or_register_name( 85 | sworm :: atom(), 86 | name :: term(), 87 | module :: atom(), 88 | function :: atom(), 89 | args :: [term] 90 | ) :: {:ok, pid()} | {:error, term()} 91 | defdelegate whereis_or_register_name(sworm, name, m, f, a), to: Main 92 | 93 | @doc """ 94 | Unregisters the given name from the sworm. 95 | """ 96 | @spec unregister_name(sworm :: atom(), name :: term()) :: :ok 97 | defdelegate unregister_name(sworm, name), to: Main 98 | 99 | @doc """ 100 | Get the pid of a registered name within a sworm. 
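Returns `:undefined` if no process is registered under the given name.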
101 | """ 102 | @spec whereis_name(sworm :: atom(), name :: term()) :: pid() | nil 103 | defdelegate whereis_name(sworm, name), to: Main 104 | 105 | @doc """ 106 | Gets a list of all registered names and their pids within a sworm 107 | """ 108 | @spec registered(sworm :: atom()) :: [{name :: term(), pid()}] 109 | defdelegate registered(sworm), to: Main 110 | 111 | @doc """ 112 | Joins a process to a group. 113 | 114 | Returns an error when the given process is not part of the sworm. 115 | """ 116 | @spec join(sworm :: atom(), term(), pid()) :: :ok | {:error, :not_found} 117 | defdelegate join(sworm, group, pid \\ self()), to: Main 118 | 119 | @doc """ 120 | Removes a process from a group 121 | 122 | Returns an error when the given process is not part of the sworm. 123 | """ 124 | @spec leave(sworm :: atom(), term(), pid()) :: :ok | {:error, :not_found} 125 | defdelegate leave(sworm, group, pid \\ self()), to: Main 126 | 127 | @doc """ 128 | Gets all the members of a group within the sworm. 129 | 130 | Returns a list of pids. 131 | """ 132 | @spec members(sworm :: atom(), term()) :: [pid()] 133 | defdelegate members(sworm, group), to: Main 134 | 135 | defmacro __using__(opts), do: Sworm.Macro.using(opts) 136 | end 137 | -------------------------------------------------------------------------------- /lib/sworm/application.ex: -------------------------------------------------------------------------------- 1 | defmodule Sworm.Application do 2 | @moduledoc false 3 | 4 | use Application 5 | 6 | @env Mix.env() 7 | 8 | def start(_type, _args) do 9 | opts = [strategy: :one_for_one, name: Sworm.Supervisor] 10 | Supervisor.start_link(children(@env), opts) 11 | end 12 | 13 | if @env == :test do 14 | defp children(:test) do 15 | children(:prod) ++ [HandoffSworm] 16 | end 17 | end 18 | 19 | defp children(_) do 20 | [ 21 | {Horde.Registry, name: Sworm.Directory, keys: :unique}, 22 | Sworm.DirectoryManager 23 | ] 24 | end 25 | end 26 | -------------------------------------------------------------------------------- /lib/sworm/delegate.ex: -------------------------------------------------------------------------------- 1 | defmodule Sworm.Delegate do 2 | @moduledoc false 3 | 4 | use GenServer 5 | import Sworm.Util 6 | require Logger 7 | 8 | def start_link(sworm, name, mfa_or_pid) do 9 | GenServer.start_link(__MODULE__, {sworm, name, mfa_or_pid}, []) 10 | end 11 | 12 | def start(sworm, name, mfa_or_pid) do 13 | GenServer.start(__MODULE__, {sworm, name, mfa_or_pid}, []) 14 | end 15 | 16 | ### 17 | 18 | defmodule State do 19 | @moduledoc false 20 | 21 | defstruct [:pid, :name, :sworm] 22 | end 23 | 24 | @impl GenServer 25 | def init({sworm, name, mfa_or_pid}) do 26 | Process.flag(:trap_exit, true) 27 | 28 | with {:ok, _} <- Horde.Registry.register(registry_name(sworm), {:delegate, name}, nil), 29 | {:ok, pid, self_started} <- ensure_worker(mfa_or_pid) do 30 | Process.link(pid) 31 | 32 | with {:ok, _} <- Horde.Registry.register(registry_name(sworm), {:worker, pid}, nil), 33 | {_, _} <- 34 | Horde.Registry.update_value(registry_name(sworm), {:delegate, name}, fn _ -> pid end) do 35 | check_end_handoff(pid, sworm, name) 36 | 37 | {:ok, %State{pid: pid, name: name, sworm: sworm}} 38 | else 39 | {:error, {:already_registered, d}} -> 40 | Logger.debug( 41 | "already registered worker for #{inspect(name)}, to #{inspect(d)}, bail out" 42 | ) 43 | 44 | init_bail(self_started, pid) 45 | 46 | :error -> 47 | Logger.debug("pid update failed for #{inspect(name)}") 48 | init_bail(self_started, pid) 49 | end 50 | else 51 | 
{:error, {:already_registered, delegate}} -> 52 | Logger.debug( 53 | "already registered delegate for #{inspect(name)}, to #{inspect(delegate)}, bail out" 54 | ) 55 | 56 | {:ok, worker_pid} = GenServer.call(delegate, :get_worker_pid) 57 | {:stop, {:shutdown, {:already_started, worker_pid}}} 58 | 59 | {:error, reason} -> 60 | {:stop, reason} 61 | end 62 | end 63 | 64 | defp init_bail(self_started, pid) do 65 | Process.unlink(pid) 66 | 67 | if self_started do 68 | Process.exit(pid, :normal) 69 | end 70 | 71 | {:stop, {:shutdown, {:already_started, pid}}} 72 | end 73 | 74 | defp ensure_worker(pid) when is_pid(pid) do 75 | {:ok, pid, false} 76 | end 77 | 78 | defp ensure_worker({m, f, a}) do 79 | with {:ok, pid} <- apply(m, f, a) do 80 | {:ok, pid, true} 81 | end 82 | end 83 | 84 | @impl true 85 | def handle_call(:get_worker_pid, _from, state) do 86 | {:reply, {:ok, state.pid}, state} 87 | end 88 | 89 | def handle_call({:register_name, name}, _from, state) do 90 | reply = Horde.Registry.register(registry_name(state.sworm), {:delegate, name}, state.pid) 91 | {:reply, reply, state} 92 | end 93 | 94 | def handle_call({:join, group}, _from, state) do 95 | case Horde.Registry.register(registry_name(state.sworm), {:group, group, self()}, state.pid) do 96 | {:ok, _} -> :ok 97 | {:error, {:already_registered, _}} -> :ok 98 | end 99 | 100 | {:reply, :ok, state} 101 | end 102 | 103 | def handle_call({:leave, group}, _from, state) do 104 | :ok = Horde.Registry.unregister(registry_name(state.sworm), {:group, group, self()}) 105 | {:reply, :ok, state} 106 | end 107 | 108 | @impl true 109 | def handle_info({:EXIT, pid, reason}, %State{pid: pid} = state) do 110 | {:stop, reason, state} 111 | end 112 | 113 | def handle_info({:EXIT, _, :shutdown}, state) do 114 | {:stop, :shutdown, state} 115 | end 116 | 117 | def handle_info({:EXIT, _, {:name_conflict, {_name, _}, _reg, _winner}}, state) do 118 | Horde.DynamicSupervisor.terminate_child(supervisor_name(state.sworm), self()) 119 | {:stop, :normal, state} 120 | end 121 | 122 | def handle_info({:EXIT, _, reason}, state) do 123 | {:stop, reason, state} 124 | end 125 | 126 | def handle_info(message, state) do 127 | Logger.info("Delegate #{inspect(self())} Got unexpected info message: #{inspect(message)}") 128 | {:noreply, state} 129 | end 130 | 131 | @impl true 132 | def terminate(:shutdown, state) do 133 | if get_sworm_config(state.sworm, :handoff, false) do 134 | perform_begin_handoff(state) 135 | end 136 | end 137 | 138 | def terminate(_reason, _state) do 139 | :ok 140 | end 141 | 142 | ### 143 | 144 | defp perform_begin_handoff(state) do 145 | Logger.debug("Delegate #{inspect(self())} state handoff") 146 | 147 | ref = make_ref() 148 | send(state.pid, {state.sworm, :begin_handoff, self(), ref}) 149 | 150 | receive do 151 | {^ref, :ignore} -> 152 | :ok 153 | 154 | {^ref, :handoff_state, data} -> 155 | Horde.Registry.register(registry_name(state.sworm), {:handoff_state, state.name}, data) 156 | # give CRDT some time to propagate before we shut down 157 | Process.sleep(2000) 158 | 159 | :ok 160 | after 161 | 1000 -> 162 | :ok 163 | end 164 | 165 | :ok 166 | end 167 | 168 | defp check_end_handoff(pid, sworm, name) do 169 | Horde.Registry.select(registry_name(sworm), [ 170 | {{{:handoff_state, name}, :"$1", :"$2"}, [], [{{:"$2"}}]} 171 | ]) 172 | |> case do 173 | [] -> 174 | :ok 175 | 176 | [{state}] -> 177 | # notify the worker to restore itself 178 | send(pid, {sworm, :end_handoff, state}) 179 | end 180 | end 181 | end 182 | 
-------------------------------------------------------------------------------- /lib/sworm/directory_manager.ex: -------------------------------------------------------------------------------- 1 | defmodule Sworm.DirectoryManager do 2 | @moduledoc false 3 | 4 | @selector [{{:"$1", :"$2", :"$3"}, [], [{{:"$1", :"$3"}}]}] 5 | 6 | use GenServer 7 | 8 | def start_link(_) do 9 | GenServer.start_link(__MODULE__, [], name: __MODULE__) 10 | end 11 | 12 | def register(sworm) do 13 | GenServer.call(__MODULE__, {:register, sworm}) 14 | end 15 | 16 | def nodes_for_sworm(sworm) do 17 | Horde.Registry.select(Sworm.Directory, @selector) 18 | |> Enum.map(fn 19 | {{^sworm, node}, :alive} -> node 20 | _ -> nil 21 | end) 22 | |> Enum.reject(&is_nil/1) 23 | |> Enum.sort() 24 | end 25 | 26 | ### 27 | 28 | require Logger 29 | 30 | def init([]) do 31 | :net_kernel.monitor_nodes(true, []) 32 | {:ok, update_nodes([])} 33 | end 34 | 35 | def handle_call({:register, sworm}, _from, state) do 36 | # register this sworm in the Sworm Directory 37 | reply = Horde.Registry.register(Sworm.Directory, {sworm, node()}, :alive) 38 | 39 | {:reply, reply, state} 40 | end 41 | 42 | def handle_info({node_event, _node}, state) 43 | when node_event == :nodeup or node_event == :nodedown do 44 | {:noreply, update_nodes(state)} 45 | end 46 | 47 | def handle_info(_request, state) do 48 | {:noreply, state} 49 | end 50 | 51 | def update_nodes(state) do 52 | nodes = Node.list([:this, :visible]) |> Enum.sort() 53 | 54 | case nodes == state do 55 | true -> 56 | state 57 | 58 | false -> 59 | Logger.info("Node directory list updated to #{inspect(nodes)}") 60 | Horde.Cluster.set_members(Sworm.Directory, Enum.map(nodes, &{Sworm.Directory, &1})) 61 | 62 | # remove all entries from directory which are not part of the current nodes 63 | for {{_sworm, node} = key, status} <- Horde.Registry.select(Sworm.Directory, @selector) do 64 | node_ok = Enum.member?(nodes, node) 65 | 66 | case status do 67 | :alive when node_ok -> 68 | :ok 69 | 70 | :alive -> 71 | # need to filter it, temporarily; node might reappear 72 | register_or_update(key, {:dead, now()}) 73 | 74 | {:dead, _ts} when node_ok -> 75 | # it reappeared; set it to alive again 76 | register_or_update(key, :alive) 77 | 78 | {:dead, ts} -> 79 | if now() - ts > 3600_000 do 80 | # too long; unregister the service for good. 
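# (i.e. the node has been gone for over an hour)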
81 | Horde.Registry.unregister(Sworm.Directory, key) 82 | end 83 | 84 | :ok 85 | end 86 | end 87 | 88 | nodes 89 | end 90 | end 91 | 92 | defp register_or_update(key, value) do 93 | with {:error, {:already_registered, _}} <- 94 | Horde.Registry.register(Sworm.Directory, key, value) do 95 | Horde.Registry.update_value(Sworm.Directory, key, fn _ -> value end) 96 | end 97 | end 98 | 99 | defp now(), do: :erlang.system_time(:millisecond) 100 | end 101 | -------------------------------------------------------------------------------- /lib/sworm/macro.ex: -------------------------------------------------------------------------------- 1 | defmodule Sworm.Macro do 2 | @moduledoc false 3 | 4 | def using(opts) do 5 | (Sworm.__info__(:functions) 6 | |> Enum.map(fn {name, arity} -> 7 | args = make_args(arity - 1) 8 | 9 | quote do 10 | def unquote(name)(unquote_splicing(args)) do 11 | apply(Sworm.Main, unquote(name), [__MODULE__, unquote_splicing(args)]) 12 | end 13 | end 14 | end)) ++ 15 | [ 16 | quote do 17 | def configure do 18 | Application.put_env(:sworm, __MODULE__, unquote(opts)) 19 | 20 | :ok 21 | end 22 | 23 | @on_load :configure 24 | end 25 | ] 26 | end 27 | 28 | defp make_args(0), do: [] 29 | defp make_args(arity), do: 1..arity |> Enum.map(fn n -> Macro.var(:"arg#{n}", nil) end) 30 | end 31 | -------------------------------------------------------------------------------- /lib/sworm/main.ex: -------------------------------------------------------------------------------- 1 | defmodule Sworm.Main do 2 | @moduledoc false 3 | 4 | import Sworm.Util 5 | 6 | def child_spec(sworm, opts) do 7 | %{ 8 | id: sworm, 9 | start: {Sworm.Supervisor, :start_link, [sworm, opts]}, 10 | restart: :permanent, 11 | shutdown: 5000, 12 | type: :supervisor 13 | } 14 | end 15 | 16 | def start_link(sworm, opts \\ []) do 17 | %{start: {m, f, a}} = child_spec(sworm, opts) 18 | {:ok, _} = apply(m, f, a) 19 | end 20 | 21 | def register_name(sworm, name, m, f, a) do 22 | spec = %{ 23 | id: name, 24 | start: {Sworm.Delegate, :start_link, [sworm, name, {m, f, a}]}, 25 | type: :worker, 26 | restart: get_sworm_config(sworm, :restart, :transient), 27 | shutdown: 5000 28 | } 29 | 30 | with :undefined <- whereis_name(sworm, name), 31 | {:ok, delegate_pid} <- Horde.DynamicSupervisor.start_child(supervisor_name(sworm), spec) do 32 | GenServer.call(delegate_pid, :get_worker_pid) 33 | else 34 | pid when is_pid(pid) -> 35 | {:error, {:already_started, pid}} 36 | 37 | {:error, {:shutdown, {:already_started, pid}}} -> 38 | {:error, {:already_started, pid}} 39 | 40 | {:error, reason} -> 41 | {:error, reason} 42 | end 43 | end 44 | 45 | def register_name(sworm, name, pid \\ self()) do 46 | reply = 47 | case lookup(sworm, {:worker, pid}) do 48 | [{delegate, nil}] -> GenServer.call(delegate, {:register_name, name}) 49 | _ -> Sworm.Delegate.start(sworm, name, pid) 50 | end 51 | 52 | case reply do 53 | {:ok, _} -> :yes 54 | {:error, _} -> :no 55 | end 56 | end 57 | 58 | def whereis_or_register_name(sworm, name, m, f, a) do 59 | case whereis_name(sworm, name) do 60 | :undefined -> 61 | with {:error, {:already_started, pid}} <- register_name(sworm, name, m, f, a) do 62 | {:ok, pid} 63 | end 64 | 65 | pid when is_pid(pid) -> 66 | {:ok, pid} 67 | end 68 | end 69 | 70 | def unregister_name(sworm, name) do 71 | case lookup(sworm, {:delegate, name}) do 72 | [{delegate, _worker}] -> 73 | Horde.DynamicSupervisor.terminate_child(supervisor_name(sworm), delegate) 74 | 75 | _ -> 76 | {:error, :not_found} 77 | end 78 | end 79 | 80 | def 
whereis_name(sworm, name) do 81 | case lookup(sworm, {:delegate, name}) do 82 | [{_delegate, worker_pid}] when is_pid(worker_pid) -> 83 | worker_pid 84 | 85 | _ -> 86 | :undefined 87 | end 88 | end 89 | 90 | def registered(sworm) do 91 | match = [{{{:delegate, :"$1"}, :"$2", :"$3"}, [], [{{:"$1", :"$3"}}]}] 92 | 93 | Horde.Registry.select(registry_name(sworm), match) 94 | |> Enum.filter(&is_pid(elem(&1, 1))) 95 | end 96 | 97 | def members(sworm, group) do 98 | match = [{{{:group, group, :_}, :"$2", :"$3"}, [], [:"$3"]}] 99 | Horde.Registry.select(registry_name(sworm), match) 100 | end 101 | 102 | def join(sworm, group, worker \\ self()) do 103 | case lookup(sworm, {:worker, worker}) do 104 | [{delegate_pid, nil}] -> 105 | GenServer.call(delegate_pid, {:join, group}) 106 | 107 | _ -> 108 | {:error, :not_found} 109 | end 110 | end 111 | 112 | def leave(sworm, group, worker \\ self()) do 113 | case lookup(sworm, {:worker, worker}) do 114 | [{delegate_pid, nil}] -> 115 | GenServer.call(delegate_pid, {:leave, group}) 116 | 117 | _ -> 118 | {:error, :not_found} 119 | end 120 | end 121 | 122 | ### 123 | 124 | defp lookup(sworm, key) do 125 | Horde.Registry.lookup(registry_name(sworm), key) 126 | end 127 | end 128 | -------------------------------------------------------------------------------- /lib/sworm/manager.ex: -------------------------------------------------------------------------------- 1 | defmodule Sworm.Manager do 2 | @moduledoc false 3 | 4 | use GenServer 5 | 6 | import Sworm.Util 7 | 8 | def child_spec({sworm, opts}) do 9 | %{ 10 | id: manager_name(sworm), 11 | start: {__MODULE__, :start_link, [{sworm, opts}]}, 12 | restart: :transient 13 | } 14 | end 15 | 16 | def start_link({sworm, opts}) do 17 | GenServer.start_link(__MODULE__, {sworm, opts}, name: manager_name(sworm)) 18 | end 19 | 20 | ### 21 | 22 | require Logger 23 | 24 | defmodule State do 25 | @moduledoc false 26 | defstruct sworm: nil, nodes: [], opts: [] 27 | end 28 | 29 | def init({sworm, opts}) do 30 | :timer.send_interval(1000, :check) 31 | {:ok, %State{sworm: sworm, opts: opts}} 32 | end 33 | 34 | def handle_info(:check, state) do 35 | Sworm.DirectoryManager.register(state.sworm) 36 | {:noreply, update_nodes(state)} 37 | end 38 | 39 | def update_nodes(state) do 40 | nodes = Sworm.DirectoryManager.nodes_for_sworm(state.sworm) 41 | 42 | case nodes == state.nodes do 43 | true -> 44 | state 45 | 46 | false -> 47 | Logger.debug("[#{state.sworm}] Node list updated to #{inspect(nodes)}") 48 | 49 | for mod <- [supervisor_name(state.sworm), registry_name(state.sworm)] do 50 | Horde.Cluster.set_members(mod, Enum.map(nodes, fn node -> {mod, node} end)) 51 | end 52 | 53 | %State{state | nodes: nodes} 54 | end 55 | end 56 | end 57 | -------------------------------------------------------------------------------- /lib/sworm/supervisor.ex: -------------------------------------------------------------------------------- 1 | defmodule Sworm.Supervisor do 2 | @moduledoc false 3 | @behaviour Supervisor 4 | 5 | require Logger 6 | import Sworm.Util 7 | 8 | def start_link(sworm, opts) do 9 | Supervisor.start_link(__MODULE__, {sworm, opts}, name: sworm) 10 | end 11 | 12 | @impl true 13 | def init({sworm, _opts} = arg) do 14 | distribution_strategy = 15 | get_sworm_config(sworm, :distribution_strategy, Horde.UniformDistribution) 16 | 17 | delta_crdt_options = get_sworm_config(sworm, :delta_crdt_options, []) 18 | 19 | children = [ 20 | {Horde.Registry, 21 | name: registry_name(sworm), keys: :unique, delta_crdt_options: delta_crdt_options}, 
22 | {Horde.DynamicSupervisor, 23 | name: supervisor_name(sworm), 24 | strategy: :one_for_one, 25 | children: [], 26 | delta_crdt_options: delta_crdt_options, 27 | distribution_strategy: distribution_strategy}, 28 | {Sworm.Manager, arg} 29 | ] 30 | 31 | Supervisor.init(children, strategy: :one_for_all) 32 | end 33 | end 34 | -------------------------------------------------------------------------------- /lib/sworm/util.ex: -------------------------------------------------------------------------------- 1 | defmodule Sworm.Util do 2 | @moduledoc false 3 | 4 | def registry_name(sworm), do: Module.concat(sworm, "Registry") 5 | def supervisor_name(sworm), do: Module.concat(sworm, "Supervisor") 6 | def manager_name(sworm), do: Module.concat(sworm, "Manager") 7 | 8 | def get_sworm_config(sworm, key, default) do 9 | Application.get_env(:sworm, sworm, []) 10 | |> Keyword.get(key, default) 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Sworm.MixProject do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :sworm, 7 | version: File.read!("VERSION"), 8 | elixir: "~> 1.7", 9 | description: 10 | "A combination of a global, distributed process registry and supervisor, rolled into one, friendly API.", 11 | build_embedded: Mix.env() == :prod, 12 | start_permanent: Mix.env() == :prod, 13 | elixirc_paths: elixirc_paths(Mix.env()), 14 | package: package(), 15 | docs: docs(), 16 | deps: deps() 17 | ] 18 | end 19 | 20 | # Run "mix help compile.app" to learn about applications. 21 | def application do 22 | [ 23 | extra_applications: [:logger], 24 | mod: {Sworm.Application, []} 25 | ] 26 | end 27 | 28 | defp package do 29 | [ 30 | files: ["lib", "mix.exs", "*.md", "LICENSE", "VERSION"], 31 | maintainers: ["Arjan Scherpenisse"], 32 | licenses: ["MIT"], 33 | links: %{Github: "https://github.com/arjan/sworm"} 34 | ] 35 | end 36 | 37 | defp docs do 38 | [ 39 | main: "readme", 40 | formatter_opts: [gfm: true], 41 | extras: [ 42 | "README.md" 43 | ] 44 | ] 45 | end 46 | 47 | # Run "mix help deps" to learn about dependencies. 
48 | defp deps do 49 | [ 50 | {:horde, "~> 0.6"}, 51 | {:ex_doc, "~> 0.20", only: :dev, runtime: false}, 52 | {:dialyxir, "~> 1.0.0-rc.6", only: :dev, runtime: false}, 53 | {:ex_unit_clustered_case, github: "bitwalker/ex_unit_clustered_case", only: :test} 54 | ] 55 | end 56 | 57 | defp elixirc_paths(:test), do: ["lib", "test/support"] 58 | defp elixirc_paths(_), do: ["lib"] 59 | end 60 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "delta_crdt": {:hex, :delta_crdt, "0.5.10", "e866f8d1b89bee497a98b9793e9ba0ea514112a1c41a0c30dcde3463d4984d14", [:mix], [{:merkle_map, "~> 0.2.0", [hex: :merkle_map, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "ed5c685df9528788d7c056762c23f75358f3cadd4779698188a55ccae24d087a"}, 3 | "dialyxir": {:hex, :dialyxir, "1.0.0-rc.7", "6287f8f2cb45df8584317a4be1075b8c9b8a69de8eeb82b4d9e6c761cf2664cd", [:mix], [{:erlex, ">= 0.2.5", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "506294d6c543e4e5282d4852aead19ace8a35bedeb043f9256a06a6336827122"}, 4 | "earmark": {:hex, :earmark, "1.4.3", "364ca2e9710f6bff494117dbbd53880d84bebb692dafc3a78eb50aa3183f2bfd", [:mix], [], "hexpm", "8cf8a291ebf1c7b9539e3cddb19e9cef066c2441b1640f13c34c1d3cfc825fec"}, 5 | "earmark_parser": {:hex, :earmark_parser, "1.4.31", "a93921cdc6b9b869f519213d5bc79d9e218ba768d7270d46fdcf1c01bacff9e2", [:mix], [], "hexpm", "317d367ee0335ef037a87e46c91a2269fef6306413f731e8ec11fc45a7efd059"}, 6 | "erlex": {:hex, :erlex, "0.2.5", "e51132f2f472e13d606d808f0574508eeea2030d487fc002b46ad97e738b0510", [:mix], [], "hexpm", "756d3e19b056339af674b715fdd752c5dac468cf9d0e2d1a03abf4574e99fbf8"}, 7 | "ex_doc": {:hex, :ex_doc, "0.29.3", "f07444bcafb302db86e4f02d8bbcd82f2e881a0dcf4f3e4740e4b8128b9353f7", [:mix], [{:earmark_parser, "~> 1.4.31", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", "3dc6787d7b08801ec3b51e9bd26be5e8826fbf1a17e92d1ebc252e1a1c75bfe1"}, 8 | "ex_unit_clustered_case": {:git, "https://github.com/bitwalker/ex_unit_clustered_case.git", "7762c0a0cce1c78703a1955ce9bc89c0ff9d2f88", []}, 9 | "horde": {:hex, :horde, "0.7.1", "161e140e4e4fab5416b90a4e5b68fbe7fb78f62b265f87f01d661a58aa72be0c", [:mix], [{:delta_crdt, "~> 0.5.10", [hex: :delta_crdt, repo: "hexpm", optional: false]}, {:libring, "~> 1.4", [hex: :libring, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_poller, "~> 0.4.0", [hex: :telemetry_poller, repo: "hexpm", optional: false]}], "hexpm", "cbf5416cd4d221be07420d40a575cdaa1300a03d7ef593f805705e6b54f6187c"}, 10 | "libring": {:hex, :libring, "1.6.0", "d5dca4bcb1765f862ab59f175b403e356dec493f565670e0bacc4b35e109ce0d", [:mix], [], "hexpm", "5e91ece396af4bce99953d49ee0b02f698cd38326d93cd068361038167484319"}, 11 | "makeup": {:hex, :makeup, "1.1.0", "6b67c8bc2882a6b6a445859952a602afc1a41c2e08379ca057c0f525366fc3ca", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "0a45ed501f4a8897f580eabf99a2e5234ea3e75a4373c8a52824f6e873be57a6"}, 12 | "makeup_elixir": {:hex, :makeup_elixir, "0.16.0", "f8c570a0d33f8039513fbccaf7108c5d750f47d8defd44088371191b76492b0b", [:mix], [{:makeup, 
"~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "28b2cbdc13960a46ae9a8858c4bebdec3c9a6d7b4b9e7f4ed1502f8159f338e7"}, 13 | "makeup_erlang": {:hex, :makeup_erlang, "0.1.1", "3fcb7f09eb9d98dc4d208f49cc955a34218fc41ff6b84df7c75b3e6e533cc65f", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "174d0809e98a4ef0b3309256cbf97101c6ec01c4ab0b23e926a9e17df2077cbb"}, 14 | "merkle_map": {:hex, :merkle_map, "0.2.0", "5391ac61e016ce4aeb66ce39f05206a382fd4b66ee4b63c08a261d5633eadd76", [:mix], [], "hexpm", "fb1cc3a80e0b0d439a83bdb42bde4d03e8970a436bc949b9fa8d951c18fdafde"}, 15 | "nimble_parsec": {:hex, :nimble_parsec, "1.2.3", "244836e6e3f1200c7f30cb56733fd808744eca61fd182f731eac4af635cc6d0b", [:mix], [], "hexpm", "c8d789e39b9131acf7b99291e93dae60ab48ef14a7ee9d58c6964f59efb570b0"}, 16 | "telemetry": {:hex, :telemetry, "0.4.1", "ae2718484892448a24470e6aa341bc847c3277bfb8d4e9289f7474d752c09c7f", [:rebar3], [], "hexpm", "4738382e36a0a9a2b6e25d67c960e40e1a2c95560b9f936d8e29de8cd858480f"}, 17 | "telemetry_poller": {:hex, :telemetry_poller, "0.4.1", "50d03d976a3b8ab4898d9e873852e688840df47685a13af90af40e1ba43a758b", [:rebar3], [{:telemetry, "~> 0.4", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "c5bacbbcd62c1fe4e4517485bd64312622e9b83683273dcf2627ff224d7d485b"}, 18 | } 19 | -------------------------------------------------------------------------------- /test/cluster_handoff_test.exs: -------------------------------------------------------------------------------- 1 | defmodule SwormClusterHandoffTest do 2 | use SwormCase 3 | 4 | import Sworm.Support.Helpers 5 | 6 | sworm_scenario nil, "given a healthy cluster" do 7 | test "state is propagated when process moves to another server on node shutdown", %{ 8 | cluster: c 9 | } do 10 | n = Cluster.random_member(c) 11 | 12 | Cluster.call(n, HandoffSworm, :start_one, ["hi"]) 13 | 14 | # settle 15 | until_match( 16 | [[{"hi", _}], [{"hi", _}]], 17 | Cluster.members(c) 18 | |> Enum.map(fn n -> Cluster.call(n, HandoffSworm, :registered, []) end) 19 | ) 20 | 21 | [[{_, pid}] | _] = 22 | Cluster.members(c) |> Enum.map(fn n -> Cluster.call(n, HandoffSworm, :registered, []) end) 23 | 24 | # set some state 25 | assert 0 == GenServer.call(pid, :get) 26 | assert 1 == GenServer.call(pid, :inc) 27 | assert 2 == GenServer.call(pid, :inc) 28 | assert 3 == GenServer.call(pid, :inc) 29 | assert 4 == GenServer.call(pid, :inc) 30 | 31 | Cluster.stop_node(c, node(pid)) 32 | 33 | [other] = Cluster.members(c) -- [node(pid)] 34 | 35 | wait_until(fn -> 36 | [{"hi", pid}] = Cluster.call(other, HandoffSworm, :registered, []) 37 | 38 | # process now runs on the other node 39 | assert node(pid) == other 40 | end) 41 | 42 | # get the new pid 43 | [{"hi", pid}] = Cluster.call(other, HandoffSworm, :registered, []) 44 | 45 | # ensure that the state has been moved through the handoff process 46 | until_match(4, GenServer.call(pid, :get)) 47 | end 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /test/cluster_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Swurm do 2 | use Sworm 3 | 4 | defmodule TestServer do 5 | use GenServer 6 | require Logger 7 | 8 | def start_link() do 9 | GenServer.start_link(__MODULE__, []) 10 | end 11 | 12 | def init(a) do 13 | Process.flag(:trap_exit, true) 14 | {:ok, a} 15 | end 16 | 17 | def 
handle_info(message, state) do 18 | Logger.warn("*** #{inspect(message)}") 19 | 20 | {:noreply, state} 21 | end 22 | end 23 | 24 | def start_one(name) do 25 | Swurm.register_name(name, TestServer, :start_link, []) 26 | end 27 | 28 | def start_many(name, n \\ 10) do 29 | for _ <- 1..n do 30 | Task.async(fn -> start_one(name) end) 31 | end 32 | |> Task.await_many() 33 | end 34 | end 35 | 36 | defmodule SwormClusterTest do 37 | use SwormCase 38 | 39 | sworm_scenario Swurm, "given a healthy cluster" do 40 | test "can call on all nodes", %{cluster: c} do 41 | assert [[], []] = 42 | Cluster.members(c) |> Enum.map(fn n -> Cluster.call(n, Swurm, :registered, []) end) 43 | end 44 | 45 | test "register process on one server; registration is propagated to other nodes", %{ 46 | cluster: c 47 | } do 48 | n = Cluster.random_member(c) 49 | Cluster.call(n, Swurm, :start_one, ["hi"]) 50 | 51 | # settle 52 | wait_until(fn -> 53 | match?( 54 | [[{"hi", p}], [{"hi", p}]], 55 | Cluster.members(c) |> Enum.map(fn n -> Cluster.call(n, Swurm, :registered, []) end) 56 | ) 57 | end) 58 | 59 | # now stop it 60 | [[{"hi", p}] | _] = 61 | Cluster.members(c) |> Enum.map(fn n -> Cluster.call(n, Swurm, :registered, []) end) 62 | 63 | GenServer.stop(p) 64 | 65 | # settle 66 | 67 | wait_until(fn -> 68 | [[], []] == 69 | Cluster.members(c) |> Enum.map(fn n -> Cluster.call(n, Swurm, :registered, []) end) 70 | end) 71 | end 72 | end 73 | 74 | def custom_sworm() do 75 | Swurm.start_link(delta_crdt_options: [sync_interval: 5000]) 76 | end 77 | 78 | sworm_scenario {__MODULE__, :custom_sworm, []}, 79 | "given a clean cluster for simultaneous registers", 80 | cluster_size: 5 do 81 | test "many simultaneous registers for the same name always result in a valid pid", %{ 82 | cluster: c 83 | } do 84 | [_, _, _, _, _] = Cluster.members(c) 85 | 86 | n_start = 100 87 | 88 | results = 89 | for n <- 1..200 do 90 | node = Cluster.random_member(c) 91 | 92 | Task.async(fn -> 93 | Process.sleep(Enum.random(1..100)) 94 | 95 | Cluster.call(node, Swurm, :start_many, ["hi_#{n}", n_start]) 96 | |> Enum.sort() 97 | |> Enum.reverse() 98 | end) 99 | end 100 | |> Task.await_many(20_000) 101 | 102 | for start_results <- results do 103 | [{:ok, pid} | rest] = start_results 104 | assert length(rest) == n_start - 1 105 | assert [{:error, {:already_started, ^pid}}] = Enum.uniq(rest) 106 | end 107 | end 108 | end 109 | 110 | sworm_scenario Swurm, "given another cluster for simultaneous registers", cluster_size: 2 do 111 | test "whereis_name always returns either a pid or undefined", %{ 112 | cluster: c 113 | } do 114 | [a, b] = Cluster.members(c) 115 | 116 | ta = 117 | Task.async(fn -> 118 | Cluster.call(a, fn -> 119 | for _ <- 1..1000 do 120 | Process.sleep(3) 121 | Swurm.whereis_name("test1") 122 | end 123 | end) 124 | end) 125 | 126 | tb = 127 | Task.async(fn -> 128 | Cluster.call(b, fn -> 129 | Swurm.start_one("test1") 130 | end) 131 | end) 132 | 133 | [r, {:ok, pid}] = Task.await_many([ta, tb]) 134 | 135 | r = r |> Enum.reject(&(&1 == :undefined)) |> Enum.uniq() 136 | assert r == [pid] 137 | end 138 | end 139 | 140 | sworm_scenario Swurm, "given a cluster that is shutting down" do 141 | test "register process on one server; process moves to other node when it goes down", %{ 142 | cluster: c 143 | } do 144 | n = Cluster.random_member(c) 145 | Cluster.call(n, Swurm, :start_one, ["hi"]) 146 | 147 | until_match([_], Cluster.call(n, Swurm, :registered, [])) 148 | 149 | [{"hi", pid}] = Cluster.call(n, Swurm, :registered, []) 150 | 151 | target_node = node(pid) 
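# the other cluster member should already see the registration before we stop the target node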
152 | [other_node] = Cluster.members(c) -- [target_node] 153 | 154 | until_match( 155 | [{"hi", ^pid}], 156 | Cluster.call(other_node, Swurm, :registered, []) 157 | ) 158 | 159 | Cluster.stop_node(c, target_node) 160 | 161 | wait_until(fn -> 162 | [{"hi", pid}] = Cluster.call(other_node, Swurm, :registered, []) 163 | 164 | # process now runs on the other node 165 | node(pid) == other_node 166 | end) 167 | end 168 | end 169 | 170 | sworm_scenario Swurm, "directory" do 171 | test "directory is updated when nodes join and leave", %{ 172 | cluster: c 173 | } do 174 | [a, b] = Cluster.members(c) 175 | 176 | until_match([_, _], Cluster.call(a, Sworm.DirectoryManager, :nodes_for_sworm, [Swurm])) 177 | 178 | Cluster.stop_node(c, b) 179 | 180 | until_match([_], Cluster.call(a, Sworm.DirectoryManager, :nodes_for_sworm, [Swurm])) 181 | end 182 | end 183 | 184 | require Logger 185 | 186 | sworm_scenario Swurm, "given a partitioned cluster" do 187 | test "resolves name conflicts", %{cluster: c} do 188 | [a, b] = Cluster.members(c) 189 | Cluster.partition(c, [[a], [b]]) 190 | 191 | assert {:ok, pid_a} = Cluster.call(a, Swurm, :start_one, ["hi"]) 192 | assert {:ok, pid_b} = Cluster.call(b, Swurm, :start_one, ["hi"]) 193 | 194 | assert pid_a != pid_b 195 | 196 | Process.sleep(500) 197 | 198 | Cluster.heal(c) 199 | 200 | wait_until(fn -> 201 | case {Cluster.call(a, Swurm, :registered, []), Cluster.call(b, Swurm, :registered, [])} do 202 | {[{"hi", pid}], [{"hi", pid}]} -> 203 | true 204 | 205 | _ -> 206 | false 207 | end 208 | end) 209 | 210 | # we now have only one pid 211 | [{"hi", pid}] = Cluster.call(a, Swurm, :registered, []) 212 | [{"hi", ^pid}] = Cluster.call(b, Swurm, :registered, []) 213 | 214 | # stop it before exiting 215 | GenServer.stop(pid) 216 | 217 | until_match([], Cluster.call(a, Swurm, :registered, [])) 218 | until_match([], Cluster.call(b, Swurm, :registered, [])) 219 | end 220 | end 221 | end 222 | -------------------------------------------------------------------------------- /test/support/helpers.ex: -------------------------------------------------------------------------------- 1 | defmodule Sworm.Support.Helpers do 2 | defmacro sworm_scenario(sworm, title, opts \\ [], do: block) do 3 | opts = Keyword.merge([cluster_size: 2, boot_timeout: 20_000], opts) 4 | 5 | quote do 6 | scenario unquote(title), unquote(opts) do 7 | node_setup do 8 | {:ok, _} = Application.ensure_all_started(:sworm) 9 | mod = unquote(sworm) 10 | 11 | case unquote(sworm) do 12 | nil -> 13 | :ok 14 | 15 | {m, f, a} -> 16 | {:ok, pid} = apply(m, f, a) 17 | Process.unlink(pid) 18 | 19 | mod -> 20 | opts = [delta_crdt_options: [sync_interval: 50]] 21 | {:ok, pid} = mod.start_link(opts) 22 | Process.unlink(pid) 23 | end 24 | 25 | :ok 26 | end 27 | 28 | unquote(block) 29 | end 30 | end 31 | end 32 | 33 | def wait_until(condition, timeout \\ 5_000) do 34 | cond do 35 | condition.() -> 36 | :ok 37 | 38 | timeout <= 0 -> 39 | ExUnit.Assertions.flunk("Timeout reached waiting for condition") 40 | 41 | true -> 42 | Process.sleep(100) 43 | wait_until(condition, timeout - 100) 44 | end 45 | end 46 | 47 | defmacro until_match(a, b, timeout \\ 2500) do 48 | quote do 49 | Sworm.Support.Helpers.wait_until( 50 | fn -> 51 | match?(unquote(a), unquote(b)) 52 | end, 53 | unquote(timeout) 54 | ) 55 | 56 | unquote(b) 57 | end 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /test/support/sworm_case.ex: -------------------------------------------------------------------------------- 
1 | defmodule SwormCase do 2 | defmacro __using__(_) do 3 | quote do 4 | import SwormCase 5 | require Sworm.Support.Helpers 6 | import Sworm.Support.Helpers 7 | 8 | use ExUnit.ClusteredCase 9 | end 10 | end 11 | 12 | ### 13 | 14 | def sworm(name) do 15 | {:ok, pid} = Sworm.start_link(name) 16 | 17 | ExUnit.Callbacks.on_exit(fn -> 18 | Process.sleep(50) 19 | Process.exit(pid, :kill) 20 | Process.sleep(200) 21 | end) 22 | end 23 | 24 | def mailbox() do 25 | mailbox([]) 26 | end 27 | 28 | def mailbox(acc) do 29 | receive do 30 | item -> mailbox([item | acc]) 31 | after 32 | 100 -> Enum.reverse(acc) 33 | end 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /test/support/test_handoff_sworm.ex: -------------------------------------------------------------------------------- 1 | defmodule HandoffSworm do 2 | @moduledoc false 3 | 4 | use Sworm, handoff: true 5 | 6 | defmodule TestServer do 7 | use GenServer 8 | 9 | def start_link() do 10 | GenServer.start_link(__MODULE__, 0) 11 | end 12 | 13 | def init(state) do 14 | {:ok, state} 15 | end 16 | 17 | def handle_call(:inc, _from, state) do 18 | state = state + 1 19 | {:reply, state, state} 20 | end 21 | 22 | def handle_call(:get, _from, state) do 23 | {:reply, state, state} 24 | end 25 | 26 | def handle_info({HandoffSworm, :begin_handoff, delegate, ref}, state) do 27 | send(delegate, {ref, :handoff_state, state}) 28 | {:noreply, state} 29 | end 30 | 31 | def handle_info({HandoffSworm, :end_handoff, state}, _state) do 32 | {:noreply, state} 33 | end 34 | end 35 | 36 | def start_one(name) do 37 | HandoffSworm.register_name(name, TestServer, :start_link, []) 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /test/sworm/directory_manager_test.exs: -------------------------------------------------------------------------------- 1 | defmodule MySwarm do 2 | use Sworm 3 | 4 | defmodule TestServer do 5 | use GenServer 6 | 7 | def start_link() do 8 | GenServer.start_link(__MODULE__, []) 9 | end 10 | 11 | def init(a) do 12 | {:ok, a} 13 | end 14 | end 15 | 16 | def start_one(name) do 17 | MySwarm.register_name(name, TestServer, :start_link, []) 18 | end 19 | end 20 | 21 | defmodule Sworm.DirectoryManagerTest do 22 | use SwormCase 23 | 24 | import Sworm.Support.Helpers 25 | 26 | sworm_scenario MySwarm, "given a cluster with 2 nodes" do 27 | test "directory is updated when nodes join and leave", %{ 28 | cluster: c 29 | } do 30 | [a, _b] = Cluster.members(c) 31 | 32 | until_match([_, _], Cluster.call(a, Sworm.DirectoryManager, :nodes_for_sworm, [MySwarm])) 33 | 34 | # stop_node(c, b) 35 | Cluster.partition(c, 2) 36 | 37 | until_match([_], Cluster.call(a, Sworm.DirectoryManager, :nodes_for_sworm, [MySwarm])) 38 | 39 | # stop_node(c, b) 40 | Cluster.heal(c) 41 | 42 | until_match([_, _], Cluster.call(a, Sworm.DirectoryManager, :nodes_for_sworm, [MySwarm])) 43 | end 44 | end 45 | end 46 | -------------------------------------------------------------------------------- /test/sworm/error_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Sworm.ErrorTests do 2 | use SwormCase 3 | 4 | setup do 5 | # start it 6 | sworm(TestSworm) 7 | 8 | :ok 9 | end 10 | 11 | defmodule CrashingServer do 12 | use GenServer 13 | 14 | def start_link() do 15 | GenServer.start_link(__MODULE__, []) 16 | end 17 | 18 | def init([]), do: {:stop, :cannot_start} 19 | end 20 | 21 | test "supervisor" do 22 | assert [] = 
Sworm.registered(TestSworm) 23 | 24 | assert {:error, :cannot_start} = 25 | Sworm.register_name(TestSworm, "foo", CrashingServer, :start_link, []) 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /test/sworm/macro_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Sworm.MacroTest do 2 | use SwormCase 3 | 4 | defmodule MySworm do 5 | use Sworm 6 | end 7 | 8 | defmodule TestServer do 9 | use GenServer 10 | alias Sworm.MacroTest.MySworm 11 | 12 | def start_link(opts \\ []) do 13 | GenServer.start_link(__MODULE__, [], opts) 14 | end 15 | 16 | def init([]), do: {:ok, nil} 17 | 18 | def handle_call({:join, group}, _from, state) do 19 | MySworm.join(group) 20 | {:reply, :ok, state} 21 | end 22 | 23 | def handle_call({:leave, group}, _from, state) do 24 | MySworm.leave(group) 25 | {:reply, :ok, state} 26 | end 27 | 28 | def handle_call(:ping, _from, state) do 29 | {:reply, :pong, state} 30 | end 31 | end 32 | 33 | setup do 34 | # start it 35 | {:ok, pid} = MySworm.start_link() 36 | 37 | on_exit(fn -> 38 | Process.sleep(50) 39 | Process.exit(pid, :normal) 40 | end) 41 | 42 | :ok 43 | end 44 | 45 | test "using" do 46 | assert [] = MySworm.registered() 47 | 48 | assert :undefined = MySworm.whereis_name("test") 49 | 50 | assert {:ok, worker} = MySworm.whereis_or_register_name("test", TestServer, :start_link, []) 51 | assert is_pid(worker) 52 | 53 | assert ^worker = MySworm.whereis_name("test") 54 | 55 | assert [{"test", ^worker}] = MySworm.registered() 56 | assert :pong = GenServer.call(worker, :ping) 57 | 58 | assert [] = MySworm.members("group0") 59 | :ok = GenServer.call(worker, {:join, "group0"}) 60 | assert [^worker] = MySworm.members("group0") 61 | :ok = GenServer.call(worker, {:leave, "group0"}) 62 | assert [] = MySworm.members("group0") 63 | end 64 | 65 | test "register_name/1" do 66 | assert [] = MySworm.registered() 67 | 68 | assert :yes = MySworm.register_name("hello") 69 | # assert :yes = MySworm.register_name("hello1") 70 | pid = self() 71 | assert [{"hello", ^pid}] = MySworm.registered() 72 | end 73 | 74 | test "via tuple" do 75 | name = {:via, MySworm, "test_server"} 76 | {:ok, pid} = TestServer.start_link(name: name) 77 | assert [{"test_server", ^pid}] = MySworm.registered() 78 | assert :pong = GenServer.call(name, :ping) 79 | end 80 | 81 | defmodule AnotherSworm do 82 | use Sworm 83 | end 84 | 85 | test "start/0" do 86 | assert {:ok, _} = AnotherSworm.start_link() 87 | assert [] = AnotherSworm.registered() 88 | end 89 | 90 | test "Support child restart strategy - restart: transient (default)" do 91 | assert {:ok, _} = AnotherSworm.start_link() 92 | assert [] = AnotherSworm.registered() 93 | 94 | assert {:ok, worker} = 95 | AnotherSworm.whereis_or_register_name("test", TestServer, :start_link, []) 96 | 97 | assert [{"test", ^worker}] = AnotherSworm.registered() 98 | 99 | Process.exit(worker, :kill) 100 | 101 | wait_until(fn -> 102 | case AnotherSworm.registered() do 103 | [{"test", worker2}] when worker2 != worker -> 104 | true 105 | 106 | _ -> 107 | false 108 | end 109 | end) 110 | end 111 | 112 | defmodule RestartTemporarySworm do 113 | use Sworm, restart: :temporary 114 | end 115 | 116 | test "Support child restart strategy - restart: temporary" do 117 | assert {:ok, _} = RestartTemporarySworm.start_link() 118 | assert [] = RestartTemporarySworm.registered() 119 | 120 | assert {:ok, worker} = 121 | RestartTemporarySworm.whereis_or_register_name("test", TestServer, 
:start_link, []) 122 | 123 | assert [{"test", ^worker}] = RestartTemporarySworm.registered() 124 | 125 | Process.exit(worker, :kill) 126 | 127 | until_match([], RestartTemporarySworm.registered()) 128 | end 129 | 130 | defmodule DistributionStrategySworm do 131 | use Sworm, distribution_strategy: Horde.UniformQuorumDistribution 132 | end 133 | 134 | test "Support distribution strategy" do 135 | assert {:ok, _} = DistributionStrategySworm.start_link(name: A) 136 | assert [] = DistributionStrategySworm.registered() 137 | 138 | sup = Process.whereis(DistributionStrategySworm.Supervisor) 139 | 140 | assert Horde.UniformQuorumDistribution == :sys.get_state(sup).distribution_strategy 141 | end 142 | end 143 | -------------------------------------------------------------------------------- /test/sworm/shutdown_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Sworm.ShutdownTest do 2 | use ExUnit.Case 3 | 4 | defmodule MySworm do 5 | use Sworm 6 | 7 | defmodule TestServer do 8 | use GenServer 9 | alias Sworm.MacroTest.MySworm 10 | 11 | def start_link(opts \\ []) do 12 | GenServer.start_link(__MODULE__, [], opts) 13 | end 14 | 15 | def init([]), do: {:ok, nil} 16 | end 17 | end 18 | 19 | test "shut down" do 20 | {:ok, pid} = MySworm.start_link([]) 21 | Process.unlink(pid) 22 | 23 | {:ok, _} = MySworm.register_name("hoi", MySworm.TestServer, :start_link, []) 24 | 25 | [_] = MySworm.registered() 26 | 27 | :ok = Supervisor.stop(pid, :shutdown) 28 | end 29 | end 30 | -------------------------------------------------------------------------------- /test/sworm_test.exs: -------------------------------------------------------------------------------- 1 | defmodule SwormTest do 2 | use SwormCase 3 | 4 | doctest Sworm 5 | 6 | setup do 7 | # start it 8 | sworm(TestSworm) 9 | 10 | :ok 11 | end 12 | 13 | defmodule TestServer do 14 | use GenServer 15 | 16 | def start_link(arg \\ []) do 17 | GenServer.start_link(__MODULE__, arg) 18 | end 19 | 20 | def init(_arg) do 21 | {:ok, nil} 22 | end 23 | 24 | def handle_call(:x, _from, state) do 25 | {:reply, :y, state} 26 | end 27 | end 28 | 29 | test "supervisor" do 30 | assert [] = Sworm.registered(TestSworm) 31 | 32 | assert {:ok, worker} = Sworm.register_name(TestSworm, "foo", TestServer, :start_link, []) 33 | 34 | assert [{"foo", ^worker}] = Sworm.registered(TestSworm) 35 | assert :y = GenServer.call(worker, :x) 36 | 37 | assert ^worker = Sworm.whereis_name(TestSworm, "foo") 38 | 39 | assert {:error, :not_found} = Sworm.unregister_name(TestSworm, "z") 40 | assert :ok = Sworm.unregister_name(TestSworm, "foo") 41 | assert :undefined = Sworm.whereis_name(TestSworm, "foo") 42 | 43 | refute Process.alive?(worker) 44 | end 45 | 46 | test "whereis_or_register_name" do 47 | assert :undefined = Sworm.whereis_name(TestSworm, "test") 48 | 49 | {:ok, worker} = Sworm.whereis_or_register_name(TestSworm, "test", TestServer, :start_link, []) 50 | assert is_pid(worker) 51 | 52 | assert ^worker = Sworm.whereis_name(TestSworm, "test") 53 | 54 | assert [{"test", ^worker}] = Sworm.registered(TestSworm) 55 | assert :y = GenServer.call(worker, :x) 56 | end 57 | 58 | test "join / leave / members" do 59 | assert {:error, :not_found} = Sworm.join(TestSworm, "group1", self()) 60 | assert {:error, :not_found} = Sworm.leave(TestSworm, "group1", self()) 61 | 62 | assert [] = Sworm.members(TestSworm, "group1") 63 | assert {:ok, worker} = Sworm.register_name(TestSworm, "a", TestServer, :start_link, []) 64 | 65 | assert :ok = 
Sworm.join(TestSworm, "group1", worker) 66 | assert [^worker] = Sworm.members(TestSworm, "group1") 67 | 68 | assert {:ok, worker} = Sworm.register_name(TestSworm, "b", TestServer, :start_link, []) 69 | 70 | assert :ok = Sworm.join(TestSworm, "group1", worker) 71 | assert [_, _] = Sworm.members(TestSworm, "group1") 72 | 73 | assert :ok = Sworm.leave(TestSworm, "group1", worker) 74 | assert [_] = Sworm.members(TestSworm, "group1") 75 | 76 | # leave/join multiple times is OK 77 | assert :ok = Sworm.join(TestSworm, "group1", worker) 78 | assert :ok = Sworm.join(TestSworm, "group1", worker) 79 | 80 | assert :ok = Sworm.leave(TestSworm, "group1", worker) 81 | assert :ok = Sworm.leave(TestSworm, "group1", worker) 82 | assert :ok = Sworm.leave(TestSworm, "group1", worker) 83 | end 84 | 85 | defmodule NameTestServer do 86 | def init(name) do 87 | :yes = Sworm.register_name(TestSworm, name) 88 | {:ok, nil} 89 | end 90 | end 91 | 92 | test "register_name/2" do 93 | {:ok, _} = GenServer.start_link(NameTestServer, "a", []) 94 | {:ok, b} = GenServer.start_link(NameTestServer, "b", []) 95 | 96 | assert [{"a", _}, {"b", _}] = Sworm.registered(TestSworm) |> Enum.sort() 97 | 98 | GenServer.stop(b) 99 | 100 | until_match([{"a", _}], Sworm.registered(TestSworm)) 101 | end 102 | 103 | test "register_name/3" do 104 | me = self() 105 | :yes = Sworm.register_name(TestSworm, "a", me) 106 | 107 | assert [{"a", ^me}] = Sworm.registered(TestSworm) 108 | assert [_] = delegates() 109 | 110 | # cannot register again 111 | :no = Sworm.register_name(TestSworm, "a", me) 112 | 113 | # ensure we have only one delegate 114 | assert [_] = delegates() 115 | 116 | :yes = Sworm.register_name(TestSworm, "b", me) 117 | 118 | # still only one delegate 119 | assert [_] = delegates() 120 | end 121 | 122 | require Logger 123 | 124 | test "remove from supervisor on name conflict" do 125 | # Simulate a network partition healing, 2 processes registered on 126 | # different nodes are reconciled. 
127 | 128 | sworm(A) 129 | sworm(B) 130 | 131 | assert {:ok, pid_a} = Sworm.register_name(A, "foo", TestServer, :start_link, ["foo.A"]) 132 | Process.sleep(10) 133 | assert {:ok, pid_b} = Sworm.register_name(B, "foo", TestServer, :start_link, ["foo.B"]) 134 | 135 | Horde.Cluster.set_members(A.Registry, [A.Registry, B.Registry]) 136 | Horde.Cluster.set_members(A.Supervisor, [A.Supervisor, B.Supervisor]) 137 | 138 | wait_until(fn -> not Process.alive?(pid_a) end) 139 | 140 | refute Process.alive?(pid_a) 141 | assert Process.alive?(pid_b) 142 | 143 | assert([{"foo", ^pid_b}] = Sworm.registered(A)) 144 | assert([{"foo", ^pid_b}] = Sworm.registered(B)) 145 | 146 | # removed from A.Supervisor 147 | until_match( 148 | [], 149 | Horde.DynamicSupervisor.which_children(A.Supervisor) 150 | ) 151 | 152 | # still exists on B.Supervisor 153 | until_match( 154 | [{:undefined, _delegate, _, _}], 155 | Horde.DynamicSupervisor.which_children(B.Supervisor) 156 | ) 157 | end 158 | 159 | def delegates() do 160 | match = [{{{:delegate, :"$1"}, :"$2", :"$3"}, [], [:"$3"]}] 161 | 162 | Horde.Registry.select(TestSworm.Registry, match) 163 | |> Enum.uniq() 164 | end 165 | 166 | test "register_name race" do 167 | parent = self() 168 | 169 | for _n <- 1..10 do 170 | spawn_link(fn -> 171 | result = Sworm.register_name(TestSworm, "a", TestServer, :start_link, []) 172 | send(parent, result) 173 | end) 174 | end 175 | 176 | assert [ 177 | {:ok, p}, 178 | {:error, {:already_started, p}}, 179 | {:error, {:already_started, p}}, 180 | {:error, {:already_started, p}}, 181 | {:error, {:already_started, p}}, 182 | {:error, {:already_started, p}}, 183 | {:error, {:already_started, p}}, 184 | {:error, {:already_started, p}}, 185 | {:error, {:already_started, p}}, 186 | {:error, {:already_started, p}} 187 | ] = mailbox() 188 | end 189 | end 190 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | {_, 0} = System.cmd("epmd", ["-daemon"]) 2 | Node.start(:"primary@127.0.0.1", :longnames) 3 | ExUnit.start() 4 | --------------------------------------------------------------------------------