├── .formatter.exs ├── .github └── workflows │ └── ci.yml ├── .gitignore ├── CHANGELOG.md ├── README.md ├── examples ├── consumer_supervisor.exs ├── gen_event.exs ├── producer_consumer.exs └── rate_limiter.exs ├── lib ├── consumer_supervisor.ex ├── gen_stage.ex └── gen_stage │ ├── buffer.ex │ ├── dispatcher.ex │ ├── dispatchers │ ├── broadcast_dispatcher.ex │ ├── demand_dispatcher.ex │ └── partition_dispatcher.ex │ ├── stream.ex │ ├── streamer.ex │ └── utils.ex ├── mix.exs ├── mix.lock └── test ├── consumer_supervisor_test.exs ├── gen_stage ├── broadcast_dispatcher_test.exs ├── demand_dispatcher_test.exs └── partition_dispatcher_test.exs ├── gen_stage_test.exs └── test_helper.exs /.formatter.exs: -------------------------------------------------------------------------------- 1 | [ 2 | inputs: ["{mix,.formatter}.exs", "{lib,test}/**/*.{ex,exs}"], 3 | locals_without_parens: [ 4 | assert_kill: 2 5 | ] 6 | ] 7 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | test: 11 | name: Elixir ${{ matrix.elixir }}/OTP ${{ matrix.otp }} 12 | runs-on: ${{ matrix.os }} 13 | env: 14 | MIX_ENV: test 15 | 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | include: 20 | # Latest supported versions. 21 | - os: ubuntu-22.04 22 | elixir: "1.18" 23 | otp: "27.2" 24 | lint: true 25 | 26 | # This is a middle ground: it's old versions that we probably want to start 27 | # requiring at some point, but technically we support older. 28 | - os: ubuntu-20.04 29 | elixir: "1.13" 30 | otp: "22.3" 31 | 32 | # Oldest supported versions. 33 | - os: ubuntu-20.04 34 | elixir: "1.11" 35 | otp: "21.3.8.24" 36 | 37 | steps: 38 | - name: Check out this repository 39 | uses: actions/checkout@v4 40 | 41 | - name: Set up Erlang and Elixir 42 | uses: erlef/setup-beam@v1 43 | with: 44 | otp-version: ${{ matrix.otp }} 45 | elixir-version: ${{ matrix.elixir }} 46 | 47 | - name: Install dependencies 48 | run: mix deps.get --only $MIX_ENV 49 | 50 | - name: Check that code is formatted 51 | run: mix format --check-formatted 52 | if: ${{ matrix.lint }} 53 | 54 | - name: Check that there are no unused dependencies in mix.lock 55 | run: mix do deps.get, deps.unlock --check-unused 56 | if: ${{ matrix.lint }} 57 | 58 | - name: Compile with --warnings-as-errors 59 | run: mix compile --warnings-as-errors 60 | if: ${{ matrix.lint }} 61 | 62 | - name: Run tests 63 | run: mix test 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /_build 2 | /cover 3 | /deps 4 | /doc 5 | erl_crash.dump 6 | *.ez 7 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## v1.2.1 (2023-03-20) 4 | 5 | ### Enhancements 6 | 7 | * Support `:on_cancel` in `GenStage.from_enumerable/2` 8 | 9 | ## v1.2.0 (2023-02-01) 10 | 11 | ### Enhancements 12 | 13 | * Shuffle the demands on first dispatch for load balancing across consumers 14 | * Allow explicitly specifying `:max_demand` in `GenStage.DemandDispatcher` 15 | 16 | ### Bug fixes 17 | 18 | * Do not dispatch when accumulating demand 19 | 20 | ## v1.1.1 (2021-08-13) 21 | 22 | ### Enhancements 23 | 24 | * Log a clear 
error message when a stage terminates due to noproc on a non-temporary subscription 25 | 26 | ## v1.1.0 (2021-02-05) 27 | 28 | v1.1 requires Elixir v1.7+. 29 | 30 | ### Enhancements 31 | 32 | * Fix warnings on latest Elixir versions 33 | * Support more process specifications in the `:subscribe_to` option 34 | * Add a callback for instrumenting the discarded count and a function to get the estimated buffer size 35 | 36 | ## v1.0.0 (2020-02-03) 37 | 38 | ### Enhancements 39 | 40 | * Allow events to be discarded in PartitionDispatcher by returning `:none` 41 | * Raise for unknown partitions in PartitionDispatcher 42 | 43 | ## v0.14.3 (2019-10-28) 44 | 45 | ### Enhancements 46 | 47 | * Improvements to typespecs and error messages 48 | 49 | ## v0.14.2 (2019-06-12) 50 | 51 | ### Enhancements 52 | 53 | * Add `GenStage.demand/1` 54 | 55 | ### Bug fixes 56 | 57 | * Fix code_change callback implementation 58 | 59 | ## v0.14.1 (2018-10-08) 60 | 61 | ### Bug fixes 62 | 63 | * Fix warnings on Elixir v1.8 64 | 65 | ## v0.14.0 (2018-06-10) 66 | 67 | This version requires Elixir v1.5+. 68 | 69 | ### Bug fixes 70 | 71 | * Ensure a `:producer_consumer` stops asking the producer if downstream demand is 0 72 | * Enforce the :hash option for non-integer partitions in PartitionDispatcher 73 | 74 | ## v0.13.1 (2018-01-28) 75 | 76 | Note: this is the last version to support Elixir v1.3 and v1.4. 77 | 78 | ### Enhancements 79 | 80 | * Log error on unknown partitions 81 | 82 | ### Bug fixes 83 | 84 | * Do not allow restart: :permanent in `ConsumerSupervisor` to avoid common pitfalls 85 | * Improve and fix types to avoid dialyzer warnings 86 | * Avoid conflict with user-specified `@opts` 87 | 88 | ## v0.13.0 (2018-01-13) 89 | 90 | ### Enhancements 91 | 92 | * Mirror `ConsumerSupervisor.init/2` after `Supervisor.init/2` 93 | * No longer define default implementations for callbacks, instead declare them as `@optional_callbacks` 94 | 95 | ### Bug fixes 96 | 97 | * Ensure `ConsumerSupervisor` does not send demand when restarting a child 98 | 99 | ## v0.12.2 100 | 101 | ### Enhancements 102 | 103 | * Support Elixir v1.5 supervisor childspecs in ConsumerSupervisor 104 | * Mark `GenStage.child_spec/1` overridable 105 | 106 | ## v0.12.1 107 | 108 | ### Enhancements 109 | 110 | * Define Elixir v1.5 childspecs in GenStage and ConsumerSupervisor 111 | 112 | ### Bug fixes 113 | 114 | * Fix a bug where info messages would be sent out of order in producer consumers 115 | * Fix a bug where handle_cancel would be invoked out of order in producer consumers 116 | 117 | ## v0.12.0 118 | 119 | ### Enhancements 120 | 121 | * Add `cancel: :transient` to subscription options, which does not terminate the consumer if the exit reason is `:normal`, `:shutdown`, or `{:shutdown, _}` 122 | * Add `GenStage.sync_info/3` and `GenStage.async_info/2` which queue an information message to be delivered once the current queue is consumed 123 | 124 | ### Backwards incompatible changes 125 | 126 | * Remove `:max_dynamic` from ConsumerSupervisor 127 | * The notification mechanism has been removed from GenStage. For termination, GenStage now uses proper exit signals and `cancel: :transient` has been added as a subscription option. 128 | 129 | ## v0.11.0 130 | 131 | ### Backwards incompatible changes 132 | 133 | * Remove the Experimental namespace 134 | * Rename DynamicSupervisor to ConsumerSupervisor 135 | * Move Flow to a separate project: https://github.com/elixir-lang/flow 136 | 137 | Except for the module name changes, all APIs remain exactly the same. 
138 | 139 | ### Bug fixes 140 | 141 | * Accumulate demands but don't sum them together. This provides a better ramp up time for producers with multiple consumers 142 | 143 | ## v0.10.0 144 | 145 | ### Enhancements 146 | 147 | * Add `Flow.group_by/3` and `Flow.group_by_key/3` as conveniences around `Flow.reduce/3` 148 | * Add `Flow.map_values/2` for mapping over the values in a key-value based state 149 | * Add `Flow.take_sort/3` that efficiently sorts and takes the top N entries 150 | 151 | ### Bug fixes 152 | 153 | * Ensure BroadcastDispatcher sends demand to itself when custom selector discards events 154 | * Ensure flows started with `Flow.start_link/2` properly terminate if producers terminate 155 | * Ensure flows exit locally instead of relying on linked processes exits. With this change, `Flow.run(flow)` and `Enum.to_list(flow)` no longer start stages linked directly to the caller but does so through a supervisor 156 | 157 | ## v0.9.0 158 | 159 | ### Enhancements 160 | 161 | * Add `GenStage.sync_resubscribe/4` and `GenStage.async_resubscribe/4` 162 | * Improve logs, specs and docs 163 | 164 | ### Bug fixes 165 | 166 | * Ensure `Flow.departition/4` works on `Flow.start_link/1` 167 | * Make sure no lingering monitors or messages on the inbox after GenStage.stream/1 168 | 169 | ## v0.8.0 170 | 171 | ### Enhancements 172 | 173 | * Support a `:selector` option in the `BroadcastDispatcher` 174 | 175 | ### Bug fix 176 | 177 | * Ensure PartitionDispatcher does not create more partitions than necessary 178 | 179 | ### Backwards incompatible changes 180 | 181 | * Pass the events `length` to dispatchers for more performant dispatching 182 | 183 | ## v0.7.0 184 | 185 | ### Enhancements 186 | 187 | * Introduce count-based windows, process-time windows and session-based windows on Flow 188 | * Support resetting or keeping buffer on Flow watermarks 189 | 190 | ### Backwards incompatible changes 191 | 192 | * Remove `:milliseconds`, `:seconds`, `:minutes` and `:hours` for units in favor of `:millisecond`, `:second`, `:minute` and `:hour`. You will get an error if you use the previous values. 193 | * Specifying shortcuts to `:hash` has been removed in favor of the `:key` option. You will get an error if you use the previous values. 194 | 195 | ### Bug fixes 196 | 197 | * Ensure uneven partitions emit all windows on `Flow.departition/4` 198 | * Properly emit the beginning of the window time on triggers for fixed windows 199 | 200 | ## v0.6.1 (2016-10-05) 201 | 202 | ### Bug fixes 203 | 204 | * Properly count the most recent entry for each fixed window 205 | 206 | ## v0.6.0 (2016-10-04) 207 | 208 | ### Enhancements 209 | 210 | * Introduce `Flow.departition/5` 211 | * Include examples of broadcasters and rate limiters in the documentation 212 | * Allow custom-named, non-integer partitions 213 | 214 | ### Bug fixes 215 | 216 | * Ensure consumer supervisor respects `min_demand` and does not send demand too soon 217 | 218 | ### Backwards incompatible changes 219 | 220 | * Remove `Flow.new/0`, `Flow.new/1` and `Flow.new/2` in favor of passing options to `from_enumerable/2` and `from_stage/2` 221 | * Remove `Flow.partition/3` and `Flow.merge/3` in favor of passing the `:window` option to `Flow.partition/2` and `Flow.merge/2` 222 | 223 | ## v0.5.0 (2016-08-09) 224 | 225 | This release moves `Flow` from under the `GenStage` namespace and into `Experimental.Flow`. 
226 | 227 | ### Enhancements 228 | 229 | * Add `Flow.uniq/2` and `Flow.uniq_by/2` 230 | * Add `Flow.start_link/2` and `Flow.into_stages/3` 231 | * Add `Flow.window_join/8` 232 | * Unify window and partition APIs 233 | * Support `Flow.Window.global/0` and `Flow.Window.fixed/3` 234 | 235 | ## v0.4.3 (2016-07-28) 236 | 237 | ### Enhancements 238 | 239 | * Add `Flow.inner_join/6` 240 | * Add `GenStage.demand/2` that allows a producer to accumulate demand as a synchronization mechanism 241 | * Improve performance for the partition dispatcher and allow it to change the partitioned event 242 | 243 | ## v0.4.2 (2016-07-25) 244 | 245 | ### Bug fixes 246 | 247 | * Fix a bug where a flow wouldn't terminate if a source stream halts 248 | 249 | ## v0.4.1 (2016-07-21) 250 | 251 | ### Enhancements 252 | 253 | * Add `Flow.trigger/3` and `Flow.trigger_every/4` supporting custom, count and processing-time triggers. Event-time triggers can be implemented via `trigger/3`. Event-time triggers will be added once windows support is included 254 | 255 | ## v0.4.0 (2016-07-19) 256 | 257 | ### Enhancements 258 | 259 | * Introduce `Flow` with enumerable/stream based operations 260 | * Include more information on `:sys.get_status/1` calls for GenStage 261 | 262 | ### Bug fixes 263 | 264 | * Fix a bug where a `:producer_consumer` stage which filtered events would eventually halt 265 | * Fix `format_status/2` results when inspecting GenStage in `:observer` 266 | 267 | ## v0.3.0 (2016-07-12) 268 | 269 | ### Enhancements 270 | 271 | * Support notifications 272 | * Introduce `GenStage.stream/1` to stream events as a consumer from a stage 273 | * Introduce `GenStage.from_enumerable/2` to start a producer stage that emits events from an enumerable (or a stream) 274 | 275 | ## v0.2.1 (2016-07-08) 276 | 277 | ### Enhancements 278 | 279 | * Add `GenStage.PartitionDispatcher` 280 | * Set default `:max_demand` to 1000 281 | * Use buffer based `:producer_consumer` to avoid escalating demand 282 | 283 | ## v0.2.0 (2016-07-05) 284 | 285 | ### Enhancements 286 | 287 | * Support `:producer_consumer` type 288 | * Support `:infinity` as `:buffer_size` (useful for `:producer_consumer`) 289 | 290 | ### Backwards incompatible changes 291 | 292 | * Namespace all modules under `Experimental` 293 | * Ensure `:cancel` reason does not cascade through the pipeline 294 | 295 | ## v0.1.0 (2016-07-03) 296 | 297 | ### Enhancements 298 | 299 | * Include GenStage with `:producer` and `:consumer` types 300 | * Include ConsumerSupervisor implemented as a `GenStage` consumer and that provides the `:simple_one_for_one` functionality 301 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GenStage 2 | 3 | GenStage is a specification for exchanging events between producers and consumers. 4 | 5 | This project currently provides the following functionality: 6 | 7 | * `GenStage` ([docs](https://hexdocs.pm/gen_stage/GenStage.html)) - a behaviour for implementing producer and consumer stages 8 | 9 | * `ConsumerSupervisor` ([docs](https://hexdocs.pm/gen_stage/ConsumerSupervisor.html)) - a supervisor designed for consuming events from GenStage and starting a child process per event 10 | 11 | You may also be interested in two other projects built on top of GenStage: 12 | 13 | * [Flow](https://github.com/plataformatec/flow) for building computational flows using map-reduce, partitions, windows, and more that run concurrently. 
See the documentation for [Flow](https://hexdocs.pm/flow) or [José Valim's keynote at ElixirConf 2016](https://youtu.be/srtMWzyqdp8?t=244) introducing the main concepts behind GenStage and Flow 14 | 15 | * [Broadway](https://github.com/plataformatec/broadway) for building concurrent and multi-stage data ingestion and data processing pipelines to consume events from Amazon SQS, RabbitMQ, and others. See [Broadway's documentation](https://hexdocs.pm/broadway) or [José Valim's introduction to Broadway](https://www.youtube.com/watch?v=ZOExnT1PYjs) 16 | 17 | ## Examples 18 | 19 | Examples for using GenStage and ConsumerSupervisor can be found in the [examples](examples) directory: 20 | 21 | * [ProducerConsumer](examples/producer_consumer.exs) - a simple example of setting up a pipeline of `A -> B -> C` stages and having events flowing through it 22 | 23 | * [ConsumerSupervisor](examples/consumer_supervisor.exs) - an example of how to use one or more `ConsumerSupervisor` as a consumer to a producer that works as a counter 24 | 25 | * [GenEvent](examples/gen_event.exs) - an example of how to use `GenStage` to implement an alternative to `GenEvent` that leverages concurrency and provides more flexibility regarding buffer size and back-pressure 26 | 27 | * [RateLimiter](examples/rate_limiter.exs) - an example of performing rate limiting in a GenStage pipeline 28 | 29 | ## Installation 30 | 31 | GenStage requires Elixir v1.5. Just add `:gen_stage` to your list of dependencies in mix.exs: 32 | 33 | ```elixir 34 | def deps do 35 | [{:gen_stage, "~> 1.0"}] 36 | end 37 | ``` 38 | 39 | ## License 40 | 41 | Same as Elixir under Apache License 2.0. 42 | Check [NOTICE](https://github.com/elixir-lang/elixir/blob/main/NOTICE) and [LICENSE](https://github.com/elixir-lang/elixir/blob/main/LICENSE) for more information. 43 | -------------------------------------------------------------------------------- /examples/consumer_supervisor.exs: -------------------------------------------------------------------------------- 1 | # Usage: mix run examples/consumer_supervisor.exs 2 | # 3 | # Hit Ctrl+C twice to stop it. 4 | 5 | defmodule Counter do 6 | @moduledoc """ 7 | This is a simple producer that counts from the given 8 | number whenever there is a demand. 9 | """ 10 | 11 | use GenStage 12 | 13 | def start_link(initial) when is_integer(initial) do 14 | GenStage.start_link(__MODULE__, initial, name: __MODULE__) 15 | end 16 | 17 | ## Callbacks 18 | 19 | def init(initial) do 20 | {:producer, initial} 21 | end 22 | 23 | def handle_demand(demand, counter) when demand > 0 do 24 | # If the counter is 3 and we ask for 2 items, we will 25 | # emit the items 3 and 4, and set the state to 5. 26 | events = Enum.to_list(counter..counter+demand-1) 27 | {:noreply, events, counter + demand} 28 | end 29 | end 30 | 31 | defmodule Consumer do 32 | @moduledoc """ 33 | A consumer will be a consumer supervisor that will 34 | spawn printer tasks for each event. 
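  It subscribes to the `Counter` producer with a maximum demand of 50 and starts one `Printer` task per event it receives.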
35 | """ 36 | 37 | use ConsumerSupervisor 38 | 39 | def start_link() do 40 | ConsumerSupervisor.start_link(__MODULE__, :ok) 41 | end 42 | 43 | # Callbacks 44 | 45 | def init(:ok) do 46 | children = [ 47 | worker(Printer, [], restart: :temporary) 48 | ] 49 | 50 | {:ok, children, strategy: :one_for_one, subscribe_to: [{Counter, max_demand: 50}]} 51 | end 52 | end 53 | 54 | defmodule Printer do 55 | def start_link(event) do 56 | Task.start_link(fn -> 57 | IO.inspect {self(), event} 58 | end) 59 | end 60 | end 61 | 62 | defmodule App do 63 | @moduledoc """ 64 | Your application entry-point. 65 | 66 | For actual applications, start/0 should be start/2. 67 | """ 68 | 69 | def start do 70 | import Supervisor.Spec 71 | 72 | children = [ 73 | worker(Counter, [0]), 74 | # We can add as many consumer supervisors as consumers as we want! 75 | worker(Consumer, [], id: 1) 76 | ] 77 | 78 | Supervisor.start_link(children, strategy: :one_for_one) 79 | end 80 | end 81 | 82 | # Start the app and wait forever 83 | App.start 84 | Process.sleep(:infinity) 85 | -------------------------------------------------------------------------------- /examples/gen_event.exs: -------------------------------------------------------------------------------- 1 | # Usage: mix run examples/gen_event.exs 2 | defmodule Broadcaster do 3 | @moduledoc """ 4 | Using a GenStage for implementing a GenEvent manager 5 | replacement, where each handler runs as a separate process. 6 | It is around 40 LOC without docs and comments. 7 | 8 | This implementation will keep events in an internal queue 9 | until there is demand, leading to client timeouts for slow 10 | consumers. Alternative implementations could rely on the 11 | GenStage internal buffer, although such implies events will 12 | be lost if the buffer gets full (see GenStage docs). 13 | 14 | Generally, the GenStage implementation gives developers 15 | more control to handle buffers and apply back-pressure while 16 | leveraging concurrency and synchronization mechanisms. 17 | """ 18 | 19 | use GenStage 20 | 21 | @doc """ 22 | Starts the broadcaster. 23 | """ 24 | def start_link(_args) do 25 | GenStage.start_link(__MODULE__, :ok, name: __MODULE__) 26 | end 27 | 28 | @doc """ 29 | Sends an event and returns only after the event is dispatched. 30 | """ 31 | def sync_notify(event, timeout \\ 5000) do 32 | GenStage.call(__MODULE__, {:notify, event}, timeout) 33 | end 34 | 35 | ## Callbacks 36 | 37 | def init(:ok) do 38 | {:producer, {:queue.new, 0}, dispatcher: GenStage.BroadcastDispatcher} 39 | end 40 | 41 | def handle_call({:notify, event}, from, {queue, demand}) do 42 | dispatch_events(:queue.in({from, event}, queue), demand, []) 43 | end 44 | 45 | def handle_demand(incoming_demand, {queue, demand}) do 46 | dispatch_events(queue, incoming_demand + demand, []) 47 | end 48 | 49 | defp dispatch_events(queue, demand, events) do 50 | with d when d > 0 <- demand, 51 | {{:value, {from, event}}, queue} <- :queue.out(queue) do 52 | GenStage.reply(from, :ok) 53 | dispatch_events(queue, demand - 1, [event | events]) 54 | else 55 | _ -> {:noreply, Enum.reverse(events), {queue, demand}} 56 | end 57 | end 58 | end 59 | 60 | defmodule Consumer do 61 | @moduledoc """ 62 | The GenEvent handler implementation is a simple consumer. 
63 | """ 64 | 65 | use GenStage 66 | 67 | def start_link(_args) do 68 | GenStage.start_link(__MODULE__, :ok) 69 | end 70 | 71 | # Callbacks 72 | 73 | def init(:ok) do 74 | # Starts a permanent subscription to the broadcaster 75 | # which will automatically start requesting items. 76 | {:consumer, :ok, subscribe_to: [Broadcaster]} 77 | end 78 | 79 | def handle_events(events, _from, state) do 80 | for event <- events do 81 | IO.inspect {self(), event} 82 | end 83 | {:noreply, [], state} 84 | end 85 | end 86 | 87 | defmodule App do 88 | @moduledoc """ 89 | Your application entry-point. 90 | """ 91 | use Supervisor 92 | 93 | @impl true 94 | def init(_arg) do 95 | children = [ 96 | Supervisor.child_spec({Broadcaster, []}, id: 1), 97 | Supervisor.child_spec({Consumer, []}, id: 2), 98 | Supervisor.child_spec({Consumer, []}, id: 3), 99 | Supervisor.child_spec({Consumer, []}, id: 4), 100 | Supervisor.child_spec({Consumer, []}, id: 5), 101 | ] 102 | 103 | Supervisor.start_link(children, strategy: :one_for_one) 104 | end 105 | end 106 | 107 | # Start the app 108 | App.init(0) 109 | 110 | # Broadcast events 111 | Broadcaster.sync_notify(1) 112 | Broadcaster.sync_notify(2) 113 | Broadcaster.sync_notify(3) 114 | Broadcaster.sync_notify(4) 115 | Broadcaster.sync_notify(5) 116 | 117 | # Wait for them to be printed 118 | Process.sleep(2000) 119 | -------------------------------------------------------------------------------- /examples/producer_consumer.exs: -------------------------------------------------------------------------------- 1 | # Usage: mix run examples/producer_consumer.exs 2 | # 3 | # Hit Ctrl+C twice to stop it. 4 | # 5 | # This is a base example where a producer A emits items, 6 | # which are amplified by a producer consumer B and printed 7 | # by consumer C. 8 | defmodule A do 9 | use GenStage 10 | 11 | def init(counter) do 12 | {:producer, counter} 13 | end 14 | 15 | def handle_demand(demand, counter) when demand > 0 do 16 | # If the counter is 3 and we ask for 2 items, we will 17 | # emit the items 3 and 4, and set the state to 5. 18 | events = Enum.to_list(counter..counter+demand-1) 19 | {:noreply, events, counter + demand} 20 | end 21 | end 22 | 23 | defmodule B do 24 | use GenStage 25 | 26 | def init(number) do 27 | {:producer_consumer, number} 28 | end 29 | 30 | def handle_events(events, _from, number) do 31 | # If we receive [0, 1, 2], the number is 2, this will transform 32 | # it into [0, 1, 2, 1, 2, 3, 2, 3, 4]. 33 | events = 34 | for event <- events, 35 | entry <- event..event+number, 36 | do: entry 37 | {:noreply, events, number} 38 | end 39 | end 40 | 41 | defmodule C do 42 | use GenStage 43 | 44 | def init(:ok) do 45 | {:consumer, :the_state_does_not_matter} 46 | end 47 | 48 | def handle_events(events, _from, state) do 49 | # Wait for a second. 50 | :timer.sleep(1000) 51 | 52 | # Inspect the events. 53 | IO.inspect(events) 54 | 55 | # We are a consumer, so we would never emit items. 
56 | {:noreply, [], state} 57 | end 58 | end 59 | 60 | {:ok, a} = GenStage.start_link(A, 0) # starting from zero 61 | {:ok, b} = GenStage.start_link(B, 2) # expand by 2 62 | {:ok, c} = GenStage.start_link(C, :ok) # state does not matter 63 | 64 | GenStage.sync_subscribe(b, to: a) 65 | GenStage.sync_subscribe(c, to: b) 66 | Process.sleep(:infinity) -------------------------------------------------------------------------------- /examples/rate_limiter.exs: -------------------------------------------------------------------------------- 1 | # Usage: mix run examples/rate_limiter.exs 2 | # 3 | # Hit Ctrl+C twice to stop it. 4 | # 5 | # This is an example of using manual demand for 6 | # doing rate limiting work on a consumer. 7 | defmodule Producer do 8 | use GenStage 9 | 10 | def init(counter) do 11 | {:producer, counter} 12 | end 13 | 14 | def handle_demand(demand, counter) when demand > 0 do 15 | # If the counter is 3 and we ask for 2 items, we will 16 | # emit the items 3 and 4, and set the state to 5. 17 | events = Enum.to_list(counter..counter+demand-1) 18 | {:noreply, events, counter + demand} 19 | end 20 | end 21 | 22 | defmodule RateLimiter do 23 | use GenStage 24 | 25 | def init(_) do 26 | {:consumer, %{}} 27 | end 28 | 29 | def handle_subscribe(:producer, opts, from, producers) do 30 | # We will only allow max_demand events every 5000 milliseconds 31 | pending = opts[:max_demand] || 1000 32 | interval = opts[:interval] || 5000 33 | 34 | # Register the producer in the state 35 | producers = Map.put(producers, from, {pending, interval}) 36 | # Ask for the pending events and schedule the next time around 37 | producers = ask_and_schedule(producers, from) 38 | 39 | # Returns manual as we want control over the demand 40 | {:manual, producers} 41 | end 42 | 43 | def handle_cancel(_, from, producers) do 44 | # Remove the producers from the map on unsubscribe 45 | {:noreply, [], Map.delete(producers, from)} 46 | end 47 | 48 | def handle_events(events, from, producers) do 49 | # Bump the amount of pending events for the given producer 50 | producers = Map.update!(producers, from, fn {pending, interval} -> 51 | {pending + length(events), interval} 52 | end) 53 | 54 | # Consume the events by printing them. 55 | IO.inspect(events) 56 | 57 | # A producer_consumer would return the processed events here. 58 | {:noreply, [], producers} 59 | end 60 | 61 | def handle_info({:ask, from}, producers) do 62 | # This callback is invoked by the Process.send_after/3 message below. 63 | {:noreply, [], ask_and_schedule(producers, from)} 64 | end 65 | 66 | defp ask_and_schedule(producers, from) do 67 | case producers do 68 | %{^from => {pending, interval}} -> 69 | GenStage.ask(from, pending) 70 | Process.send_after(self(), {:ask, from}, interval) 71 | Map.put(producers, from, {0, interval}) 72 | %{} -> 73 | producers 74 | end 75 | end 76 | end 77 | 78 | {:ok, a} = GenStage.start_link(Producer, 0) # starting from zero 79 | {:ok, b} = GenStage.start_link(RateLimiter, :ok) # state does not matter 80 | 81 | # Ask for 10 items every 2 seconds. 82 | GenStage.sync_subscribe(b, to: a, max_demand: 10, interval: 2000) 83 | Process.sleep(:infinity) 84 | -------------------------------------------------------------------------------- /lib/consumer_supervisor.ex: -------------------------------------------------------------------------------- 1 | defmodule ConsumerSupervisor do 2 | @moduledoc ~S""" 3 | A supervisor that starts children as events flow in. 
4 | 5 | A `ConsumerSupervisor` can be used as the consumer in a `GenStage` pipeline. 6 | A new child process will be started per event, where the event is appended 7 | to the arguments in the child specification. 8 | 9 | A `ConsumerSupervisor` can be attached to a producer by returning 10 | `:subscribe_to` from `c:init/1` or explicitly with `GenStage.sync_subscribe/3` 11 | and `GenStage.async_subscribe/2`. 12 | 13 | Once subscribed, the supervisor will ask the producer for `:max_demand` events 14 | and start child processes as events arrive. As child processes terminate, the 15 | supervisor will accumulate demand and request more events once `:min_demand` 16 | is reached. This allows the `ConsumerSupervisor` to work similar to a pool, 17 | except a child process is started per event. The minimum amount of concurrent 18 | children per producer is specified by `:min_demand` and the maximum is given 19 | by `:max_demand`. 20 | 21 | ## Example 22 | 23 | Let's define a GenStage consumer as a `ConsumerSupervisor` that subscribes 24 | to a producer named `Producer` and starts a new process for each event 25 | received from the producer. Each new process will be started by calling 26 | `Printer.start_link/1`, which simply starts a task that will print the 27 | incoming event to the terminal. 28 | 29 | defmodule Consumer do 30 | use ConsumerSupervisor 31 | 32 | def start_link(arg) do 33 | ConsumerSupervisor.start_link(__MODULE__, arg) 34 | end 35 | 36 | def init(_arg) do 37 | # Note: By default the restart for a child is set to :permanent 38 | # which is not supported in ConsumerSupervisor. You need to explicitly 39 | # set the :restart option either to :temporary or :transient. 40 | children = [%{id: Printer, start: {Printer, :start_link, []}, restart: :transient}] 41 | opts = [strategy: :one_for_one, subscribe_to: [{Producer, max_demand: 50}]] 42 | ConsumerSupervisor.init(children, opts) 43 | end 44 | end 45 | 46 | Then in the `Printer` module: 47 | 48 | defmodule Printer do 49 | def start_link(event) do 50 | # Note: this function must return the format of `{:ok, pid}` and like 51 | # all children started by a Supervisor, the process must be linked 52 | # back to the supervisor (if you use `Task.start_link/1` then both 53 | # these requirements are met automatically) 54 | Task.start_link(fn -> 55 | IO.inspect({self(), event}) 56 | end) 57 | end 58 | end 59 | 60 | Similar to `Supervisor`, `ConsumerSupervisor` also provides `start_link/3`, 61 | which allows developers to start a supervisor with the help of a callback 62 | module. 63 | 64 | ## Name Registration 65 | 66 | A supervisor is bound to the same name registration rules as a `GenServer`. 67 | Read more about it in the `GenServer` docs. 68 | """ 69 | 70 | @behaviour GenStage 71 | 72 | @typedoc "Options used by the `start*` functions" 73 | @type option :: 74 | {:registry, atom} 75 | | {:name, Supervisor.name()} 76 | | {:strategy, Supervisor.Spec.strategy()} 77 | | {:max_restarts, non_neg_integer} 78 | | {:max_seconds, non_neg_integer} 79 | | {:subscribe_to, [GenStage.stage() | {GenStage.stage(), keyword()}]} 80 | 81 | @doc """ 82 | Callback invoked to start the supervisor and during hot code upgrades. 83 | 84 | ## Options 85 | 86 | * `:strategy` - the restart strategy option. Only `:one_for_one` 87 | is supported by consumer supervisors. 88 | 89 | * `:max_restarts` - the maximum amount of restarts allowed in 90 | a time frame. Defaults to 3 times. 91 | 92 | * `:max_seconds` - the time frame in which `:max_restarts` applies 93 | in seconds. 
Defaults to 5 seconds. 94 | 95 | * `:subscribe_to` - a list of producers to subscribe to. Each element 96 | represents the producer or a tuple with the producer and the subscription 97 | options, for example, `[Producer]` or `[{Producer, max_demand: 20, min_demand: 10}]`. 98 | 99 | """ 100 | @callback init(args :: term) :: 101 | {:ok, [:supervisor.child_spec()], options :: keyword()} 102 | | :ignore 103 | 104 | defstruct [ 105 | :name, 106 | :mod, 107 | :args, 108 | :template, 109 | :max_restarts, 110 | :max_seconds, 111 | :strategy, 112 | children: %{}, 113 | producers: %{}, 114 | restarts: [], 115 | restarting: 0 116 | ] 117 | 118 | @doc false 119 | defmacro __using__(opts) do 120 | quote location: :keep, bind_quoted: [opts: opts] do 121 | @behaviour ConsumerSupervisor 122 | import Supervisor.Spec 123 | 124 | @doc false 125 | def child_spec(arg) do 126 | default = %{ 127 | id: __MODULE__, 128 | start: {__MODULE__, :start_link, [arg]}, 129 | type: :supervisor 130 | } 131 | 132 | Supervisor.child_spec(default, unquote(Macro.escape(opts))) 133 | end 134 | 135 | defoverridable child_spec: 1 136 | 137 | @doc false 138 | def init(arg) 139 | end 140 | end 141 | 142 | defmodule Default do 143 | @moduledoc false 144 | 145 | def init(args) do 146 | args 147 | end 148 | end 149 | 150 | @doc """ 151 | Starts a supervisor with the given children. 152 | 153 | A strategy is required to be given as an option. Furthermore, 154 | the `:max_restarts`, `:max_seconds`, and `:subscribe_to` 155 | values can be configured as described in the documentation for the 156 | `c:init/1` callback. 157 | 158 | The options can also be used to register a supervisor name. 159 | The supported values are described under the "Name Registration" 160 | section in the `GenServer` module docs. 161 | 162 | The child processes specified in `children` will be started by appending 163 | the event to process to the existing function arguments in the child specification. 164 | 165 | Note that the consumer supervisor is linked to the parent process 166 | and will exit not only on crashes but also if the parent process 167 | exits with `:normal` reason. 168 | """ 169 | @spec start_link([Supervisor.Spec.spec() | Supervisor.child_spec()], [option]) :: 170 | Supervisor.on_start() 171 | def start_link(children, options) when is_list(children) do 172 | {sup_options, start_options} = 173 | Keyword.split(options, [:strategy, :max_restarts, :max_seconds, :subscribe_to]) 174 | 175 | start_link(Default, init(children, sup_options), start_options) 176 | end 177 | 178 | @doc """ 179 | Starts a consumer supervisor module with the given `args`. 180 | 181 | To start the supervisor, the `c:init/1` callback will be invoked in the given 182 | module, with `args` passed to it. The `c:init/1` callback must return a 183 | supervision specification which can be created with the help of the 184 | `Supervisor` module. 185 | 186 | If the `c:init/1` callback returns `:ignore`, this function returns 187 | `:ignore` as well and the supervisor terminates with reason `:normal`. 188 | If it fails or returns an incorrect value, this function returns 189 | `{:error, term}` where `term` is a term with information about the 190 | error, and the supervisor terminates with reason `term`. 191 | 192 | The `:name` option can also be given in order to register a supervisor 193 | name. The supported values are described under the "Name Registration" 194 | section in the `GenServer` module docs. 
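
  For instance, assuming a module-based consumer supervisor such as the
  `MyConsumerSupervisor` shown in the `init/2` documentation below, it can be
  started and registered under its module name with:

      ConsumerSupervisor.start_link(MyConsumerSupervisor, :ok, name: MyConsumerSupervisor)
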
195 | """ 196 | @spec start_link(module, any) :: Supervisor.on_start() 197 | @spec start_link(module, any, [option]) :: Supervisor.on_start() 198 | def start_link(mod, args, opts \\ []) do 199 | GenStage.start_link(__MODULE__, {mod, args, opts[:name]}, opts) 200 | end 201 | 202 | @doc """ 203 | Starts a child in the consumer supervisor. 204 | 205 | The child process will be started by appending the given list of 206 | `args` to the existing function arguments in the child specification. 207 | 208 | This child is started separately from any producer and does not 209 | count towards the demand of any of them. 210 | 211 | If the child process starts, function returns `{:ok, child}` or 212 | `{:ok, child, info}`, the pid is added to the supervisor, and the 213 | function returns the same value. 214 | 215 | If the child process start function returns `:ignore`, an error tuple, 216 | or an erroneous value, or if it fails, the child is discarded and 217 | `:ignore` or `{:error, error}` where `error` is a term containing 218 | information about the error is returned. 219 | """ 220 | @spec start_child(Supervisor.supervisor(), [term]) :: Supervisor.on_start_child() 221 | def start_child(supervisor, args) when is_list(args) do 222 | call(supervisor, {:start_child, args}) 223 | end 224 | 225 | @doc """ 226 | Terminates the given child pid. 227 | 228 | If successful, the function returns `:ok`. If there is no 229 | such pid, the function returns `{:error, :not_found}`. 230 | """ 231 | @spec terminate_child(Supervisor.supervisor(), pid) :: :ok | {:error, :not_found} 232 | def terminate_child(supervisor, pid) when is_pid(pid) do 233 | call(supervisor, {:terminate_child, pid}) 234 | end 235 | 236 | @doc """ 237 | Returns a list with information about all children. 238 | 239 | Note that calling this function when supervising a large number 240 | of children under low memory conditions can cause an out of memory 241 | exception. 242 | 243 | This function returns a list of tuples containing: 244 | 245 | * `id` - as defined in the child specification but is always 246 | set to `:undefined` for consumer supervisors 247 | 248 | * `child` - the pid of the corresponding child process or the 249 | atom `:restarting` if the process is about to be restarted 250 | 251 | * `type` - `:worker` or `:supervisor` as defined in the child 252 | specification 253 | 254 | * `modules` - as defined in the child specification 255 | 256 | """ 257 | @spec which_children(Supervisor.supervisor()) :: [ 258 | {:undefined, pid | :restarting, :worker | :supervisor, :dynamic | [module()]} 259 | ] 260 | def which_children(supervisor) do 261 | call(supervisor, :which_children) 262 | end 263 | 264 | @doc """ 265 | Returns a map containing count values for the supervisor. 
266 | 267 | The map contains the following keys: 268 | 269 | * `:specs` - always `1` as consumer supervisors have a single specification 270 | 271 | * `:active` - the count of all actively running child processes managed by 272 | this supervisor 273 | 274 | * `:supervisors` - the count of all supervisors whether or not the child 275 | process is still alive 276 | 277 | * `:workers` - the count of all workers, whether or not the child process 278 | is still alive 279 | 280 | """ 281 | @spec count_children(Supervisor.supervisor()) :: %{ 282 | specs: non_neg_integer, 283 | active: non_neg_integer, 284 | supervisors: non_neg_integer, 285 | workers: non_neg_integer 286 | } 287 | def count_children(supervisor) do 288 | call(supervisor, :count_children) 289 | end 290 | 291 | @doc """ 292 | Receives a template to initialize and a set of options. 293 | 294 | This is typically invoked at the end of the `c:init/1` callback of module-based supervisors. 295 | 296 | This function returns a the child specification and the supervisor flags. 297 | 298 | ## Examples 299 | 300 | Using the child specification changes introduced in Elixir 1.5: 301 | 302 | defmodule MyConsumerSupervisor do 303 | use ConsumerSupervisor 304 | 305 | def start_link(arg) do 306 | ConsumerSupervisor.start_link(__MODULE__, arg) 307 | end 308 | 309 | def init(_arg) do 310 | ConsumerSupervisor.init([MyConsumer], strategy: :one_for_one, subscribe_to: MyProducer) 311 | end 312 | end 313 | 314 | """ 315 | def init([{_, _, _, _, _, _} = template], opts) do 316 | {:ok, [template], opts} 317 | end 318 | 319 | def init([template], opts) when is_tuple(template) or is_map(template) or is_atom(template) do 320 | {:ok, {_, [template]}} = Supervisor.init([template], opts) 321 | {:ok, [template], opts} 322 | end 323 | 324 | @compile {:inline, call: 2} 325 | 326 | defp call(supervisor, req) do 327 | GenStage.call(supervisor, req, :infinity) 328 | end 329 | 330 | ## Callbacks 331 | 332 | @impl true 333 | def init({mod, args, name}) do 334 | Process.put(:"$initial_call", {:supervisor, mod, 1}) 335 | Process.flag(:trap_exit, true) 336 | 337 | case mod.init(args) do 338 | {:ok, children, opts} -> 339 | case validate_specs(children) do 340 | :ok -> 341 | state = %ConsumerSupervisor{mod: mod, args: args, name: name || {self(), mod}} 342 | 343 | case init(state, children, opts) do 344 | {:ok, state, opts} -> {:consumer, state, opts} 345 | {:error, message} -> {:stop, {:bad_opts, message}} 346 | end 347 | 348 | {:error, message} -> 349 | {:stop, {:bad_specs, message}} 350 | end 351 | 352 | :ignore -> 353 | :ignore 354 | 355 | other -> 356 | {:stop, {:bad_return_value, other}} 357 | end 358 | end 359 | 360 | defp init(state, [child], opts) when is_list(opts) do 361 | {strategy, opts} = Keyword.pop(opts, :strategy) 362 | {max_restarts, opts} = Keyword.pop(opts, :max_restarts, 3) 363 | {max_seconds, opts} = Keyword.pop(opts, :max_seconds, 5) 364 | template = normalize_template(child) 365 | 366 | with :ok <- validate_strategy(strategy), 367 | :ok <- validate_restarts(max_restarts), 368 | :ok <- validate_seconds(max_seconds), 369 | :ok <- validate_template(template) do 370 | state = %{ 371 | state 372 | | template: template, 373 | strategy: strategy, 374 | max_restarts: max_restarts, 375 | max_seconds: max_seconds 376 | } 377 | 378 | {:ok, state, opts} 379 | end 380 | end 381 | 382 | defp init(_state, [_], _opts) do 383 | {:error, "supervisor's init expects a keywords list as options"} 384 | end 385 | 386 | defp validate_specs([_] = children) do 387 | 
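    # A single child template was given: let OTP validate it via :supervisor.check_childspecs/1.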
:supervisor.check_childspecs(children) 388 | end 389 | 390 | defp validate_specs(_children) do 391 | {:error, "consumer supervisor expects a list with a single item as a template"} 392 | end 393 | 394 | defp validate_strategy(strategy) when strategy in [:one_for_one], do: :ok 395 | defp validate_strategy(nil), do: {:error, "supervisor expects a strategy to be given"} 396 | defp validate_strategy(_), do: {:error, "unknown supervision strategy for consumer supervisor"} 397 | 398 | defp validate_restarts(restart) when is_integer(restart), do: :ok 399 | defp validate_restarts(_), do: {:error, "max_restarts must be an integer"} 400 | 401 | defp validate_seconds(seconds) when is_integer(seconds), do: :ok 402 | defp validate_seconds(_), do: {:error, "max_seconds must be an integer"} 403 | 404 | @impl true 405 | def handle_subscribe(:producer, opts, {_, ref} = from, state) do 406 | # GenStage checks these options before allowing susbcription 407 | max = Keyword.get(opts, :max_demand, 1000) 408 | min = Keyword.get(opts, :min_demand, div(max, 2)) 409 | GenStage.ask(from, max) 410 | {:manual, put_in(state.producers[ref], {from, 0, 0, min, max})} 411 | end 412 | 413 | @impl true 414 | def handle_cancel(_, {_, ref}, state) do 415 | {:noreply, [], update_in(state.producers, &Map.delete(&1, ref))} 416 | end 417 | 418 | @impl true 419 | def handle_events(events, {pid, ref} = from, state) do 420 | %{template: child, children: children} = state 421 | {new, errors} = start_events(events, from, child, 0, [], state) 422 | new_children = Enum.into(new, children) 423 | started = map_size(new_children) - map_size(children) 424 | {:noreply, [], maybe_ask(ref, pid, started + errors, errors, new_children, state)} 425 | end 426 | 427 | defp start_events([extra | extras], from, child, errors, acc, state) do 428 | {_, ref} = from 429 | {_, {m, f, args}, restart, _, _, _} = child 430 | args = args ++ [extra] 431 | 432 | case start_child(m, f, args) do 433 | {:ok, pid, _} when restart == :temporary -> 434 | acc = [{pid, [ref | :undefined]} | acc] 435 | start_events(extras, from, child, errors, acc, state) 436 | 437 | {:ok, pid, _} -> 438 | acc = [{pid, [ref | args]} | acc] 439 | start_events(extras, from, child, errors, acc, state) 440 | 441 | {:ok, pid} when restart == :temporary -> 442 | acc = [{pid, [ref | :undefined]} | acc] 443 | start_events(extras, from, child, errors, acc, state) 444 | 445 | {:ok, pid} -> 446 | acc = [{pid, [ref | args]} | acc] 447 | start_events(extras, from, child, errors, acc, state) 448 | 449 | :ignore -> 450 | start_events(extras, from, child, errors + 1, acc, state) 451 | 452 | {:error, reason} -> 453 | :error_logger.error_msg( 454 | ~c"ConsumerSupervisor failed to start child from: ~tp with reason: ~tp~n", 455 | [from, reason] 456 | ) 457 | 458 | report_error(:start_error, reason, :undefined, args, child, state) 459 | start_events(extras, from, child, errors + 1, acc, state) 460 | end 461 | end 462 | 463 | defp start_events([], _, _, errors, acc, _) do 464 | {acc, errors} 465 | end 466 | 467 | defp maybe_ask(ref, pid, events, down, children, state) do 468 | %{producers: producers} = state 469 | 470 | case producers do 471 | %{^ref => {to, count, pending, min, max}} -> 472 | if count + events > max do 473 | :error_logger.error_msg( 474 | ~c"ConsumerSupervisor has received ~tp events in excess from: ~tp~n", 475 | [count + events - max, {pid, ref}] 476 | ) 477 | end 478 | 479 | pending = 480 | case pending + down do 481 | ask when ask >= min -> 482 | GenStage.ask(to, ask) 483 | 0 484 | 485 | 
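          # Still below min_demand: keep the amount as pending instead of asking the producer yet.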
ask -> 486 | ask 487 | end 488 | 489 | count = count + events - down 490 | producers = Map.put(producers, ref, {to, count, pending, min, max}) 491 | %{state | children: children, producers: producers} 492 | 493 | %{} -> 494 | %{state | children: children} 495 | end 496 | end 497 | 498 | @impl true 499 | def handle_call(:which_children, _from, state) do 500 | %{children: children, template: child} = state 501 | {_, _, _, _, type, mods} = child 502 | 503 | reply = 504 | for {pid, args} <- children do 505 | maybe_pid = 506 | case args do 507 | {:restarting, _} -> :restarting 508 | _ -> pid 509 | end 510 | 511 | {:undefined, maybe_pid, type, mods} 512 | end 513 | 514 | {:reply, reply, [], state} 515 | end 516 | 517 | def handle_call(:count_children, _from, state) do 518 | %{children: children, template: child, restarting: restarting} = state 519 | {_, _, _, _, type, _} = child 520 | 521 | specs = map_size(children) 522 | active = specs - restarting 523 | 524 | reply = 525 | case type do 526 | :supervisor -> 527 | %{specs: 1, active: active, workers: 0, supervisors: specs} 528 | 529 | :worker -> 530 | %{specs: 1, active: active, workers: specs, supervisors: 0} 531 | end 532 | 533 | {:reply, reply, [], state} 534 | end 535 | 536 | def handle_call({:terminate_child, pid}, _from, %{children: children} = state) do 537 | case children do 538 | %{^pid => [producer | _] = info} -> 539 | :ok = terminate_children(%{pid => info}, state) 540 | {:reply, :ok, [], delete_child_and_maybe_ask(producer, pid, state)} 541 | 542 | %{^pid => {:restarting, [producer | _]} = info} -> 543 | :ok = terminate_children(%{pid => info}, state) 544 | {:reply, :ok, [], delete_child_and_maybe_ask(producer, pid, state)} 545 | 546 | %{} -> 547 | {:reply, {:error, :not_found}, [], state} 548 | end 549 | end 550 | 551 | def handle_call({:start_child, extra}, _from, %{template: child} = state) do 552 | handle_start_child(child, extra, state) 553 | end 554 | 555 | defp handle_start_child({_, {m, f, args}, restart, _, _, _}, extra, state) do 556 | args = args ++ extra 557 | 558 | case reply = start_child(m, f, args) do 559 | {:ok, pid, _} -> 560 | {:reply, reply, [], save_child(restart, :dynamic, pid, args, state)} 561 | 562 | {:ok, pid} -> 563 | {:reply, reply, [], save_child(restart, :dynamic, pid, args, state)} 564 | 565 | _ -> 566 | {:reply, reply, [], state} 567 | end 568 | end 569 | 570 | defp start_child(m, f, a) do 571 | try do 572 | apply(m, f, a) 573 | catch 574 | kind, reason -> 575 | {:error, exit_reason(kind, reason, __STACKTRACE__)} 576 | else 577 | {:ok, pid, extra} when is_pid(pid) -> {:ok, pid, extra} 578 | {:ok, pid} when is_pid(pid) -> {:ok, pid} 579 | :ignore -> :ignore 580 | {:error, _} = error -> error 581 | other -> {:error, other} 582 | end 583 | end 584 | 585 | defp save_child(:temporary, producer, pid, _, state), 586 | do: put_in(state.children[pid], [producer | :undefined]) 587 | 588 | defp save_child(_, producer, pid, args, state), 589 | do: put_in(state.children[pid], [producer | args]) 590 | 591 | defp exit_reason(:exit, reason, _), do: reason 592 | defp exit_reason(:error, reason, stack), do: {reason, stack} 593 | defp exit_reason(:throw, value, stack), do: {{:nocatch, value}, stack} 594 | 595 | @impl true 596 | def handle_cast(_msg, state) do 597 | {:noreply, [], state} 598 | end 599 | 600 | @impl true 601 | def handle_info({:EXIT, pid, reason}, state) do 602 | case maybe_restart_child(pid, reason, state) do 603 | {:ok, state} -> {:noreply, [], state} 604 | {:shutdown, state} -> {:stop, :shutdown, 
state} 605 | end 606 | end 607 | 608 | def handle_info({:"$gen_restart", pid}, state) do 609 | %{children: children, template: child, restarting: restarting} = state 610 | state = %{state | restarting: restarting - 1} 611 | 612 | case children do 613 | %{^pid => restarting_args} -> 614 | {:restarting, [producer | args]} = restarting_args 615 | 616 | case restart_child(producer, pid, args, child, state) do 617 | {:ok, state} -> 618 | {:noreply, [], state} 619 | 620 | {:shutdown, state} -> 621 | {:stop, :shutdown, state} 622 | end 623 | 624 | # We may hit clause if we send $gen_restart and then 625 | # someone calls terminate_child, removing the child. 626 | %{} -> 627 | {:noreply, [], state} 628 | end 629 | end 630 | 631 | def handle_info(msg, state) do 632 | :error_logger.error_msg(~c"ConsumerSupervisor received unexpected message: ~tp~n", [msg]) 633 | {:noreply, [], state} 634 | end 635 | 636 | @impl true 637 | def code_change(_, %{mod: mod, args: args} = state, _) do 638 | case mod.init(args) do 639 | {:ok, children, opts} -> 640 | case validate_specs(children) do 641 | :ok -> 642 | case init(state, children, opts) do 643 | {:ok, state, _} -> {:ok, state} 644 | {:error, message} -> {:error, {:bad_opts, message}} 645 | end 646 | 647 | {:error, message} -> 648 | {:error, {:bad_specs, message}} 649 | end 650 | 651 | :ignore -> 652 | {:ok, state} 653 | 654 | error -> 655 | error 656 | end 657 | end 658 | 659 | @impl true 660 | def terminate(_, %{children: children} = state) do 661 | :ok = terminate_children(children, state) 662 | end 663 | 664 | defp terminate_children(children, %{template: template} = state) do 665 | {_, _, restart, shutdown, _, _} = template 666 | 667 | {pids, stacks} = monitor_children(children, restart) 668 | size = map_size(pids) 669 | 670 | stacks = 671 | case shutdown do 672 | :brutal_kill -> 673 | for {pid, _} <- pids, do: Process.exit(pid, :kill) 674 | wait_children(restart, shutdown, pids, size, nil, stacks) 675 | 676 | :infinity -> 677 | for {pid, _} <- pids, do: Process.exit(pid, :shutdown) 678 | wait_children(restart, shutdown, pids, size, nil, stacks) 679 | 680 | time -> 681 | for {pid, _} <- pids, do: Process.exit(pid, :shutdown) 682 | timer = :erlang.start_timer(time, self(), :kill) 683 | wait_children(restart, shutdown, pids, size, timer, stacks) 684 | end 685 | 686 | for {pid, reason} <- stacks do 687 | report_error(:shutdown_error, reason, pid, :undefined, template, state) 688 | end 689 | 690 | :ok 691 | end 692 | 693 | defp monitor_children(children, restart) do 694 | Enum.reduce(children, {%{}, %{}}, fn 695 | {_, {:restarting, _}}, {pids, stacks} -> 696 | {pids, stacks} 697 | 698 | {pid, _}, {pids, stacks} -> 699 | case monitor_child(pid) do 700 | :ok -> 701 | {Map.put(pids, pid, true), stacks} 702 | 703 | {:error, :normal} when restart != :permanent -> 704 | {pids, stacks} 705 | 706 | {:error, reason} -> 707 | {pids, Map.put(stacks, pid, reason)} 708 | end 709 | end) 710 | end 711 | 712 | defp monitor_child(pid) do 713 | ref = Process.monitor(pid) 714 | Process.unlink(pid) 715 | 716 | receive do 717 | {:EXIT, ^pid, reason} -> 718 | receive do 719 | {:DOWN, ^ref, :process, ^pid, _} -> {:error, reason} 720 | end 721 | after 722 | 0 -> :ok 723 | end 724 | end 725 | 726 | defp wait_children(_restart, _shutdown, _pids, 0, nil, stacks) do 727 | stacks 728 | end 729 | 730 | defp wait_children(_restart, _shutdown, _pids, 0, timer, stacks) do 731 | _ = :erlang.cancel_timer(timer) 732 | 733 | receive do 734 | {:timeout, ^timer, :kill} -> :ok 735 | after 736 | 0 
-> :ok 737 | end 738 | 739 | stacks 740 | end 741 | 742 | defp wait_children(restart, :brutal_kill, pids, size, timer, stacks) do 743 | receive do 744 | {:DOWN, _ref, :process, pid, :killed} -> 745 | wait_children(restart, :brutal_kill, Map.delete(pids, pid), size - 1, timer, stacks) 746 | 747 | {:DOWN, _ref, :process, pid, reason} -> 748 | wait_children( 749 | restart, 750 | :brutal_kill, 751 | Map.delete(pids, pid), 752 | size - 1, 753 | timer, 754 | Map.put(stacks, pid, reason) 755 | ) 756 | end 757 | end 758 | 759 | defp wait_children(restart, shutdown, pids, size, timer, stacks) do 760 | receive do 761 | {:DOWN, _ref, :process, pid, {:shutdown, _}} -> 762 | wait_children(restart, shutdown, Map.delete(pids, pid), size - 1, timer, stacks) 763 | 764 | {:DOWN, _ref, :process, pid, :shutdown} -> 765 | wait_children(restart, shutdown, Map.delete(pids, pid), size - 1, timer, stacks) 766 | 767 | {:DOWN, _ref, :process, pid, :normal} when restart != :permanent -> 768 | wait_children(restart, shutdown, Map.delete(pids, pid), size - 1, timer, stacks) 769 | 770 | {:DOWN, _ref, :process, pid, reason} -> 771 | stacks = Map.put(stacks, pid, reason) 772 | wait_children(restart, shutdown, Map.delete(pids, pid), size - 1, timer, stacks) 773 | 774 | {:timeout, ^timer, :kill} -> 775 | for {pid, _} <- pids, do: Process.exit(pid, :kill) 776 | wait_children(restart, shutdown, pids, size, nil, stacks) 777 | end 778 | end 779 | 780 | defp maybe_restart_child(pid, reason, state) do 781 | %{children: children, template: child} = state 782 | {_, _, restart, _, _, _} = child 783 | 784 | case children do 785 | %{^pid => [producer | args]} -> 786 | maybe_restart_child(restart, reason, producer, pid, args, child, state) 787 | 788 | %{} -> 789 | {:ok, state} 790 | end 791 | end 792 | 793 | defp maybe_restart_child(:permanent, reason, producer, pid, args, child, state) do 794 | report_error(:child_terminated, reason, pid, args, child, state) 795 | restart_child(producer, pid, args, child, state) 796 | end 797 | 798 | defp maybe_restart_child(_, :normal, producer, pid, _args, _child, state) do 799 | {:ok, delete_child_and_maybe_ask(producer, pid, state)} 800 | end 801 | 802 | defp maybe_restart_child(_, :shutdown, producer, pid, _args, _child, state) do 803 | {:ok, delete_child_and_maybe_ask(producer, pid, state)} 804 | end 805 | 806 | defp maybe_restart_child(_, {:shutdown, _}, producer, pid, _args, _child, state) do 807 | {:ok, delete_child_and_maybe_ask(producer, pid, state)} 808 | end 809 | 810 | defp maybe_restart_child(:transient, reason, producer, pid, args, child, state) do 811 | report_error(:child_terminated, reason, pid, args, child, state) 812 | restart_child(producer, pid, args, child, state) 813 | end 814 | 815 | defp maybe_restart_child(:temporary, reason, producer, pid, args, child, state) do 816 | report_error(:child_terminated, reason, pid, args, child, state) 817 | {:ok, delete_child_and_maybe_ask(producer, pid, state)} 818 | end 819 | 820 | defp delete_child_and_maybe_ask(:dynamic, pid, %{children: children} = state) do 821 | %{state | children: Map.delete(children, pid)} 822 | end 823 | 824 | defp delete_child_and_maybe_ask(ref, pid, %{children: children} = state) do 825 | children = Map.delete(children, pid) 826 | maybe_ask(ref, pid, 0, 1, children, state) 827 | end 828 | 829 | defp restart_child(producer, pid, args, child, state) do 830 | case add_restart(state) do 831 | {:ok, %{strategy: strategy} = state} -> 832 | case restart_child(strategy, producer, pid, args, child, state) do 833 | {:ok, 
state} -> 834 | {:ok, state} 835 | 836 | {:try_again, state} -> 837 | send(self(), {:"$gen_restart", pid}) 838 | {:ok, state} 839 | end 840 | 841 | {:shutdown, state} -> 842 | report_error(:shutdown, :reached_max_restart_intensity, pid, args, child, state) 843 | {:shutdown, delete_child_and_maybe_ask(producer, pid, state)} 844 | end 845 | end 846 | 847 | defp add_restart(state) do 848 | %{max_seconds: max_seconds, max_restarts: max_restarts, restarts: restarts} = state 849 | now = :erlang.monotonic_time(1) 850 | restarts = add_restart([now | restarts], now, max_seconds) 851 | state = %{state | restarts: restarts} 852 | 853 | if length(restarts) <= max_restarts do 854 | {:ok, state} 855 | else 856 | {:shutdown, state} 857 | end 858 | end 859 | 860 | defp add_restart(restarts, now, period) do 861 | for then <- restarts, now <= then + period, do: then 862 | end 863 | 864 | defp restart_child(:one_for_one, producer, current_pid, args, child, state) do 865 | {_, {m, f, _}, restart, _, _, _} = child 866 | 867 | case start_child(m, f, args) do 868 | {:ok, pid, _} -> 869 | state = %{state | children: Map.delete(state.children, current_pid)} 870 | {:ok, save_child(restart, producer, pid, args, state)} 871 | 872 | {:ok, pid} -> 873 | state = %{state | children: Map.delete(state.children, current_pid)} 874 | {:ok, save_child(restart, producer, pid, args, state)} 875 | 876 | :ignore -> 877 | {:ok, delete_child_and_maybe_ask(producer, current_pid, state)} 878 | 879 | {:error, reason} -> 880 | report_error(:start_error, reason, {:restarting, current_pid}, args, child, state) 881 | state = restart_child(current_pid, state) 882 | {:try_again, update_in(state.restarting, &(&1 + 1))} 883 | end 884 | end 885 | 886 | defp restart_child(pid, %{children: children} = state) do 887 | case children do 888 | %{^pid => {:restarting, _}} -> 889 | state 890 | 891 | %{^pid => info} -> 892 | %{state | children: Map.put(children, pid, {:restarting, info})} 893 | end 894 | end 895 | 896 | defp report_error(error, reason, pid, args, child, %{name: name}) do 897 | :error_logger.error_report( 898 | :supervisor_report, 899 | supervisor: name, 900 | errorContext: error, 901 | reason: reason, 902 | offender: extract_child(pid, args, child) 903 | ) 904 | end 905 | 906 | defp extract_child(pid, args, {id, {m, f, _}, restart, shutdown, type, _}) do 907 | [ 908 | pid: pid, 909 | id: id, 910 | mfargs: {m, f, args}, 911 | restart_type: restart, 912 | shutdown: shutdown, 913 | child_type: type 914 | ] 915 | end 916 | 917 | @impl true 918 | def format_status(:terminate, [_pdict, state]) do 919 | state 920 | end 921 | 922 | def format_status(_, [_pdict, %{mod: mod} = state]) do 923 | [ 924 | data: [{~c"State", state}], 925 | supervisor: [{~c"Callback", mod}] 926 | ] 927 | end 928 | 929 | defp normalize_template(%{id: id, start: {mod, _, _} = start} = child), 930 | do: { 931 | id, 932 | start, 933 | Map.get(child, :restart, :permanent), 934 | Map.get(child, :shutdown, 5_000), 935 | Map.get(child, :type, :worker), 936 | Map.get(child, :modules, [mod]) 937 | } 938 | 939 | defp normalize_template({_, _, _, _, _, _} = child), do: child 940 | 941 | defp validate_template({_, _, :permanent, _, _, _}) do 942 | error = """ 943 | a child specification with :restart set to :permanent \ 944 | is not supported in ConsumerSupervisor 945 | 946 | Set the :restart option either to :temporary, so children \ 947 | spawned from events are never restarted, or :transient, \ 948 | so they are restarted only on abnormal exits 949 | """ 950 | 951 | {:error, 
error} 952 | end 953 | 954 | defp validate_template({_, _, _, _, _, _}) do 955 | :ok 956 | end 957 | end 958 | -------------------------------------------------------------------------------- /lib/gen_stage/buffer.ex: -------------------------------------------------------------------------------- 1 | defmodule GenStage.Buffer do 2 | # The buffer stores temporary, which is implicitly discarded, 3 | # and permanent data, which are explicitly discarded. 4 | # 5 | # Data is always delivered in the order they are buffered. 6 | # The temporary data is stored in a queue. Permanent data 7 | # is stored in a wheel for performance and to avoid discards. 8 | @moduledoc false 9 | 10 | @opaque t() :: {:queue.queue(), non_neg_integer(), wheel()} 11 | @typep wheel() :: {non_neg_integer(), pos_integer(), map()} | pos_integer() | reference() 12 | 13 | @doc """ 14 | Builds a new buffer. 15 | """ 16 | def new(size) when size > 0 do 17 | {:queue.new(), 0, init_wheel(size)} 18 | end 19 | 20 | @doc """ 21 | Returns the estimate size of the buffer data. 22 | 23 | It does not count data on the wheel. 24 | """ 25 | def estimate_size({_, count, _}) do 26 | count 27 | end 28 | 29 | @doc """ 30 | Stores the temporary entries. 31 | 32 | `keep` controls which side to keep, `:first` or `:last`. 33 | 34 | It returns a new buffer, the amount of discarded messages and 35 | any permanent entry that had to be emitted while discarding. 36 | """ 37 | def store_temporary({queue, counter, infos}, temps, keep) when is_list(temps) do 38 | {{excess, queue, counter}, perms, infos} = 39 | store_temporary(keep, temps, queue, counter, capacity_wheel(infos), infos) 40 | 41 | {{queue, counter, infos}, excess, perms} 42 | end 43 | 44 | defp store_temporary(_keep, temps, _queue, 0, :infinity, infos), 45 | do: {{0, :queue.from_list(temps), length(temps)}, [], infos} 46 | 47 | defp store_temporary(_keep, temps, queue, counter, :infinity, infos), 48 | do: {queue_infinity(temps, queue, counter), [], infos} 49 | 50 | defp store_temporary(:first, temps, queue, counter, max, infos), 51 | do: {queue_first(temps, queue, counter, max), [], infos} 52 | 53 | defp store_temporary(:last, temps, queue, counter, max, infos), 54 | do: queue_last(temps, queue, 0, counter, max, [], infos) 55 | 56 | ## Infinity 57 | 58 | defp queue_infinity([], queue, counter), 59 | do: {0, queue, counter} 60 | 61 | defp queue_infinity([temp | temps], queue, counter), 62 | do: queue_infinity(temps, :queue.in(temp, queue), counter + 1) 63 | 64 | ## First 65 | 66 | defp queue_first([], queue, counter, _max), 67 | do: {0, queue, counter} 68 | 69 | defp queue_first(temps, queue, max, max), 70 | do: {length(temps), queue, max} 71 | 72 | defp queue_first([temp | temps], queue, counter, max), 73 | do: queue_first(temps, :queue.in(temp, queue), counter + 1, max) 74 | 75 | ## Last 76 | 77 | defp queue_last([], queue, excess, counter, _max, perms, wheel), 78 | do: {{excess, queue, counter}, perms, wheel} 79 | 80 | defp queue_last([temp | temps], queue, excess, max, max, perms, wheel) do 81 | queue = :queue.in(temp, :queue.drop(queue)) 82 | 83 | case pop_and_increment_wheel(wheel) do 84 | {:ok, new_perms, wheel} -> 85 | queue_last(temps, queue, excess + 1, max, max, perms ++ new_perms, wheel) 86 | 87 | {:error, wheel} -> 88 | queue_last(temps, queue, excess + 1, max, max, perms, wheel) 89 | end 90 | end 91 | 92 | defp queue_last([temp | temps], queue, excess, counter, max, perms, wheel), 93 | do: queue_last(temps, :queue.in(temp, queue), excess, counter + 1, max, perms, wheel) 94 | 95 
| @doc """ 96 | Puts the permanent entry in the buffer unless the buffer is empty. 97 | """ 98 | def store_permanent_unless_empty(buffer, perm) do 99 | case buffer do 100 | {_queue, 0, _infos} -> 101 | :empty 102 | 103 | {queue, count, infos} when is_reference(infos) -> 104 | {:ok, {:queue.in({infos, perm}, queue), count + 1, infos}} 105 | 106 | {queue, count, infos} -> 107 | {:ok, {queue, count, put_wheel(infos, count, perm)}} 108 | end 109 | end 110 | 111 | @doc """ 112 | Take count temporary from the buffer or until we find a permanent. 113 | 114 | Returns `:empty` if nothing was taken. 115 | """ 116 | def take_count_or_until_permanent({_queue, buffer, _infos}, counter) 117 | when buffer == 0 or counter == 0 do 118 | :empty 119 | end 120 | 121 | def take_count_or_until_permanent({queue, buffer, infos}, counter) do 122 | take_count_or_until_permanent(counter, [], queue, buffer, infos) 123 | end 124 | 125 | defp take_count_or_until_permanent(0, temps, queue, buffer, infos) do 126 | {:ok, {queue, buffer, infos}, 0, :lists.reverse(temps), []} 127 | end 128 | 129 | defp take_count_or_until_permanent(counter, temps, queue, 0, infos) do 130 | {:ok, {queue, 0, infos}, counter, :lists.reverse(temps), []} 131 | end 132 | 133 | defp take_count_or_until_permanent(counter, temps, queue, buffer, infos) 134 | when is_reference(infos) do 135 | {{:value, value}, queue} = :queue.out(queue) 136 | 137 | case value do 138 | {^infos, perm} -> 139 | {:ok, {queue, buffer - 1, infos}, counter, :lists.reverse(temps), [perm]} 140 | 141 | temp -> 142 | take_count_or_until_permanent(counter - 1, [temp | temps], queue, buffer - 1, infos) 143 | end 144 | end 145 | 146 | defp take_count_or_until_permanent(counter, temps, queue, buffer, infos) do 147 | {{:value, temp}, queue} = :queue.out(queue) 148 | 149 | case pop_and_increment_wheel(infos) do 150 | {:ok, perms, infos} -> 151 | {:ok, {queue, buffer - 1, infos}, counter - 1, :lists.reverse([temp | temps]), perms} 152 | 153 | {:error, infos} -> 154 | take_count_or_until_permanent(counter - 1, [temp | temps], queue, buffer - 1, infos) 155 | end 156 | end 157 | 158 | ## Wheel helpers 159 | 160 | defp init_wheel(:infinity), do: make_ref() 161 | defp init_wheel(size), do: size 162 | 163 | defp capacity_wheel(ref) when is_reference(ref), do: :infinity 164 | defp capacity_wheel({_, max, _}), do: max 165 | defp capacity_wheel(max), do: max 166 | 167 | defp put_wheel({pos, max, wheel}, count, perm) do 168 | {pos, max, Map.update(wheel, rem(pos + count - 1, max), [perm], &[perm | &1])} 169 | end 170 | 171 | defp put_wheel(max, count, perm) do 172 | {0, max, %{rem(count - 1, max) => [perm]}} 173 | end 174 | 175 | defp pop_and_increment_wheel({pos, max, wheel}) do 176 | new_pos = rem(pos + 1, max) 177 | 178 | case :maps.take(pos, wheel) do 179 | {perms, wheel} -> 180 | maybe_triplet = if wheel == %{}, do: max, else: {new_pos, max, wheel} 181 | {:ok, perms, maybe_triplet} 182 | 183 | :error -> 184 | {:error, {new_pos, max, wheel}} 185 | end 186 | end 187 | 188 | defp pop_and_increment_wheel(max) do 189 | {:error, max} 190 | end 191 | end 192 | -------------------------------------------------------------------------------- /lib/gen_stage/dispatcher.ex: -------------------------------------------------------------------------------- 1 | defmodule GenStage.Dispatcher do 2 | @moduledoc """ 3 | This module defines the behaviour used by `:producer` and 4 | `:producer_consumer` to dispatch events. 
5 | 6 | When using a `:producer` or `:producer_consumer`, the dispatcher 7 | may be configured on init as follows: 8 | 9 | {:producer, state, dispatcher: GenStage.BroadcastDispatcher} 10 | 11 | Some dispatchers may require options to be given on initialization, 12 | those can be done with a tuple: 13 | 14 | {:producer, state, dispatcher: {GenStage.PartitionDispatcher, partitions: 0..3}} 15 | 16 | Elixir ships with the following dispatcher implementations: 17 | 18 | * `GenStage.DemandDispatcher` - dispatches the given batch of 19 | events to the consumer with the biggest demand in a FIFO 20 | ordering. This is the default dispatcher. 21 | 22 | * `GenStage.BroadcastDispatcher` - dispatches all events to all 23 | consumers. The demand is only sent upstream once all consumers 24 | ask for data. 25 | 26 | * `GenStage.PartitionDispatcher` - dispatches all events to a 27 | fixed amount of consumers that works as partitions according 28 | to a hash function. 29 | 30 | > ### Dispatcher State {: .info } 31 | > 32 | > Note that the Dispatcher state is stored separately from the state of the 33 | > `GenStage` itself and neither side will have direct access to the state of 34 | > the other. 35 | 36 | """ 37 | 38 | @typedoc "Options used by `init/1`" 39 | @type options :: keyword 40 | 41 | @doc """ 42 | Called on initialization with the options given on `c:GenStage.init/1`. 43 | """ 44 | @callback init(opts :: options) :: {:ok, state} when state: any 45 | 46 | @doc """ 47 | Called every time the producer gets a new subscriber. 48 | """ 49 | @callback subscribe(opts :: keyword(), from :: {pid, reference}, state :: term) :: 50 | {:ok, demand :: non_neg_integer, new_state} | {:error, term} 51 | when new_state: term 52 | 53 | @doc """ 54 | Called every time a subscription is cancelled or the consumer goes down. 55 | 56 | It is guaranteed the reference given in `from` points to a reference 57 | previously given in subscribe. 58 | """ 59 | @callback cancel(from :: {pid, reference}, state :: term) :: 60 | {:ok, demand :: non_neg_integer, new_state} 61 | when new_state: term 62 | 63 | @doc """ 64 | Called every time a consumer sends demand. 65 | 66 | The demand will always be a positive integer (more than 0). 67 | This callback must return the `actual_demand` as part of its 68 | return tuple. The returned demand is then sent to producers. 69 | 70 | It is guaranteed the reference given in `from` points to a 71 | reference previously given in subscribe. 72 | """ 73 | @callback ask(demand :: pos_integer, from :: {pid, reference}, state :: term) :: 74 | {:ok, actual_demand :: non_neg_integer, new_state} 75 | when new_state: term 76 | 77 | @doc """ 78 | Called every time a producer wants to dispatch an event. 79 | 80 | The events will always be a non empty list. This callback may 81 | receive more events than previously asked and therefore must 82 | return events it cannot not effectively deliver as part of its 83 | return tuple. Any `leftover_events` will be stored by producers 84 | in their buffer. 85 | 86 | It is important to emphasize that `leftover_events` can happen 87 | in any dispatcher implementation. After all, a consumer can 88 | subscribe, ask for events and crash. Eventually the events 89 | the consumer asked will be delivered while the consumer no longer 90 | exists, meaning they must be returned as left_over events until 91 | another consumer subscribes. 92 | 93 | This callback is responsible for sending events to consumer 94 | stages. 
In order to do so, you must store a `from` value from a 95 | previous `ask/3` callback. 96 | 97 | It is recommended for these events to be sent with `Process.send/3` 98 | and the `[:noconnect]` option as the consumers are all monitored 99 | by the producer. For example: 100 | 101 | Process.send(consumer, {:"$gen_consumer", {self(), consumer_ref}, events}, [:noconnect]) 102 | 103 | """ 104 | @callback dispatch(events :: nonempty_list(term), length :: pos_integer, state :: term) :: 105 | {:ok, leftover_events :: [term], new_state} 106 | when new_state: term 107 | 108 | @doc """ 109 | Used to send an info message to the current process. 110 | 111 | In case the dispatcher is doing buffering, the message must 112 | only be sent after all currently buffered consumer messages are 113 | delivered. 114 | """ 115 | @callback info(msg :: term, state :: term) :: {:ok, new_state} when new_state: term 116 | end 117 | -------------------------------------------------------------------------------- /lib/gen_stage/dispatchers/broadcast_dispatcher.ex: -------------------------------------------------------------------------------- 1 | defmodule GenStage.BroadcastDispatcher do 2 | @moduledoc ~S""" 3 | A dispatcher that accumulates demand from all consumers 4 | before broadcasting events to all of them. 5 | 6 | This dispatcher guarantees that events are dispatched to all 7 | consumers without exceeding the demand of any given consumer. 8 | 9 | ## The `:selector` option 10 | 11 | If a producer uses `GenStage.BroadcastDispatcher`, its subscribers 12 | can specify an optional `:selector` function that receives the event 13 | and returns a boolean in the subscription options. 14 | 15 | Assume `producer` and `consumer` are stages exchanging events of type 16 | `%{:key => String.t, any => any}`, then by calling 17 | 18 | GenStage.sync_subscribe(consumer, 19 | to: producer, 20 | selector: fn %{key: key} -> String.starts_with?(key, "foo-") end) 21 | 22 | `consumer` will receive only the events broadcast from `producer` 23 | for which the selector function returns a truthy value. 24 | 25 | The `:selector` option can be specified in sync and async subscriptions, 26 | as well as in the `:subscribe_to` list in the return tuple of 27 | `c:GenStage.init/1`. For example: 28 | 29 | def init(:ok) do 30 | {:consumer, :ok, subscribe_to: 31 | [{producer, selector: fn %{key: key} -> String.starts_with?(key, "foo-") end}]} 32 | end 33 | 34 | ## Demand while setting up 35 | 36 | ``` 37 | [Producer Consumer 1] 38 | / \ 39 | [Producer] - - [Consumer] 40 | \ / 41 | [Producer Consumer 2] 42 | ``` 43 | 44 | When starting `Producer Consumer 1` before `Producer Consumer 2` (or even 45 | regular consumers), it is the first batch of events is only delivered to 46 | `Producer Consumer 1` since `Producer Consummer 2` is not registered yet. 47 | 48 | It is therefore recommended to start the producer with 49 | `{:producer, state, demand: :accumulate}`, which pauses demand in the producers, 50 | and after all stages have been initialized, call `GenStage.demand/2` to resume 51 | the producer. 
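
  For example, here is a rough sketch of that setup, where `MyBroadcaster`,
  `ConsumerA`, and `ConsumerB` are hypothetical modules whose `init/1` returns
  the tuples shown in the comments:

      # MyBroadcaster.init/1 returns:
      # {:producer, state, dispatcher: GenStage.BroadcastDispatcher, demand: :accumulate}
      {:ok, producer} = GenStage.start_link(MyBroadcaster, :ok)

      # Each consumer's init/1 returns {:consumer, :ok, subscribe_to: [producer]}
      {:ok, _} = GenStage.start_link(ConsumerA, producer)
      {:ok, _} = GenStage.start_link(ConsumerB, producer)

      # Resume the producer only after every stage has subscribed.
      GenStage.demand(producer, :forward)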
52 | """ 53 | 54 | @behaviour GenStage.Dispatcher 55 | 56 | require Logger 57 | 58 | @doc false 59 | def init(_opts) do 60 | {:ok, {[], 0, MapSet.new()}} 61 | end 62 | 63 | @doc false 64 | def info(msg, state) do 65 | send(self(), msg) 66 | {:ok, state} 67 | end 68 | 69 | @doc false 70 | def subscribe(opts, {pid, ref}, {demands, waiting, subscribed_processes}) do 71 | selector = validate_selector(opts) 72 | 73 | if subscribed?(subscribed_processes, pid) do 74 | Logger.error(fn -> 75 | "#{inspect(pid)} is already registered with #{inspect(self())}. " <> 76 | "This subscription has been discarded." 77 | end) 78 | 79 | {:error, :already_subscribed} 80 | else 81 | subscribed_processes = add_subscriber(subscribed_processes, pid) 82 | demands = adjust_demand(-waiting, demands) 83 | {:ok, 0, {add_demand(0, pid, ref, selector, demands), 0, subscribed_processes}} 84 | end 85 | end 86 | 87 | @doc false 88 | def cancel({pid, ref}, {demands, waiting, subscribed_processes}) do 89 | subscribed_processes = delete_subscriber(subscribed_processes, pid) 90 | 91 | case delete_demand(ref, demands) do 92 | [] -> 93 | {:ok, 0, {[], 0, subscribed_processes}} 94 | 95 | demands -> 96 | # Since we may have removed the process we were waiting on, 97 | # cancellation may actually generate demand! 98 | new_min = get_min(demands) 99 | demands = adjust_demand(new_min, demands) 100 | {:ok, new_min, {demands, waiting + new_min, subscribed_processes}} 101 | end 102 | end 103 | 104 | @doc false 105 | def ask(counter, {pid, ref}, {demands, waiting, subscribed_processes}) do 106 | {current, selector, demands} = pop_demand(ref, demands) 107 | demands = add_demand(current + counter, pid, ref, selector, demands) 108 | new_min = get_min(demands) 109 | demands = adjust_demand(new_min, demands) 110 | {:ok, new_min, {demands, waiting + new_min, subscribed_processes}} 111 | end 112 | 113 | @doc false 114 | def dispatch(events, _length, {demands, 0, subscribed_processes}) do 115 | {:ok, events, {demands, 0, subscribed_processes}} 116 | end 117 | 118 | def dispatch(events, length, {demands, waiting, subscribed_processes}) do 119 | {deliver_now, deliver_later, waiting} = split_events(events, length, waiting) 120 | 121 | for {_, pid, ref, selector} <- demands do 122 | selected = 123 | case filter_and_count(deliver_now, selector) do 124 | {selected, 0} -> 125 | selected 126 | 127 | {selected, discarded} -> 128 | send(self(), {:"$gen_producer", {pid, ref}, {:ask, discarded}}) 129 | selected 130 | end 131 | 132 | Process.send(pid, {:"$gen_consumer", {self(), ref}, selected}, [:noconnect]) 133 | :ok 134 | end 135 | 136 | {:ok, deliver_later, {demands, waiting, subscribed_processes}} 137 | end 138 | 139 | defp filter_and_count(messages, nil) do 140 | {messages, 0} 141 | end 142 | 143 | defp filter_and_count(messages, selector) do 144 | filter_and_count(messages, selector, [], 0) 145 | end 146 | 147 | defp filter_and_count([message | messages], selector, acc, count) do 148 | if selector.(message) do 149 | filter_and_count(messages, selector, [message | acc], count) 150 | else 151 | filter_and_count(messages, selector, acc, count + 1) 152 | end 153 | end 154 | 155 | defp filter_and_count([], _selector, acc, count) do 156 | {:lists.reverse(acc), count} 157 | end 158 | 159 | defp validate_selector(opts) do 160 | case Keyword.get(opts, :selector) do 161 | nil -> 162 | nil 163 | 164 | selector when is_function(selector, 1) -> 165 | selector 166 | 167 | other -> 168 | raise ArgumentError, 169 | ":selector option must be passed a unary function, got: 
#{inspect(other)}" 170 | end 171 | end 172 | 173 | defp get_min([]), do: 0 174 | 175 | defp get_min([{acc, _, _, _} | demands]), 176 | do: demands |> Enum.reduce(acc, fn {val, _, _, _}, acc -> min(val, acc) end) |> max(0) 177 | 178 | defp split_events(events, length, counter) when length <= counter do 179 | {events, [], counter - length} 180 | end 181 | 182 | defp split_events(events, _length, counter) do 183 | {now, later} = Enum.split(events, counter) 184 | {now, later, 0} 185 | end 186 | 187 | defp adjust_demand(0, demands) do 188 | demands 189 | end 190 | 191 | defp adjust_demand(min, demands) do 192 | Enum.map(demands, fn {counter, pid, key, selector} -> 193 | {counter - min, pid, key, selector} 194 | end) 195 | end 196 | 197 | defp add_demand(counter, pid, ref, selector, demands) 198 | when is_integer(counter) and is_pid(pid) and (is_nil(selector) or is_function(selector, 1)) do 199 | [{counter, pid, ref, selector} | demands] 200 | end 201 | 202 | defp pop_demand(ref, demands) do 203 | case List.keytake(demands, ref, 2) do 204 | {{current, _pid, ^ref, selector}, rest} -> {current, selector, rest} 205 | nil -> {0, nil, demands} 206 | end 207 | end 208 | 209 | defp delete_demand(ref, demands) do 210 | List.keydelete(demands, ref, 2) 211 | end 212 | 213 | defp add_subscriber(subscribed_processes, pid) do 214 | MapSet.put(subscribed_processes, pid) 215 | end 216 | 217 | defp delete_subscriber(subscribed_processes, pid) do 218 | MapSet.delete(subscribed_processes, pid) 219 | end 220 | 221 | defp subscribed?(subscribed_processes, pid) do 222 | MapSet.member?(subscribed_processes, pid) 223 | end 224 | end 225 | -------------------------------------------------------------------------------- /lib/gen_stage/dispatchers/demand_dispatcher.ex: -------------------------------------------------------------------------------- 1 | defmodule GenStage.DemandDispatcher do 2 | @moduledoc """ 3 | A dispatcher that sends batches to the highest demand. 4 | 5 | This is the default dispatcher used by `GenStage`. In order 6 | to avoid greedy consumers, it is recommended that all consumers 7 | have exactly the same maximum demand. 8 | 9 | ## Options 10 | 11 | The demand dispatcher accepts the following options 12 | on initialization: 13 | 14 | * `:shuffle_demands_on_first_dispatch` - when `true`, shuffle the initial demands list 15 | which is constructed on subscription before first dispatch. It prevents overloading 16 | the first consumer on first dispatch. Defaults to `false`. 17 | 18 | * `:max_demand` - the maximum demand expected on `GenStage.ask/3`. 19 | Defaults to the first demand asked. 
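
  For instance, to pin the expected maximum demand up front instead of
  inferring it from the first ask (the value below is only illustrative):

      {:producer, state, dispatcher: {GenStage.DemandDispatcher, max_demand: 500}}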
20 | 21 | ### Examples 22 | 23 | To start a producer with demands shuffled on first dispatch: 24 | 25 | {:producer, state, dispatcher: {GenStage.DemandDispatcher, shuffle_demands_on_first_dispatch: true}} 26 | """ 27 | 28 | @behaviour GenStage.Dispatcher 29 | 30 | @doc false 31 | def init(opts) do 32 | shuffle_demand = Keyword.get(opts, :shuffle_demands_on_first_dispatch, false) 33 | max_demand = Keyword.get(opts, :max_demand) 34 | 35 | {:ok, {[], 0, max_demand, shuffle_demand}} 36 | end 37 | 38 | @doc false 39 | def info(msg, state) do 40 | send(self(), msg) 41 | {:ok, state} 42 | end 43 | 44 | @doc false 45 | def subscribe(_opts, {pid, ref}, {demands, pending, max, shuffle_demand}) do 46 | {:ok, 0, {demands ++ [{0, pid, ref}], pending, max, shuffle_demand}} 47 | end 48 | 49 | @doc false 50 | def cancel({_, ref}, {demands, pending, max, shuffle_demand}) do 51 | {current, demands} = pop_demand(ref, demands) 52 | {:ok, 0, {demands, current + pending, max, shuffle_demand}} 53 | end 54 | 55 | @doc false 56 | def ask(counter, {pid, ref}, {demands, pending, max, shuffle_demand}) do 57 | max = max || counter 58 | 59 | if counter > max do 60 | warning = 61 | ~c"GenStage producer DemandDispatcher expects a maximum demand of ~tp. " ++ 62 | ~c"Using different maximum demands will overload greedy consumers. " ++ 63 | ~c"Got demand for ~tp events from ~tp~n" 64 | 65 | :error_logger.warning_msg(warning, [max, counter, pid]) 66 | end 67 | 68 | {current, demands} = pop_demand(ref, demands) 69 | demands = add_demand(current + counter, pid, ref, demands) 70 | 71 | already_sent = min(pending, counter) 72 | {:ok, counter - already_sent, {demands, pending - already_sent, max, shuffle_demand}} 73 | end 74 | 75 | @doc false 76 | def dispatch(events, length, {demands, pending, max, true}) do 77 | dispatch(events, length, {Enum.shuffle(demands), pending, max, false}) 78 | end 79 | 80 | def dispatch(events, length, {demands, pending, max, false}) do 81 | {events, demands} = dispatch_demand(events, length, demands) 82 | {:ok, events, {demands, pending, max, false}} 83 | end 84 | 85 | defp dispatch_demand([], _length, demands) do 86 | {[], demands} 87 | end 88 | 89 | defp dispatch_demand(events, _length, [{0, _, _} | _] = demands) do 90 | {events, demands} 91 | end 92 | 93 | defp dispatch_demand(events, length, [{counter, pid, ref} | demands]) do 94 | {deliver_now, deliver_later, length, counter} = split_events(events, length, counter) 95 | Process.send(pid, {:"$gen_consumer", {self(), ref}, deliver_now}, [:noconnect]) 96 | demands = add_demand(counter, pid, ref, demands) 97 | dispatch_demand(deliver_later, length, demands) 98 | end 99 | 100 | defp split_events(events, length, counter) when length <= counter do 101 | {events, [], 0, counter - length} 102 | end 103 | 104 | defp split_events(events, length, counter) do 105 | {now, later} = Enum.split(events, counter) 106 | {now, later, length - counter, 0} 107 | end 108 | 109 | defp add_demand(counter, pid, ref, [{current, _, _} | _] = demands) when counter > current do 110 | [{counter, pid, ref} | demands] 111 | end 112 | 113 | defp add_demand(counter, pid, ref, [demand | demands]) do 114 | [demand | add_demand(counter, pid, ref, demands)] 115 | end 116 | 117 | defp add_demand(counter, pid, ref, []) when is_integer(counter) do 118 | [{counter, pid, ref}] 119 | end 120 | 121 | defp pop_demand(ref, demands) do 122 | {{current, _pid, ^ref}, rest} = List.keytake(demands, ref, 2) 123 | {current, rest} 124 | end 125 | end 126 | 
-------------------------------------------------------------------------------- /lib/gen_stage/dispatchers/partition_dispatcher.ex: -------------------------------------------------------------------------------- 1 | defmodule GenStage.PartitionDispatcher do 2 | @moduledoc """ 3 | A dispatcher that sends events according to partitions. 4 | 5 | This dispatcher assumes that partitions are *evenly distributed*. 6 | See the ["Even distribution"](#module-even-distribution) section for 7 | more information. 8 | 9 | When multiple consumers subscribe to one partition, the producer 10 | behaves like a `GenStage.DemandDispatcher` *within that partition*. 11 | 12 | ## Options 13 | 14 | The partition dispatcher accepts the following options 15 | on initialization: 16 | 17 | * `:partitions` - the number of partitions to dispatch to. It may be 18 | an integer with a total number of partitions, where each partition 19 | is named from 0 up to `integer - 1`. For example, `partitions: 4` 20 | will contain four partitions named `0`, `1`, `2` and `3`. 21 | 22 | It may also be an *enumerable* that specifies the name of each partition. 23 | For instance, `partitions: [:odd, :even]` will build two partitions, 24 | named `:odd` and `:even`. 25 | 26 | * `:hash` - the hashing algorithm. It's a function of type 27 | `t:hash_function/0`, which receives the event and returns a tuple with two 28 | elements: the event to be dispatched and the partition to dispatch it to. 29 | The function can also return `:none`, in which case the event 30 | is discarded. The partition must be one of the partitions specified in 31 | `:partitions` above. The default uses: 32 | 33 | fn event -> {event, :erlang.phash2(event, Enum.count(partitions))} end 34 | 35 | ### Examples 36 | 37 | To start a producer with four partitions named `0`, `1`, `2`, and `3`: 38 | 39 | {:producer, state, dispatcher: {GenStage.PartitionDispatcher, partitions: 0..3}} 40 | 41 | To start a producer with two partitions named `:odd` and `:even`: 42 | 43 | {:producer, state, dispatcher: {GenStage.PartitionDispatcher, partitions: [:odd, :even]}} 44 | 45 | ## Subscribe options 46 | 47 | When subscribing to a `GenStage` with a partition dispatcher the following 48 | option is required: 49 | 50 | * `:partition` - the name of the partition. The partition must be one of 51 | the partitions specified in `:partitions` above. 52 | 53 | ### Examples 54 | 55 | The partition function can be given either on `init`'s subscribe_to: 56 | 57 | {:consumer, :ok, subscribe_to: [{producer, partition: 0}]} 58 | 59 | Or when calling `sync_subscribe`: 60 | 61 | GenStage.sync_subscribe(consumer, to: producer, partition: 0) 62 | 63 | ## Even distribution 64 | 65 | This dispatcher assumes that partitions are *evenly distributed*. 66 | If the data is uneven for long periods of time, then you may 67 | buffer excessive data from busy partitions for long periods of 68 | time. This happens because the producer is unable to distinguish 69 | from which particular consumer/partition demand arrives. 70 | 71 | Let's see an example. Imagine you have three consumers, each 72 | for one partition: `A`, `B`, and `C`. 73 | 74 | Let's assume 60% of the data goes to `A`, 20% to `B`, and 20% to 75 | `C`. Let's also say that `max_demand` is `10` and `min_demand` is 76 | `5`. When the consumers initially request data (`10` events each), 77 | the producer receives a total demand of `30`. A will receive `18` of 78 | those (60%), while `B` and `C` receive `6` each (20%). 
After 79 | processing `5` events (the `min_demand`), each consumer requests 80 | additional `5` events, for a total of `15` additional events. At 81 | this point, that will be `9` additional elements for A, and 3 82 | additional elements for B and C. At the end of these two rounds, we 83 | will have: 84 | 85 | A = 18 - 5 + 9 = 22 events 86 | B = 6 - 5 + 3 = 4 events 87 | C = 6 - 5 + 3 = 4 events 88 | 89 | Furthermore, as B and C request more items, A will only go further 90 | behind. This behaviour is fine for spikes that should quickly 91 | resolve, but it can be problematic if the data is consistently uneven. 92 | """ 93 | 94 | @typedoc """ 95 | The type used for the function passed to the `:hash` option. 96 | """ 97 | @typedoc since: "1.2.0" 98 | @type hash_function :: (event :: any -> {event :: any, partition :: any} | :none) 99 | 100 | @behaviour GenStage.Dispatcher 101 | @init {nil, nil, 0} 102 | 103 | require Logger 104 | 105 | @doc false 106 | def init(opts) do 107 | partitions = 108 | case Keyword.get(opts, :partitions) do 109 | nil -> 110 | raise ArgumentError, 111 | "the enumerable of :partitions is required when using the partition dispatcher" 112 | 113 | partitions when is_integer(partitions) -> 114 | 0..(partitions - 1) 115 | 116 | partitions -> 117 | partitions 118 | end 119 | 120 | hash_present? = Keyword.has_key?(opts, :hash) 121 | 122 | partitions = 123 | for partition <- partitions, into: %{} do 124 | if not hash_present? and not is_integer(partition) do 125 | raise ArgumentError, 126 | "when :partitions contains partitions that are not integers, you have to pass " <> 127 | "in the :hash option as well" 128 | end 129 | 130 | Process.put(partition, []) 131 | {partition, @init} 132 | end 133 | 134 | size = map_size(partitions) 135 | hash = Keyword.get(opts, :hash, &hash(&1, size)) 136 | {:ok, {make_ref(), hash, 0, 0, partitions, %{}, %{}}} 137 | end 138 | 139 | defp hash(event, range) do 140 | {event, :erlang.phash2(event, range)} 141 | end 142 | 143 | @doc false 144 | def info(msg, {tag, hash, waiting, pending, partitions, references, infos}) do 145 | info = make_ref() 146 | 147 | {partitions, queued} = 148 | Enum.reduce(partitions, {partitions, []}, fn 149 | {partition, {pid, ref, queue}}, {partitions, queued} when not is_integer(queue) -> 150 | {Map.put(partitions, partition, {pid, ref, :queue.in({tag, info}, queue)}), 151 | [partition | queued]} 152 | 153 | _, {partitions, queued} -> 154 | {partitions, queued} 155 | end) 156 | 157 | infos = 158 | case queued do 159 | [] -> 160 | send(self(), msg) 161 | infos 162 | 163 | _ -> 164 | Map.put(infos, info, {msg, queued}) 165 | end 166 | 167 | {:ok, {tag, hash, waiting, pending, partitions, references, infos}} 168 | end 169 | 170 | @doc false 171 | def subscribe(opts, {pid, ref}, {tag, hash, waiting, pending, partitions, references, infos}) do 172 | partition = Keyword.get(opts, :partition) 173 | 174 | case partitions do 175 | %{^partition => {nil, nil, demand_or_queue}} -> 176 | partitions = Map.put(partitions, partition, {pid, ref, demand_or_queue}) 177 | references = Map.put(references, ref, partition) 178 | {:ok, 0, {tag, hash, waiting, pending, partitions, references, infos}} 179 | 180 | %{^partition => {pid, _, _}} -> 181 | raise ArgumentError, "the partition #{partition} is already taken by #{inspect(pid)}" 182 | 183 | _ when is_nil(partition) -> 184 | raise ArgumentError, 185 | "the :partition option is required when subscribing to a producer with partition dispatcher" 186 | 187 | _ -> 188 | keys = 
Map.keys(partitions) 189 | raise ArgumentError, ":partition must be one of #{inspect(keys)}, got: #{partition}" 190 | end 191 | end 192 | 193 | @doc false 194 | def cancel({_, ref}, {tag, hash, waiting, pending, partitions, references, infos}) do 195 | {partition, references} = Map.pop(references, ref) 196 | {_pid, _ref, demand_or_queue} = Map.get(partitions, partition) 197 | partitions = Map.put(partitions, partition, @init) 198 | 199 | case demand_or_queue do 200 | demand when is_integer(demand) -> 201 | {:ok, 0, {tag, hash, waiting, pending + demand, partitions, references, infos}} 202 | 203 | queue -> 204 | {length, infos} = clear_queue(queue, tag, partition, 0, infos) 205 | {:ok, length, {tag, hash, waiting + length, pending, partitions, references, infos}} 206 | end 207 | end 208 | 209 | @doc false 210 | def ask(counter, {_, ref}, {tag, hash, waiting, pending, partitions, references, infos}) do 211 | partition = Map.fetch!(references, ref) 212 | {pid, ref, demand_or_queue} = Map.fetch!(partitions, partition) 213 | 214 | {demand_or_queue, infos} = 215 | case demand_or_queue do 216 | demand when is_integer(demand) -> 217 | {demand + counter, infos} 218 | 219 | queue -> 220 | send_from_queue(queue, tag, pid, ref, partition, counter, [], infos) 221 | end 222 | 223 | partitions = Map.put(partitions, partition, {pid, ref, demand_or_queue}) 224 | already_sent = min(pending, counter) 225 | demand = counter - already_sent 226 | pending = pending - already_sent 227 | {:ok, demand, {tag, hash, waiting + demand, pending, partitions, references, infos}} 228 | end 229 | 230 | defp send_from_queue(queue, _tag, pid, ref, _partition, 0, acc, infos) do 231 | maybe_send(acc, pid, ref) 232 | {queue, infos} 233 | end 234 | 235 | defp send_from_queue(queue, tag, pid, ref, partition, counter, acc, infos) do 236 | case :queue.out(queue) do 237 | {{:value, {^tag, info}}, queue} -> 238 | maybe_send(acc, pid, ref) 239 | infos = maybe_info(infos, info, partition) 240 | send_from_queue(queue, tag, pid, ref, partition, counter, [], infos) 241 | 242 | {{:value, event}, queue} -> 243 | send_from_queue(queue, tag, pid, ref, partition, counter - 1, [event | acc], infos) 244 | 245 | {:empty, _queue} -> 246 | maybe_send(acc, pid, ref) 247 | {counter, infos} 248 | end 249 | end 250 | 251 | defp clear_queue(queue, tag, partition, counter, infos) do 252 | case :queue.out(queue) do 253 | {{:value, {^tag, info}}, queue} -> 254 | clear_queue(queue, tag, partition, counter, maybe_info(infos, info, partition)) 255 | 256 | {{:value, _}, queue} -> 257 | clear_queue(queue, tag, partition, counter + 1, infos) 258 | 259 | {:empty, _queue} -> 260 | {counter, infos} 261 | end 262 | end 263 | 264 | # Important: events must be in reverse order 265 | defp maybe_send([], _pid, _ref), do: :ok 266 | 267 | defp maybe_send(events, pid, ref), 268 | do: Process.send(pid, {:"$gen_consumer", {self(), ref}, :lists.reverse(events)}, [:noconnect]) 269 | 270 | defp maybe_info(infos, info, partition) do 271 | case infos do 272 | %{^info => {msg, [^partition]}} -> 273 | send(self(), msg) 274 | Map.delete(infos, info) 275 | 276 | %{^info => {msg, partitions}} -> 277 | Map.put(infos, info, {msg, List.delete(partitions, partition)}) 278 | end 279 | end 280 | 281 | @doc false 282 | def dispatch(events, _length, {tag, hash, waiting, pending, partitions, references, infos}) do 283 | {deliver_later, waiting} = split_events(events, waiting, hash, partitions) 284 | 285 | partitions = 286 | partitions 287 | |> :maps.to_list() 288 | |> dispatch_per_partition() 
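      # The step above flushes the per-partition event lists that split_events/4
      # accumulated in the process dictionary, sending what each partition's
      # pending demand allows and queueing the rest.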
289 | |> :maps.from_list() 290 | 291 | {:ok, deliver_later, {tag, hash, waiting, pending, partitions, references, infos}} 292 | end 293 | 294 | defp split_events(events, 0, _hash, _partitions), do: {events, 0} 295 | defp split_events([], counter, _hash, _partitions), do: {[], counter} 296 | 297 | defp split_events([event | events], counter, hash, partitions) do 298 | case hash.(event) do 299 | {event, partition} -> 300 | case :erlang.get(partition) do 301 | :undefined -> 302 | raise "unknown partition #{inspect(partition)} computed for GenStage event " <> 303 | "#{inspect(event)}. The known partitions are #{inspect(Map.keys(partitions))}. " <> 304 | "See the :partitions option to set your own. This event has been discarded." 305 | 306 | current -> 307 | Process.put(partition, [event | current]) 308 | split_events(events, counter - 1, hash, partitions) 309 | end 310 | 311 | :none -> 312 | split_events(events, counter, hash, partitions) 313 | 314 | other -> 315 | raise "the :hash function should return {event, partition}, got: #{inspect(other)}" 316 | end 317 | end 318 | 319 | defp dispatch_per_partition([{partition, {pid, ref, demand_or_queue} = value} | rest]) do 320 | case Process.put(partition, []) do 321 | [] -> 322 | [{partition, value} | dispatch_per_partition(rest)] 323 | 324 | events -> 325 | events = :lists.reverse(events) 326 | 327 | {events, demand_or_queue} = 328 | case demand_or_queue do 329 | demand when is_integer(demand) -> 330 | split_into_queue(events, demand, []) 331 | 332 | queue -> 333 | {[], put_into_queue(events, queue)} 334 | end 335 | 336 | maybe_send(events, pid, ref) 337 | [{partition, {pid, ref, demand_or_queue}} | dispatch_per_partition(rest)] 338 | end 339 | end 340 | 341 | defp dispatch_per_partition([]) do 342 | [] 343 | end 344 | 345 | defp split_into_queue(events, 0, acc), do: {acc, put_into_queue(events, :queue.new())} 346 | defp split_into_queue([], counter, acc), do: {acc, counter} 347 | 348 | defp split_into_queue([event | events], counter, acc), 349 | do: split_into_queue(events, counter - 1, [event | acc]) 350 | 351 | defp put_into_queue(events, queue) do 352 | Enum.reduce(events, queue, &:queue.in/2) 353 | end 354 | end 355 | -------------------------------------------------------------------------------- /lib/gen_stage/stream.ex: -------------------------------------------------------------------------------- 1 | defmodule GenStage.Stream do 2 | @moduledoc false 3 | require GenStage.Utils, as: Utils 4 | 5 | def build(subscriptions, options) do 6 | subscriptions = :lists.map(&stream_validate_opts/1, subscriptions) 7 | 8 | Stream.resource( 9 | fn -> init_stream(subscriptions, options) end, 10 | &consume_stream/1, 11 | &close_stream/1 12 | ) 13 | end 14 | 15 | defp stream_validate_opts({to, opts}) when is_list(opts) do 16 | with {:ok, max, _} <- Utils.validate_integer(opts, :max_demand, 1000, 1, :infinity, false), 17 | {:ok, min, _} <- 18 | Utils.validate_integer(opts, :min_demand, div(max, 2), 0, max - 1, false), 19 | {:ok, cancel, _} <- 20 | Utils.validate_in(opts, :cancel, :permanent, [:temporary, :transient, :permanent]) do 21 | {to, cancel, min, max, opts} 22 | else 23 | {:error, message} -> 24 | raise ArgumentError, "invalid options for #{inspect(to)} producer (#{message})" 25 | end 26 | end 27 | 28 | defp stream_validate_opts(to) do 29 | stream_validate_opts({to, []}) 30 | end 31 | 32 | defp init_stream(subscriptions, options) do 33 | parent = self() 34 | demand = options[:demand] || :forward 35 | 36 | {monitor_pid, monitor_ref} = 
spawn_monitor(fn -> init_monitor(parent, demand) end) 37 | send(monitor_pid, {parent, monitor_ref}) 38 | send(monitor_pid, {monitor_ref, {:subscribe, subscriptions}}) 39 | 40 | receive do 41 | {:DOWN, ^monitor_ref, _, _, reason} -> 42 | exit(reason) 43 | 44 | {^monitor_ref, {:subscriptions, demand, subscriptions}} -> 45 | if producers = options[:producers] do 46 | for pid <- producers, do: GenStage.demand(pid, demand) 47 | else 48 | demand_stream_subscriptions(demand, subscriptions) 49 | end 50 | 51 | {:receive, monitor_pid, monitor_ref, subscriptions} 52 | end 53 | end 54 | 55 | defp demand_stream_subscriptions(demand, subscriptions) do 56 | Enum.each(subscriptions, fn {_, {:subscribed, pid, _, _, _, _}} -> 57 | GenStage.demand(pid, demand) 58 | end) 59 | end 60 | 61 | defp init_monitor(parent, demand) do 62 | parent_ref = Process.monitor(parent) 63 | 64 | receive do 65 | {:DOWN, ^parent_ref, _, _, reason} -> 66 | exit(reason) 67 | 68 | {^parent, monitor_ref} -> 69 | loop_monitor(parent, parent_ref, monitor_ref, demand, []) 70 | end 71 | end 72 | 73 | defp subscriptions_monitor(parent, monitor_ref, subscriptions) do 74 | fold_fun = fn {to, cancel, min, max, opts}, acc -> 75 | producer_pid = GenServer.whereis(to) 76 | 77 | cond do 78 | producer_pid != nil -> 79 | inner_ref = Process.monitor(producer_pid) 80 | from = {parent, {monitor_ref, inner_ref}} 81 | send_noconnect(producer_pid, {:"$gen_producer", from, {:subscribe, nil, opts}}) 82 | send_noconnect(producer_pid, {:"$gen_producer", from, {:ask, max}}) 83 | Map.put(acc, inner_ref, {:subscribed, producer_pid, cancel, min, max, max}) 84 | 85 | cancel == :permanent or cancel == :transient -> 86 | exit({:noproc, {GenStage, :init_stream, [subscriptions]}}) 87 | 88 | cancel == :temporary -> 89 | acc 90 | end 91 | end 92 | 93 | :lists.foldl(fold_fun, %{}, subscriptions) 94 | end 95 | 96 | defp loop_monitor(parent, parent_ref, monitor_ref, demand, keys) do 97 | receive do 98 | {^monitor_ref, {:subscribe, pairs}} -> 99 | subscriptions = subscriptions_monitor(parent, monitor_ref, pairs) 100 | send(parent, {monitor_ref, {:subscriptions, demand, subscriptions}}) 101 | loop_monitor(parent, parent_ref, monitor_ref, demand, Map.keys(subscriptions) ++ keys) 102 | 103 | {:DOWN, ^parent_ref, _, _, reason} -> 104 | exit(reason) 105 | 106 | {:DOWN, ref, _, _, reason} -> 107 | if ref in keys do 108 | send(parent, {monitor_ref, {:DOWN, ref, reason}}) 109 | end 110 | 111 | loop_monitor(parent, parent_ref, monitor_ref, demand, keys -- [ref]) 112 | end 113 | end 114 | 115 | defp cancel_monitor(monitor_pid, monitor_ref) do 116 | # Cancel the old ref and get a fresh one since 117 | # the monitor_ref may already have been received. 
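    # We then kill the helper, wait for its DOWN message, and flush anything
    # it may have sent us in the meantime.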
118 | Process.demonitor(monitor_ref, [:flush]) 119 | 120 | ref = Process.monitor(monitor_pid) 121 | Process.exit(monitor_pid, :kill) 122 | 123 | receive do 124 | {:DOWN, ^ref, _, _, _} -> 125 | flush_monitor(monitor_ref) 126 | end 127 | end 128 | 129 | defp flush_monitor(monitor_ref) do 130 | receive do 131 | {^monitor_ref, _} -> 132 | flush_monitor(monitor_ref) 133 | after 134 | 0 -> :ok 135 | end 136 | end 137 | 138 | defp consume_stream({:receive, monitor_pid, monitor_ref, subscriptions}) do 139 | receive_stream(monitor_pid, monitor_ref, subscriptions) 140 | end 141 | 142 | defp consume_stream({:ask, from, ask, batches, monitor_pid, monitor_ref, subscriptions}) do 143 | GenStage.ask(from, ask, [:noconnect]) 144 | deliver_stream(batches, from, monitor_pid, monitor_ref, subscriptions) 145 | end 146 | 147 | defp close_stream({:receive, monitor_pid, monitor_ref, subscriptions}) do 148 | request_to_cancel_stream(monitor_pid, monitor_ref, subscriptions) 149 | cancel_monitor(monitor_pid, monitor_ref) 150 | end 151 | 152 | defp close_stream({:ask, _, _, _, monitor_pid, monitor_ref, subscriptions}) do 153 | request_to_cancel_stream(monitor_pid, monitor_ref, subscriptions) 154 | cancel_monitor(monitor_pid, monitor_ref) 155 | end 156 | 157 | defp close_stream({:exit, reason, monitor_pid, monitor_ref, subscriptions}) do 158 | request_to_cancel_stream(monitor_pid, monitor_ref, subscriptions) 159 | cancel_monitor(monitor_pid, monitor_ref) 160 | exit({reason, {GenStage, :close_stream, [subscriptions]}}) 161 | end 162 | 163 | defp receive_stream(monitor_pid, monitor_ref, subscriptions) 164 | when map_size(subscriptions) == 0 do 165 | {:halt, {:receive, monitor_pid, monitor_ref, subscriptions}} 166 | end 167 | 168 | defp receive_stream(monitor_pid, monitor_ref, subscriptions) do 169 | receive do 170 | {:"$gen_consumer", {producer_pid, {^monitor_ref, inner_ref} = ref}, events} 171 | when is_list(events) -> 172 | case subscriptions do 173 | %{^inner_ref => {:subscribed, producer_pid, cancel, min, max, demand}} -> 174 | from = {producer_pid, ref} 175 | {demand, batches} = Utils.split_batches(events, from, min, max, demand) 176 | subscribed = {:subscribed, producer_pid, cancel, min, max, demand} 177 | 178 | deliver_stream( 179 | batches, 180 | from, 181 | monitor_pid, 182 | monitor_ref, 183 | Map.put(subscriptions, inner_ref, subscribed) 184 | ) 185 | 186 | %{^inner_ref => {:cancel, _}} -> 187 | # We received this message before the cancellation was processed 188 | receive_stream(monitor_pid, monitor_ref, subscriptions) 189 | 190 | _ -> 191 | # Cancel if messages are out of order or unknown 192 | msg = {:"$gen_producer", {self(), ref}, {:cancel, :unknown_subscription}} 193 | send_noconnect(producer_pid, msg) 194 | receive_stream(monitor_pid, monitor_ref, Map.delete(subscriptions, inner_ref)) 195 | end 196 | 197 | {:"$gen_consumer", {_, {^monitor_ref, inner_ref}}, {:cancel, reason}} -> 198 | cancel_stream(inner_ref, reason, monitor_pid, monitor_ref, subscriptions) 199 | 200 | {:"$gen_cast", {:"$subscribe", nil, to, opts}} -> 201 | send(monitor_pid, {monitor_ref, {:subscribe, [stream_validate_opts({to, opts})]}}) 202 | 203 | receive do 204 | {^monitor_ref, {:subscriptions, demand, new_subscriptions}} -> 205 | demand_stream_subscriptions(demand, new_subscriptions) 206 | receive_stream(monitor_pid, monitor_ref, Map.merge(subscriptions, new_subscriptions)) 207 | 208 | {^monitor_ref, {:DOWN, inner_ref, reason}} -> 209 | cancel_stream(inner_ref, reason, monitor_pid, monitor_ref, subscriptions) 210 | end 211 | 212 
| {:DOWN, ^monitor_ref, _, _, reason} -> 213 | {:halt, {:exit, reason, monitor_pid, monitor_ref, subscriptions}} 214 | 215 | {^monitor_ref, {:DOWN, inner_ref, reason}} -> 216 | cancel_stream(inner_ref, reason, monitor_pid, monitor_ref, subscriptions) 217 | end 218 | end 219 | 220 | defp deliver_stream([], _from, monitor_pid, monitor_ref, subscriptions) do 221 | receive_stream(monitor_pid, monitor_ref, subscriptions) 222 | end 223 | 224 | defp deliver_stream([{events, ask} | batches], from, monitor_pid, monitor_ref, subscriptions) do 225 | {events, {:ask, from, ask, batches, monitor_pid, monitor_ref, subscriptions}} 226 | end 227 | 228 | defp request_to_cancel_stream(monitor_pid, monitor_ref, subscriptions) do 229 | fold_fun = fn inner_ref, tuple, acc -> 230 | request_to_cancel_stream(inner_ref, tuple, monitor_ref, acc) 231 | end 232 | 233 | subscriptions = :maps.fold(fold_fun, subscriptions, subscriptions) 234 | receive_stream(monitor_pid, monitor_ref, subscriptions) 235 | end 236 | 237 | defp request_to_cancel_stream(_ref, {:cancel, _}, _monitor_ref, subscriptions) do 238 | subscriptions 239 | end 240 | 241 | defp request_to_cancel_stream(inner_ref, tuple, monitor_ref, subscriptions) do 242 | process_pid = elem(tuple, 1) 243 | GenStage.cancel({process_pid, {monitor_ref, inner_ref}}, :normal, [:noconnect]) 244 | Map.put(subscriptions, inner_ref, {:cancel, process_pid}) 245 | end 246 | 247 | defp cancel_stream(inner_ref, reason, monitor_pid, monitor_ref, subscriptions) do 248 | case subscriptions do 249 | %{^inner_ref => {_, _, cancel, _, _, _}} 250 | when cancel == :permanent 251 | when cancel == :transient and not Utils.is_transient_shutdown(reason) -> 252 | Process.demonitor(inner_ref, [:flush]) 253 | {:halt, {:exit, reason, monitor_pid, monitor_ref, Map.delete(subscriptions, inner_ref)}} 254 | 255 | %{^inner_ref => _} -> 256 | Process.demonitor(inner_ref, [:flush]) 257 | receive_stream(monitor_pid, monitor_ref, Map.delete(subscriptions, inner_ref)) 258 | 259 | %{} -> 260 | receive_stream(monitor_pid, monitor_ref, subscriptions) 261 | end 262 | end 263 | 264 | defp send_noconnect(pid, msg) do 265 | Process.send(pid, msg, [:noconnect]) 266 | end 267 | end 268 | -------------------------------------------------------------------------------- /lib/gen_stage/streamer.ex: -------------------------------------------------------------------------------- 1 | defmodule GenStage.Streamer do 2 | @moduledoc false 3 | use GenStage 4 | 5 | def start_link({stream, opts}) do 6 | {:current_stacktrace, [_info_call | stack]} = Process.info(self(), :current_stacktrace) 7 | GenStage.start_link(__MODULE__, {stream, stack, opts}, opts) 8 | end 9 | 10 | def init({stream, stack, opts}) do 11 | continuation = 12 | &Enumerable.reduce(stream, &1, fn 13 | x, {acc, 1} -> {:suspend, {[x | acc], 0}} 14 | x, {acc, counter} -> {:cont, {[x | acc], counter - 1}} 15 | end) 16 | 17 | on_cancel = 18 | case Keyword.get(opts, :on_cancel, :continue) do 19 | :continue -> nil 20 | :stop -> %{} 21 | end 22 | 23 | {:producer, {stack, continuation, on_cancel}, Keyword.take(opts, [:dispatcher, :demand])} 24 | end 25 | 26 | def handle_subscribe(:consumer, _opts, {pid, ref}, {stack, continuation, on_cancel}) do 27 | if on_cancel do 28 | {:automatic, {stack, continuation, Map.put(on_cancel, ref, pid)}} 29 | else 30 | {:automatic, {stack, continuation, on_cancel}} 31 | end 32 | end 33 | 34 | def handle_cancel(_reason, {_, ref}, {stack, continuation, on_cancel}) do 35 | case on_cancel do 36 | %{^ref => _} when map_size(on_cancel) == 1 -> 
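        # :on_cancel was set to :stop and this was the last remaining
        # subscriber, so stop the stream producer.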
37 | {:stop, :normal, {stack, continuation, Map.delete(on_cancel, ref)}} 38 | 39 | %{^ref => _} -> 40 | {:noreply, [], {stack, continuation, Map.delete(on_cancel, ref)}} 41 | 42 | _ -> 43 | {:noreply, [], {stack, continuation, on_cancel}} 44 | end 45 | end 46 | 47 | def handle_demand(_demand, {stack, continuation, on_cancel}) when is_atom(continuation) do 48 | {:noreply, [], {stack, continuation, on_cancel}} 49 | end 50 | 51 | def handle_demand(demand, {stack, continuation, on_cancel}) when demand > 0 do 52 | case continuation.({:cont, {[], demand}}) do 53 | {:suspended, {list, 0}, continuation} -> 54 | {:noreply, :lists.reverse(list), {stack, continuation, on_cancel}} 55 | 56 | {status, {list, _}} -> 57 | GenStage.async_info(self(), :stop) 58 | {:noreply, :lists.reverse(list), {stack, status, on_cancel}} 59 | end 60 | end 61 | 62 | def handle_info(:stop, state) do 63 | {:stop, :normal, state} 64 | end 65 | 66 | def handle_info(msg, {stack, continuation, on_cancel}) do 67 | log = 68 | ~c"** Undefined handle_info in ~tp~n** Unhandled message: ~tp~n** Stream started at:~n~ts" 69 | 70 | :error_logger.warning_msg(log, [inspect(__MODULE__), msg, Exception.format_stacktrace(stack)]) 71 | {:noreply, [], {stack, continuation, on_cancel}} 72 | end 73 | end 74 | -------------------------------------------------------------------------------- /lib/gen_stage/utils.ex: -------------------------------------------------------------------------------- 1 | defmodule GenStage.Utils do 2 | @moduledoc false 3 | 4 | @doc """ 5 | Validates the argument is a list. 6 | """ 7 | def validate_list(opts, key, default) do 8 | {value, opts} = Keyword.pop(opts, key, default) 9 | 10 | if is_list(value) do 11 | {:ok, value, opts} 12 | else 13 | {:error, "expected #{inspect(key)} to be a list, got: #{inspect(value)}"} 14 | end 15 | end 16 | 17 | @doc """ 18 | Validates the given option is one of the values. 19 | """ 20 | def validate_in(opts, key, default, values) do 21 | {value, opts} = Keyword.pop(opts, key, default) 22 | 23 | if value in values do 24 | {:ok, value, opts} 25 | else 26 | {:error, "expected #{inspect(key)} to be one of #{inspect(values)}, got: #{inspect(value)}"} 27 | end 28 | end 29 | 30 | @doc """ 31 | Validates an integer. 32 | """ 33 | def validate_integer(opts, key, default, min, max, infinity?) do 34 | {value, opts} = Keyword.pop(opts, key, default) 35 | 36 | cond do 37 | value == :infinity and infinity? -> 38 | {:ok, value, opts} 39 | 40 | not is_integer(value) -> 41 | error_message = "expected #{inspect(key)} to be an integer, got: #{inspect(value)}" 42 | {:error, error_message} 43 | 44 | value < min -> 45 | error_message = 46 | "expected #{inspect(key)} to be equal to or greater than #{min}, got: #{inspect(value)}" 47 | 48 | {:error, error_message} 49 | 50 | value > max -> 51 | error_message = 52 | "expected #{inspect(key)} to be equal to or less than #{max}, got: #{inspect(value)}" 53 | 54 | {:error, error_message} 55 | 56 | true -> 57 | {:ok, value, opts} 58 | end 59 | end 60 | 61 | @doc """ 62 | Validates there are no options left. 63 | """ 64 | def validate_no_opts(opts) do 65 | if opts == [] do 66 | :ok 67 | else 68 | {:error, "unknown options #{inspect(opts)}"} 69 | end 70 | end 71 | 72 | @doc """ 73 | Helper to check if a shutdown is transient. 
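
  `:normal`, `:shutdown`, and `{:shutdown, term}` count as transient;
  any other exit reason (for example `:killed`) does not.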
74 | """ 75 | defmacro is_transient_shutdown(value) do 76 | quote do 77 | unquote(value) == :normal or unquote(value) == :shutdown or 78 | (is_tuple(unquote(value)) and tuple_size(unquote(value)) == 2 and 79 | elem(unquote(value), 0) == :shutdown) 80 | end 81 | end 82 | 83 | @doc """ 84 | Returns the name of the current process or self. 85 | """ 86 | def self_name() do 87 | case :erlang.process_info(self(), :registered_name) do 88 | {:registered_name, name} when is_atom(name) -> name 89 | _ -> self() 90 | end 91 | end 92 | 93 | @doc """ 94 | Splits a list of events into messages configured by min, max, and demand. 95 | """ 96 | def split_batches(events, from, min, max, demand) do 97 | split_batches(events, from, min, max, demand, demand, []) 98 | end 99 | 100 | defp split_batches([], _from, _min, _max, _old_demand, new_demand, batches) do 101 | {new_demand, :lists.reverse(batches)} 102 | end 103 | 104 | defp split_batches(events, from, min, max, old_demand, new_demand, batches) do 105 | {events, batch, batch_size} = split_events(events, max - min, 0, []) 106 | 107 | # Adjust the batch size to whatever is left of the demand in case of excess. 108 | {old_demand, batch_size} = 109 | case old_demand - batch_size do 110 | diff when diff < 0 -> 111 | error_msg = ~c"GenStage consumer ~tp has received ~tp events in excess from: ~tp~n" 112 | :error_logger.error_msg(error_msg, [self_name(), abs(diff), from]) 113 | {0, old_demand} 114 | 115 | diff -> 116 | {diff, batch_size} 117 | end 118 | 119 | # In case we've reached min, we will ask for more events. 120 | {new_demand, batch_size} = 121 | case new_demand - batch_size do 122 | diff when diff <= min -> 123 | {max, max - diff} 124 | 125 | diff -> 126 | {diff, 0} 127 | end 128 | 129 | split_batches(events, from, min, max, old_demand, new_demand, [{batch, batch_size} | batches]) 130 | end 131 | 132 | defp split_events(events, limit, limit, acc), do: {events, :lists.reverse(acc), limit} 133 | defp split_events([], _limit, counter, acc), do: {[], :lists.reverse(acc), counter} 134 | 135 | defp split_events([event | events], limit, counter, acc) do 136 | split_events(events, limit, counter + 1, [event | acc]) 137 | end 138 | end 139 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule GenStage.Mixfile do 2 | use Mix.Project 3 | 4 | @version "1.2.1" 5 | 6 | def project do 7 | [ 8 | app: :gen_stage, 9 | version: @version, 10 | elixir: "~> 1.11", 11 | package: package(), 12 | description: "Producer and consumer actors with back-pressure for Elixir", 13 | start_permanent: Mix.env() == :prod, 14 | deps: deps(), 15 | docs: [ 16 | main: "GenStage", 17 | source_ref: "v#{@version}", 18 | source_url: "https://github.com/elixir-lang/gen_stage" 19 | ] 20 | ] 21 | end 22 | 23 | def application do 24 | [ 25 | extra_applications: [:logger] 26 | ] 27 | end 28 | 29 | defp deps do 30 | [ 31 | {:ex_doc, "~> 0.12", only: :docs} 32 | ] 33 | end 34 | 35 | defp package do 36 | %{ 37 | licenses: ["Apache-2.0"], 38 | maintainers: ["José Valim", "James Fish"], 39 | links: %{"GitHub" => "https://github.com/elixir-lang/gen_stage"} 40 | } 41 | end 42 | end 43 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "earmark_parser": {:hex, :earmark_parser, "1.4.43", 
"34b2f401fe473080e39ff2b90feb8ddfeef7639f8ee0bbf71bb41911831d77c5", [:mix], [], "hexpm", "970a3cd19503f5e8e527a190662be2cee5d98eed1ff72ed9b3d1a3d466692de8"}, 3 | "ex_doc": {:hex, :ex_doc, "0.37.0", "970f92b39e62c460aa8a367508e938f5e4da6e2ff3eaed3f8530b25870f45471", [:mix], [{:earmark_parser, "~> 1.4.42", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "b0ee7f17373948e0cf471e59c3a0ee42f3bd1171c67d91eb3626456ef9c6202c"}, 4 | "makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"}, 5 | "makeup_elixir": {:hex, :makeup_elixir, "1.0.1", "e928a4f984e795e41e3abd27bfc09f51db16ab8ba1aebdba2b3a575437efafc2", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "7284900d412a3e5cfd97fdaed4f5ed389b8f2b4cb49efc0eb3bd10e2febf9507"}, 6 | "makeup_erlang": {:hex, :makeup_erlang, "1.0.2", "03e1804074b3aa64d5fad7aa64601ed0fb395337b982d9bcf04029d68d51b6a7", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "af33ff7ef368d5893e4a267933e7744e46ce3cf1f61e2dccf53a111ed3aa3727"}, 7 | "nimble_parsec": {:hex, :nimble_parsec, "1.4.2", "8efba0122db06df95bfaa78f791344a89352ba04baedd3849593bfce4d0dc1c6", [:mix], [], "hexpm", "4b21398942dda052b403bbe1da991ccd03a053668d147d53fb8c4e0efe09c973"}, 8 | } 9 | -------------------------------------------------------------------------------- /test/consumer_supervisor_test.exs: -------------------------------------------------------------------------------- 1 | defmodule ConsumerSupervisorTest do 2 | use ExUnit.Case, async: true 3 | 4 | defmodule Simple do 5 | use ConsumerSupervisor 6 | def init(args), do: args 7 | end 8 | 9 | test "generates child_spec/1" do 10 | assert Simple.child_spec([:hello]) == %{ 11 | id: Simple, 12 | start: {Simple, :start_link, [[:hello]]}, 13 | type: :supervisor 14 | } 15 | 16 | defmodule Custom do 17 | use ConsumerSupervisor, 18 | id: :id, 19 | restart: :temporary, 20 | start: {:foo, :bar, []} 21 | 22 | def init(arg) do 23 | arg 24 | end 25 | end 26 | 27 | assert Custom.child_spec([:hello]) == %{ 28 | id: :id, 29 | restart: :temporary, 30 | start: {:foo, :bar, []}, 31 | type: :supervisor 32 | } 33 | end 34 | 35 | test "start_link/3 with non-ok init" do 36 | Process.flag(:trap_exit, true) 37 | worker = worker(Foo, [], restart: :transient) 38 | 39 | error = {:bad_specs, "consumer supervisor expects a list with a single item as a template"} 40 | assert ConsumerSupervisor.start_link(Simple, {:ok, [], []}) == {:error, error} 41 | 42 | error = {:bad_specs, "consumer supervisor expects a list with a single item as a template"} 43 | assert ConsumerSupervisor.start_link(Simple, {:ok, [1, 2], []}) == {:error, error} 44 | 45 | assert ConsumerSupervisor.start_link(Simple, {:ok, [worker], nil}) == 46 | {:error, {:bad_opts, "supervisor's init expects a keywords list as options"}} 47 | 48 | assert ConsumerSupervisor.start_link(Simple, 
{:ok, [worker], []}) == 49 | {:error, {:bad_opts, "supervisor expects a strategy to be given"}} 50 | 51 | assert ConsumerSupervisor.start_link(Simple, {:ok, [worker], [strategy: :unknown]}) == 52 | {:error, {:bad_opts, "unknown supervision strategy for consumer supervisor"}} 53 | 54 | assert ConsumerSupervisor.start_link(Simple, :unknown) == 55 | {:error, {:bad_return_value, :unknown}} 56 | 57 | assert ConsumerSupervisor.start_link(Simple, :ignore) == :ignore 58 | end 59 | 60 | test "start_link/3 with bad child_spec" do 61 | Process.flag(:trap_exit, true) 62 | 63 | init = {:ok, [worker(Foo, [])], [strategy: :one_for_one]} 64 | 65 | assert {:error, {:bad_opts, "a child specification with :restart set to :permanent" <> _}} = 66 | ConsumerSupervisor.start_link(Simple, init) 67 | 68 | init = {:ok, [worker(Foo, [], shutdown: :invalid)], [strategy: :one_for_one]} 69 | 70 | assert {:error, {:bad_specs, {:invalid_shutdown, :invalid}}} = 71 | ConsumerSupervisor.start_link(Simple, init) 72 | end 73 | 74 | test "sets initial call to the same as a regular supervisor" do 75 | {:ok, pid} = Supervisor.start_link([], strategy: :one_for_one) 76 | assert :proc_lib.initial_call(pid) == {:supervisor, Supervisor.Default, [:Argument__1]} 77 | 78 | {:ok, pid} = 79 | ConsumerSupervisor.start_link( 80 | [worker(Foo, [], restart: :transient)], 81 | strategy: :one_for_one 82 | ) 83 | 84 | assert :proc_lib.initial_call(pid) == 85 | {:supervisor, ConsumerSupervisor.Default, [:Argument__1]} 86 | end 87 | 88 | if function_exported?(:supervisor, :get_callback_module, 1) do 89 | test "returns the callback module" do 90 | {:ok, pid} = Supervisor.start_link([], strategy: :one_for_one) 91 | assert :supervisor.get_callback_module(pid) == Supervisor.Default 92 | 93 | children = [worker(Foo, [], restart: :transient)] 94 | {:ok, pid} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 95 | 96 | assert :supervisor.get_callback_module(pid) == ConsumerSupervisor.Default 97 | end 98 | end 99 | 100 | test "start_link/3 with registered process" do 101 | spec = {:ok, [worker(Foo, [], restart: :transient)], [strategy: :one_for_one]} 102 | {:ok, pid} = ConsumerSupervisor.start_link(Simple, spec, name: __MODULE__) 103 | 104 | # Sets up a link 105 | {:links, links} = Process.info(self(), :links) 106 | assert pid in links 107 | 108 | # A name 109 | assert Process.whereis(__MODULE__) == pid 110 | 111 | # And the initial call 112 | assert {:supervisor, ConsumerSupervisorTest.Simple, 1} = :proc_lib.translate_initial_call(pid) 113 | end 114 | 115 | describe "init/2" do 116 | test "supports old child spec" do 117 | spec = {Foo, {Foo, :start_link, []}, :permanent, 5000, :worker, [Foo]} 118 | expected = {:ok, [spec], strategy: :one_for_one} 119 | assert ConsumerSupervisor.init([spec], strategy: :one_for_one) == expected 120 | end 121 | 122 | test "supports new child spec as tuple" do 123 | expected = { 124 | :ok, 125 | [%{id: Task, restart: :temporary, start: {Task, :start_link, [[:foo, :bar]]}}], 126 | [strategy: :one_for_one] 127 | } 128 | 129 | assert expected == ConsumerSupervisor.init([{Task, [:foo, :bar]}], strategy: :one_for_one) 130 | end 131 | 132 | test "supports new child spec as atom" do 133 | expected = { 134 | :ok, 135 | [%{id: Task, restart: :temporary, start: {Task, :start_link, [[]]}}], 136 | [strategy: :one_for_one] 137 | } 138 | 139 | assert expected == ConsumerSupervisor.init([Task], strategy: :one_for_one) 140 | end 141 | end 142 | 143 | test "start_link/3 with new syntax" do 144 | children = [%{id: Foo, restart: 
:temporary, start: {Foo, :start_link, [[]]}}] 145 | spec = {:ok, children, [strategy: :one_for_one]} 146 | 147 | {:ok, pid} = ConsumerSupervisor.start_link(Simple, spec, name: __MODULE__) 148 | 149 | # Sets up a link 150 | {:links, links} = Process.info(self(), :links) 151 | assert pid in links 152 | 153 | # A name 154 | assert Process.whereis(__MODULE__) == pid 155 | 156 | # And the initial call 157 | assert {:supervisor, ConsumerSupervisorTest.Simple, 1} = :proc_lib.translate_initial_call(pid) 158 | end 159 | 160 | ## Code change 161 | 162 | test "code_change/3 with non-ok init" do 163 | worker = worker(Task, [:timer, :sleep, [:infinity]], restart: :transient) 164 | {:ok, pid} = ConsumerSupervisor.start_link(Simple, {:ok, [worker], strategy: :one_for_one}) 165 | 166 | error = {:bad_specs, "consumer supervisor expects a list with a single item as a template"} 167 | assert fake_upgrade(pid, {:ok, [], []}) == {:error, {:error, error}} 168 | 169 | error = {:bad_specs, "consumer supervisor expects a list with a single item as a template"} 170 | assert fake_upgrade(pid, {:ok, [1, 2], []}) == {:error, {:error, error}} 171 | 172 | error = {:bad_opts, "supervisor's init expects a keywords list as options"} 173 | assert fake_upgrade(pid, {:ok, [worker], nil}) == {:error, {:error, error}} 174 | 175 | error = {:bad_opts, "supervisor expects a strategy to be given"} 176 | assert fake_upgrade(pid, {:ok, [worker], []}) == {:error, {:error, error}} 177 | 178 | error = {:bad_opts, "unknown supervision strategy for consumer supervisor"} 179 | assert fake_upgrade(pid, {:ok, [worker], [strategy: :unknown]}) == {:error, {:error, error}} 180 | 181 | assert fake_upgrade(pid, :unknown) == {:error, :unknown} 182 | assert fake_upgrade(pid, :ignore) == :ok 183 | end 184 | 185 | test "code_change/3 with ok init" do 186 | worker = worker(Task, [:timer, :sleep, [:infinity]], restart: :transient) 187 | {:ok, pid} = ConsumerSupervisor.start_link(Simple, {:ok, [worker], strategy: :one_for_one}) 188 | 189 | {:ok, _} = ConsumerSupervisor.start_child(pid, []) 190 | assert %{active: 1} = ConsumerSupervisor.count_children(pid) 191 | 192 | worker = worker(Task, [Kernel, :send], restart: :temporary) 193 | assert fake_upgrade(pid, {:ok, [worker], [strategy: :one_for_one]}) == :ok 194 | assert %{active: 1} = ConsumerSupervisor.count_children(pid) 195 | 196 | {:ok, _} = ConsumerSupervisor.start_child(pid, [[self(), :sample]]) 197 | assert_receive :sample 198 | end 199 | 200 | defp fake_upgrade(pid, args) do 201 | :ok = :sys.suspend(pid) 202 | :sys.replace_state(pid, fn stage -> put_in(stage.state.args, args) end) 203 | res = :sys.change_code(pid, :gen_server, 123, :extra) 204 | :ok = :sys.resume(pid) 205 | res 206 | end 207 | 208 | ## start_child/2 209 | 210 | def start_link(:ok3), do: {:ok, spawn_link(fn -> :timer.sleep(:infinity) end), :extra} 211 | def start_link(:ok2), do: {:ok, spawn_link(fn -> :timer.sleep(:infinity) end)} 212 | def start_link(:error), do: {:error, :found} 213 | def start_link(:ignore), do: :ignore 214 | def start_link(:unknown), do: :unknown 215 | 216 | def start_link(:try_again, notify) do 217 | if Process.get(:try_again) do 218 | Process.put(:try_again, false) 219 | send(notify, {:try_again, false}) 220 | {:error, :try_again} 221 | else 222 | Process.put(:try_again, true) 223 | send(notify, {:try_again, true}) 224 | start_link(:ok2) 225 | end 226 | end 227 | 228 | def start_link(:non_local, :throw), do: throw(:oops) 229 | def start_link(:non_local, :error), do: raise("oops") 230 | def 
start_link(:non_local, :exit), do: exit(:oops) 231 | 232 | def start_link(:restart, value) do 233 | if Process.get({:restart, value}) do 234 | start_link(value) 235 | else 236 | Process.put({:restart, value}, true) 237 | start_link(:ok2) 238 | end 239 | end 240 | 241 | test "start_child/2" do 242 | children = [worker(__MODULE__, [], restart: :transient)] 243 | {:ok, pid} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 244 | 245 | assert {:ok, _, :extra} = ConsumerSupervisor.start_child(pid, [:ok3]) 246 | assert {:ok, _} = ConsumerSupervisor.start_child(pid, [:ok2]) 247 | assert {:error, :found} = ConsumerSupervisor.start_child(pid, [:error]) 248 | assert :ignore = ConsumerSupervisor.start_child(pid, [:ignore]) 249 | assert {:error, :unknown} = ConsumerSupervisor.start_child(pid, [:unknown]) 250 | end 251 | 252 | test "start_child/2 with throw/error/exit" do 253 | children = [worker(__MODULE__, [:non_local], restart: :transient)] 254 | {:ok, pid} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 255 | 256 | assert {:error, {{:nocatch, :oops}, [_ | _]}} = ConsumerSupervisor.start_child(pid, [:throw]) 257 | assert {:error, {%RuntimeError{}, [_ | _]}} = ConsumerSupervisor.start_child(pid, [:error]) 258 | assert {:error, :oops} = ConsumerSupervisor.start_child(pid, [:exit]) 259 | end 260 | 261 | test "temporary child is not restarted regardless of reason" do 262 | children = [worker(__MODULE__, [], restart: :temporary)] 263 | {:ok, pid} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 264 | 265 | assert {:ok, child} = ConsumerSupervisor.start_child(pid, [:ok2]) 266 | assert_kill child, :shutdown 267 | assert %{workers: 0, active: 0} = ConsumerSupervisor.count_children(pid) 268 | 269 | assert {:ok, child} = ConsumerSupervisor.start_child(pid, [:ok2]) 270 | assert_kill child, :whatever 271 | assert %{workers: 0, active: 0} = ConsumerSupervisor.count_children(pid) 272 | end 273 | 274 | test "transient child is restarted unless normal/shutdown/{shutdown, _}" do 275 | children = [worker(__MODULE__, [], restart: :transient)] 276 | {:ok, pid} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 277 | 278 | assert {:ok, child} = ConsumerSupervisor.start_child(pid, [:ok2]) 279 | assert_kill child, :shutdown 280 | assert %{workers: 0, active: 0} = ConsumerSupervisor.count_children(pid) 281 | 282 | assert {:ok, child} = ConsumerSupervisor.start_child(pid, [:ok2]) 283 | assert_kill child, {:shutdown, :signal} 284 | assert %{workers: 0, active: 0} = ConsumerSupervisor.count_children(pid) 285 | 286 | assert {:ok, child} = ConsumerSupervisor.start_child(pid, [:ok2]) 287 | assert_kill child, :whatever 288 | assert %{workers: 1, active: 1} = ConsumerSupervisor.count_children(pid) 289 | end 290 | 291 | test "child is restarted with different values" do 292 | children = [worker(__MODULE__, [:restart], restart: :transient)] 293 | 294 | {:ok, pid} = 295 | ConsumerSupervisor.start_link(children, strategy: :one_for_one, max_restarts: 100_000) 296 | 297 | assert {:ok, child1} = ConsumerSupervisor.start_child(pid, [:ok2]) 298 | 299 | assert [{:undefined, ^child1, :worker, [ConsumerSupervisorTest]}] = 300 | ConsumerSupervisor.which_children(pid) 301 | 302 | assert_kill child1, :kill 303 | assert %{workers: 1, active: 1} = ConsumerSupervisor.count_children(pid) 304 | 305 | assert {:ok, child2} = ConsumerSupervisor.start_child(pid, [:ok3]) 306 | 307 | assert [ 308 | {:undefined, _, :worker, [ConsumerSupervisorTest]}, 309 | {:undefined, ^child2, :worker, 
[ConsumerSupervisorTest]} 310 | ] = ConsumerSupervisor.which_children(pid) 311 | 312 | assert_kill child2, :kill 313 | assert %{workers: 2, active: 2} = ConsumerSupervisor.count_children(pid) 314 | 315 | assert {:ok, child3} = ConsumerSupervisor.start_child(pid, [:ignore]) 316 | 317 | assert [ 318 | {:undefined, _, :worker, [ConsumerSupervisorTest]}, 319 | {:undefined, _, :worker, [ConsumerSupervisorTest]}, 320 | {:undefined, _, :worker, [ConsumerSupervisorTest]} 321 | ] = ConsumerSupervisor.which_children(pid) 322 | 323 | assert_kill child3, :kill 324 | assert %{workers: 2, active: 2} = ConsumerSupervisor.count_children(pid) 325 | 326 | assert {:ok, child4} = ConsumerSupervisor.start_child(pid, [:error]) 327 | 328 | assert [ 329 | {:undefined, _, :worker, [ConsumerSupervisorTest]}, 330 | {:undefined, _, :worker, [ConsumerSupervisorTest]}, 331 | {:undefined, _, :worker, [ConsumerSupervisorTest]} 332 | ] = ConsumerSupervisor.which_children(pid) 333 | 334 | assert_kill child4, :kill 335 | assert %{workers: 3, active: 2} = ConsumerSupervisor.count_children(pid) 336 | 337 | assert {:ok, child5} = ConsumerSupervisor.start_child(pid, [:unknown]) 338 | 339 | assert [ 340 | {:undefined, _, :worker, [ConsumerSupervisorTest]}, 341 | {:undefined, _, :worker, [ConsumerSupervisorTest]}, 342 | {:undefined, :restarting, :worker, [ConsumerSupervisorTest]}, 343 | {:undefined, _, :worker, [ConsumerSupervisorTest]} 344 | ] = ConsumerSupervisor.which_children(pid) 345 | 346 | assert_kill child5, :kill 347 | assert %{workers: 4, active: 2} = ConsumerSupervisor.count_children(pid) 348 | end 349 | 350 | test "child is restarted when trying again" do 351 | children = [worker(__MODULE__, [], restart: :transient)] 352 | {:ok, pid} = ConsumerSupervisor.start_link(children, strategy: :one_for_one, max_restarts: 2) 353 | 354 | assert {:ok, child} = ConsumerSupervisor.start_child(pid, [:try_again, self()]) 355 | assert_received {:try_again, true} 356 | assert_kill child, :kill 357 | assert_receive {:try_again, false} 358 | assert_receive {:try_again, true} 359 | assert %{workers: 1, active: 1} = ConsumerSupervisor.count_children(pid) 360 | end 361 | 362 | test "child triggers maximum restarts" do 363 | Process.flag(:trap_exit, true) 364 | children = [worker(__MODULE__, [], restart: :transient)] 365 | {:ok, pid} = ConsumerSupervisor.start_link(children, strategy: :one_for_one, max_restarts: 1) 366 | 367 | assert {:ok, child} = ConsumerSupervisor.start_child(pid, [:restart, :error]) 368 | assert_kill child, :kill 369 | assert_receive {:EXIT, ^pid, :shutdown} 370 | end 371 | 372 | test "child triggers maximum seconds" do 373 | Process.flag(:trap_exit, true) 374 | children = [worker(__MODULE__, [], restart: :transient)] 375 | {:ok, pid} = ConsumerSupervisor.start_link(children, strategy: :one_for_one, max_seconds: 0) 376 | 377 | assert {:ok, child} = ConsumerSupervisor.start_child(pid, [:restart, :error]) 378 | assert_kill child, :kill 379 | assert_receive {:EXIT, ^pid, :shutdown} 380 | end 381 | 382 | test "child triggers maximum intensity when trying again" do 383 | Process.flag(:trap_exit, true) 384 | children = [worker(__MODULE__, [], restart: :transient)] 385 | {:ok, pid} = ConsumerSupervisor.start_link(children, strategy: :one_for_one, max_restarts: 10) 386 | 387 | assert {:ok, child} = ConsumerSupervisor.start_child(pid, [:restart, :error]) 388 | assert_kill child, :kill 389 | assert_receive {:EXIT, ^pid, :shutdown} 390 | end 391 | 392 | ## terminate/2 393 | 394 | test "terminates children with brutal kill" do 395 | 
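# Added commentary (not part of the original test): `shutdown: :brutal_kill`
# makes the supervisor terminate children with `Process.exit(child, :kill)`,
# so the monitors below observe the `:killed` reason rather than `:shutdown`.
# The tests that follow contrast this with `:infinity` and integer shutdown
# values, where children are first asked to exit with `:shutdown` and, for
# integer values, are killed only if they exceed the timeout.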
Process.flag(:trap_exit, true) 396 | children = [worker(Task, [], shutdown: :brutal_kill, restart: :transient)] 397 | {:ok, sup} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 398 | 399 | fun = fn -> :timer.sleep(:infinity) end 400 | assert {:ok, child1} = ConsumerSupervisor.start_child(sup, [fun]) 401 | assert {:ok, child2} = ConsumerSupervisor.start_child(sup, [fun]) 402 | assert {:ok, child3} = ConsumerSupervisor.start_child(sup, [fun]) 403 | 404 | Process.monitor(child1) 405 | Process.monitor(child2) 406 | Process.monitor(child3) 407 | assert_kill sup, :shutdown 408 | assert_receive {:DOWN, _, :process, ^child1, :killed} 409 | assert_receive {:DOWN, _, :process, ^child2, :killed} 410 | assert_receive {:DOWN, _, :process, ^child3, :killed} 411 | end 412 | 413 | test "terminates children with infinity shutdown" do 414 | Process.flag(:trap_exit, true) 415 | children = [worker(Task, [], shutdown: :infinity, restart: :transient)] 416 | {:ok, sup} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 417 | 418 | fun = fn -> :timer.sleep(:infinity) end 419 | assert {:ok, child1} = ConsumerSupervisor.start_child(sup, [fun]) 420 | assert {:ok, child2} = ConsumerSupervisor.start_child(sup, [fun]) 421 | assert {:ok, child3} = ConsumerSupervisor.start_child(sup, [fun]) 422 | 423 | Process.monitor(child1) 424 | Process.monitor(child2) 425 | Process.monitor(child3) 426 | assert_kill sup, :shutdown 427 | assert_receive {:DOWN, _, :process, ^child1, :shutdown} 428 | assert_receive {:DOWN, _, :process, ^child2, :shutdown} 429 | assert_receive {:DOWN, _, :process, ^child3, :shutdown} 430 | end 431 | 432 | test "terminates children with infinity shutdown and abnormal reason" do 433 | Process.flag(:trap_exit, true) 434 | children = [worker(Task, [], shutdown: :infinity, restart: :transient)] 435 | {:ok, sup} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 436 | 437 | fun = fn -> 438 | Process.flag(:trap_exit, true) 439 | receive(do: (_ -> exit({:shutdown, :oops}))) 440 | end 441 | 442 | assert {:ok, child1} = ConsumerSupervisor.start_child(sup, [fun]) 443 | assert {:ok, child2} = ConsumerSupervisor.start_child(sup, [fun]) 444 | assert {:ok, child3} = ConsumerSupervisor.start_child(sup, [fun]) 445 | 446 | Process.monitor(child1) 447 | Process.monitor(child2) 448 | Process.monitor(child3) 449 | assert_kill sup, :shutdown 450 | assert_receive {:DOWN, _, :process, ^child1, {:shutdown, :oops}} 451 | assert_receive {:DOWN, _, :process, ^child2, {:shutdown, :oops}} 452 | assert_receive {:DOWN, _, :process, ^child3, {:shutdown, :oops}} 453 | end 454 | 455 | test "terminates children with integer shutdown" do 456 | Process.flag(:trap_exit, true) 457 | children = [worker(Task, [], shutdown: 1000, restart: :transient)] 458 | {:ok, sup} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 459 | 460 | fun = fn -> :timer.sleep(:infinity) end 461 | assert {:ok, child1} = ConsumerSupervisor.start_child(sup, [fun]) 462 | assert {:ok, child2} = ConsumerSupervisor.start_child(sup, [fun]) 463 | assert {:ok, child3} = ConsumerSupervisor.start_child(sup, [fun]) 464 | 465 | Process.monitor(child1) 466 | Process.monitor(child2) 467 | Process.monitor(child3) 468 | assert_kill sup, :shutdown 469 | assert_receive {:DOWN, _, :process, ^child1, :shutdown} 470 | assert_receive {:DOWN, _, :process, ^child2, :shutdown} 471 | assert_receive {:DOWN, _, :process, ^child3, :shutdown} 472 | end 473 | 474 | test "terminates children with integer shutdown and abnormal reason" do 475 
| Process.flag(:trap_exit, true) 476 | children = [worker(Task, [], shutdown: 1000, restart: :transient)] 477 | {:ok, sup} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 478 | 479 | fun = fn -> 480 | Process.flag(:trap_exit, true) 481 | receive(do: (_ -> exit({:shutdown, :oops}))) 482 | end 483 | 484 | assert {:ok, child1} = ConsumerSupervisor.start_child(sup, [fun]) 485 | assert {:ok, child2} = ConsumerSupervisor.start_child(sup, [fun]) 486 | assert {:ok, child3} = ConsumerSupervisor.start_child(sup, [fun]) 487 | 488 | Process.monitor(child1) 489 | Process.monitor(child2) 490 | Process.monitor(child3) 491 | assert_kill sup, :shutdown 492 | assert_receive {:DOWN, _, :process, ^child1, {:shutdown, :oops}} 493 | assert_receive {:DOWN, _, :process, ^child2, {:shutdown, :oops}} 494 | assert_receive {:DOWN, _, :process, ^child3, {:shutdown, :oops}} 495 | end 496 | 497 | test "terminates children with expired integer shutdown" do 498 | Process.flag(:trap_exit, true) 499 | children = [worker(Task, [], shutdown: 1, restart: :transient)] 500 | {:ok, sup} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 501 | 502 | fun = fn -> :timer.sleep(:infinity) end 503 | 504 | tmt = fn -> 505 | Process.flag(:trap_exit, true) 506 | :timer.sleep(:infinity) 507 | end 508 | 509 | assert {:ok, child1} = ConsumerSupervisor.start_child(sup, [fun]) 510 | assert {:ok, child2} = ConsumerSupervisor.start_child(sup, [tmt]) 511 | assert {:ok, child3} = ConsumerSupervisor.start_child(sup, [fun]) 512 | 513 | Process.monitor(child1) 514 | Process.monitor(child2) 515 | Process.monitor(child3) 516 | assert_kill sup, :shutdown 517 | assert_receive {:DOWN, _, :process, ^child1, :shutdown} 518 | assert_receive {:DOWN, _, :process, ^child2, :killed} 519 | assert_receive {:DOWN, _, :process, ^child3, :shutdown} 520 | end 521 | 522 | ## terminate_child/2 523 | 524 | test "terminates child with brutal kill" do 525 | children = [worker(Task, [], shutdown: :brutal_kill, restart: :transient)] 526 | {:ok, sup} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 527 | 528 | fun = fn -> :timer.sleep(:infinity) end 529 | assert {:ok, child} = ConsumerSupervisor.start_child(sup, [fun]) 530 | 531 | Process.monitor(child) 532 | assert :ok = ConsumerSupervisor.terminate_child(sup, child) 533 | assert_receive {:DOWN, _, :process, ^child, :killed} 534 | 535 | assert {:error, :not_found} = ConsumerSupervisor.terminate_child(sup, child) 536 | assert %{workers: 0, active: 0} = ConsumerSupervisor.count_children(sup) 537 | end 538 | 539 | test "terminates child with integer shutdown" do 540 | children = [worker(Task, [], shutdown: 1000, restart: :transient)] 541 | {:ok, sup} = ConsumerSupervisor.start_link(children, strategy: :one_for_one) 542 | 543 | fun = fn -> :timer.sleep(:infinity) end 544 | assert {:ok, child} = ConsumerSupervisor.start_child(sup, [fun]) 545 | 546 | Process.monitor(child) 547 | assert :ok = ConsumerSupervisor.terminate_child(sup, child) 548 | assert_receive {:DOWN, _, :process, ^child, :shutdown} 549 | 550 | assert {:error, :not_found} = ConsumerSupervisor.terminate_child(sup, child) 551 | assert %{workers: 0, active: 0} = ConsumerSupervisor.count_children(sup) 552 | end 553 | 554 | test "terminates restarting child" do 555 | children = [worker(__MODULE__, [:restart], restart: :transient)] 556 | 557 | {:ok, sup} = 558 | ConsumerSupervisor.start_link(children, strategy: :one_for_one, max_restarts: 100_000) 559 | 560 | assert {:ok, child} = ConsumerSupervisor.start_child(sup, 
[:error]) 561 | assert_kill child, :kill 562 | assert :ok = ConsumerSupervisor.terminate_child(sup, child) 563 | 564 | assert {:error, :not_found} = ConsumerSupervisor.terminate_child(sup, child) 565 | assert %{workers: 0, active: 0} = ConsumerSupervisor.count_children(sup) 566 | end 567 | 568 | defmodule Consumer do 569 | def start_link(opts \\ []) do 570 | spec = 571 | opts 572 | |> Keyword.take([:restart, :shutdown]) 573 | |> Keyword.put_new(:restart, :temporary) 574 | |> Enum.into(%{id: __MODULE__, start: {__MODULE__, :start_child, [self()]}}) 575 | 576 | opts = opts ++ [strategy: :one_for_one, max_restarts: 0] 577 | ConsumerSupervisor.start_link([spec], opts) 578 | end 579 | 580 | def start_child(pid, :ok2) do 581 | child = spawn_link(:timer, :sleep, [:infinity]) 582 | send(pid, {:child_started, child}) 583 | {:ok, child} 584 | end 585 | 586 | def start_child(pid, :ok3) do 587 | child = spawn_link(:timer, :sleep, [:infinity]) 588 | send(pid, {:child_started, child}) 589 | {:ok, child, :extra} 590 | end 591 | 592 | def start_child(pid, :error) do 593 | send(pid, :child_start_error) 594 | {:error, :found} 595 | end 596 | 597 | def start_child(pid, :ignore) do 598 | send(pid, :child_ignore) 599 | :ignore 600 | end 601 | 602 | def start_child(pid, :unknown) do 603 | send(pid, :child_start_unknown) 604 | :unknown 605 | end 606 | 607 | def start_child(pid, {:non_local, class}) do 608 | send(pid, {:child_non_local, class}) 609 | 610 | stack = 611 | try do 612 | throw(:oops) 613 | catch 614 | :oops -> __STACKTRACE__ 615 | end 616 | 617 | :erlang.raise(class, :oops, stack) 618 | end 619 | 620 | def start_child(pid, {:restart, value}) do 621 | if Process.get({:restart, value}) do 622 | start_child(pid, value) 623 | else 624 | Process.put({:restart, value}, true) 625 | start_child(pid, :ok2) 626 | end 627 | end 628 | end 629 | 630 | defmodule Producer do 631 | use GenStage 632 | 633 | def start_link(state \\ nil) do 634 | GenStage.start_link(__MODULE__, state) 635 | end 636 | 637 | def sync_queue(stage, events) do 638 | GenStage.call(stage, {:queue, events}) 639 | end 640 | 641 | ## Callbacks 642 | 643 | def init(state) do 644 | {:producer, state} 645 | end 646 | 647 | def handle_call({:queue, events}, _from, state) do 648 | {:reply, state, events, state} 649 | end 650 | 651 | def handle_demand(_, state) do 652 | {:noreply, [], state} 653 | end 654 | end 655 | 656 | describe "sync_subscribe" do 657 | test "returns ok with reference" do 658 | {:ok, sup} = Consumer.start_link() 659 | {:ok, producer} = Producer.start_link() 660 | assert {:ok, ref} = GenStage.sync_subscribe(sup, to: producer) 661 | assert is_reference(ref) 662 | end 663 | 664 | @tag :capture_log 665 | test "returns errors on bad options" do 666 | {:ok, sup} = Consumer.start_link() 667 | 668 | assert {:error, {:bad_opts, message}} = 669 | GenStage.sync_subscribe(sup, to: :whatever, max_demand: 0) 670 | 671 | assert message == "expected :max_demand to be equal to or greater than 1, got: 0" 672 | 673 | assert {:error, {:bad_opts, message}} = 674 | GenStage.sync_subscribe(sup, to: :whatever, min_demand: 2000) 675 | 676 | assert message == "expected :min_demand to be equal to or less than 999, got: 2000" 677 | end 678 | 679 | @tag :capture_log 680 | test "supervisor exits when there is no named producer and subscription is permanent" do 681 | Process.flag(:trap_exit, true) 682 | {:ok, sup} = Consumer.start_link() 683 | assert {:ok, _} = GenStage.sync_subscribe(sup, to: :unknown) 684 | assert_receive {:EXIT, ^sup, :noproc} 685 | end 686 | 
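# Sketch (added commentary, not part of the original suite): the default
# `cancel: :permanent` used above is why the supervisor exits on `:noproc`.
# Application code that should survive a producer terminating for a normal
# reason would typically subscribe transiently instead, for example:
#
#     {:ok, _ref} = GenStage.sync_subscribe(sup, to: producer, cancel: :transient)
#
# With `cancel: :transient`, the consumer only exits if the producer terminates
# with a reason other than `:normal`, `:shutdown`, or `{:shutdown, _}`.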
687 | @tag :capture_log 688 | test "supervisor exits when producer is dead and subscription is permanent" do 689 | Process.flag(:trap_exit, true) 690 | {:ok, producer} = Producer.start_link() 691 | GenStage.stop(producer) 692 | {:ok, sup} = Consumer.start_link() 693 | assert {:ok, _} = GenStage.sync_subscribe(sup, to: producer) 694 | assert_receive {:EXIT, ^sup, :noproc} 695 | end 696 | 697 | @tag :capture_log 698 | test "supervisor does not exit when there is no named producer and subscription is temporary" do 699 | {:ok, sup} = Consumer.start_link() 700 | assert {:ok, _} = GenStage.sync_subscribe(sup, to: :unknown, cancel: :temporary) 701 | _ = :sys.get_state(sup) 702 | end 703 | 704 | @tag :capture_log 705 | test "supervisor does not exit when producer is dead and subscription is temporary" do 706 | {:ok, producer} = Producer.start_link() 707 | GenStage.stop(producer) 708 | {:ok, sup} = Consumer.start_link() 709 | assert {:ok, _} = GenStage.sync_subscribe(sup, to: producer, cancel: :temporary) 710 | _ = :sys.get_state(sup) 711 | end 712 | end 713 | 714 | describe "supervisor consuming start args" do 715 | @tag :capture_log 716 | test "start child" do 717 | {:ok, producer} = Producer.start_link() 718 | {:ok, sup} = Consumer.start_link() 719 | opts = [to: producer, cancel: :temporary] 720 | assert {:ok, _} = GenStage.sync_subscribe(sup, opts) 721 | 722 | Producer.sync_queue(producer, [:ok2]) 723 | assert_receive {:child_started, ok2} 724 | assert [{:undefined, ^ok2, :worker, [Consumer]}] = ConsumerSupervisor.which_children(sup) 725 | 726 | Producer.sync_queue(producer, [:ok3]) 727 | assert_receive {:child_started, _} 728 | assert %{active: 2} = ConsumerSupervisor.count_children(sup) 729 | 730 | Producer.sync_queue(producer, [:error]) 731 | assert_receive :child_start_error 732 | 733 | assert [{:undefined, _, :worker, [Consumer]}, {:undefined, _, :worker, [Consumer]}] = 734 | ConsumerSupervisor.which_children(sup) 735 | 736 | assert %{workers: 2, active: 2} = ConsumerSupervisor.count_children(sup) 737 | 738 | Producer.sync_queue(producer, [:ignore]) 739 | assert_receive :child_ignore 740 | 741 | assert [{:undefined, _, :worker, [Consumer]}, {:undefined, _, :worker, [Consumer]}] = 742 | ConsumerSupervisor.which_children(sup) 743 | 744 | assert %{workers: 2, active: 2} = ConsumerSupervisor.count_children(sup) 745 | 746 | Producer.sync_queue(producer, [:unknown]) 747 | assert_receive :child_start_unknown 748 | 749 | assert [{:undefined, _, :worker, [Consumer]}, {:undefined, _, :worker, [Consumer]}] = 750 | ConsumerSupervisor.which_children(sup) 751 | 752 | assert %{workers: 2, active: 2} = ConsumerSupervisor.count_children(sup) 753 | end 754 | 755 | test "start child with stream" do 756 | {:ok, producer} = GenStage.from_enumerable([:ok2, :ok2, :ok2]) 757 | {:ok, sup} = Consumer.start_link() 758 | opts = [to: producer, cancel: :temporary] 759 | assert {:ok, _} = GenStage.sync_subscribe(sup, opts) 760 | assert_receive {:child_started, _} 761 | assert_receive {:child_started, _} 762 | assert_receive {:child_started, _} 763 | end 764 | 765 | @tag :capture_log 766 | test "start child with throw/error/exit" do 767 | {:ok, producer} = Producer.start_link() 768 | {:ok, sup} = Consumer.start_link() 769 | opts = [to: producer, cancel: :temporary] 770 | assert {:ok, _} = GenStage.sync_subscribe(sup, opts) 771 | 772 | Producer.sync_queue(producer, [{:non_local, :throw}]) 773 | assert_receive {:child_non_local, :throw} 774 | assert [] = ConsumerSupervisor.which_children(sup) 775 | 776 | 
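# As with the :throw case above, raises and exits inside start_child/2 are
# caught by the ConsumerSupervisor: no child is started for that event and the
# supervisor itself keeps running, as the empty which_children/1 results show.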
Producer.sync_queue(producer, [{:non_local, :error}]) 777 | assert_receive {:child_non_local, :error} 778 | assert [] = ConsumerSupervisor.which_children(sup) 779 | 780 | Producer.sync_queue(producer, [{:non_local, :exit}]) 781 | assert_receive {:child_non_local, :exit} 782 | assert [] = ConsumerSupervisor.which_children(sup) 783 | end 784 | 785 | @tag :capture_log 786 | test "start child limited by max_demand" do 787 | {:ok, producer} = Producer.start_link() 788 | {:ok, sup} = Consumer.start_link() 789 | opts = [to: producer, cancel: :temporary, max_demand: 1, min_demand: 0] 790 | assert {:ok, _} = GenStage.sync_subscribe(sup, opts) 791 | 792 | Producer.sync_queue(producer, [:ok2, :error, :ignore, :ok2, :ok2, :ok2]) 793 | assert_receive {:child_started, child1} 794 | assert [{:undefined, ^child1, :worker, [Consumer]}] = ConsumerSupervisor.which_children(sup) 795 | assert %{workers: 1, active: 1} = ConsumerSupervisor.count_children(sup) 796 | refute_received :child_start_error 797 | 798 | assert_kill(child1, :shutdown) 799 | assert_receive :child_start_error 800 | assert_receive :child_ignore 801 | assert_receive {:child_started, child2} 802 | assert [{:undefined, ^child2, :worker, [Consumer]}] = ConsumerSupervisor.which_children(sup) 803 | assert %{workers: 1, active: 1} = ConsumerSupervisor.count_children(sup) 804 | refute_received {:child_started, _} 805 | 806 | assert ConsumerSupervisor.terminate_child(sup, child2) == :ok 807 | assert_receive {:child_started, child3} 808 | assert [{:undefined, ^child3, :worker, [Consumer]}] = ConsumerSupervisor.which_children(sup) 809 | assert %{workers: 1, active: 1} = ConsumerSupervisor.count_children(sup) 810 | refute_received {:child_started, _} 811 | end 812 | 813 | test "restarting children counted in max_demand" do 814 | {:ok, producer} = Producer.start_link() 815 | {:ok, sup} = Consumer.start_link(restart: :transient, max_restarts: 100_000) 816 | opts = [to: producer, cancel: :temporary, max_demand: 1, min_demand: 0] 817 | assert {:ok, _} = GenStage.sync_subscribe(sup, opts) 818 | 819 | Producer.sync_queue(producer, [{:restart, :error}, :ok2, :ok2, :ok2]) 820 | assert_receive {:child_started, child1} 821 | assert %{workers: 1, active: 1} = ConsumerSupervisor.count_children(sup) 822 | assert [{:undefined, ^child1, :worker, [Consumer]}] = ConsumerSupervisor.which_children(sup) 823 | 824 | assert_kill child1, :kill 825 | assert %{workers: 1, active: 0} = ConsumerSupervisor.count_children(sup) 826 | 827 | assert [{:undefined, :restarting, :worker, [Consumer]}] = 828 | ConsumerSupervisor.which_children(sup) 829 | 830 | refute_received {:child_started, _} 831 | 832 | assert ConsumerSupervisor.terminate_child(sup, child1) == :ok 833 | assert_receive {:child_started, child2} 834 | assert %{workers: 1, active: 1} = ConsumerSupervisor.count_children(sup) 835 | assert [{:undefined, ^child2, :worker, [Consumer]}] = ConsumerSupervisor.which_children(sup) 836 | 837 | assert_kill child2, :kill 838 | assert_receive {:child_started, child3} 839 | assert %{workers: 1, active: 1} = ConsumerSupervisor.count_children(sup) 840 | assert [{:undefined, ^child3, :worker, [Consumer]}] = ConsumerSupervisor.which_children(sup) 841 | 842 | refute_received {:child_started, _} 843 | end 844 | 845 | test "children remain after producer down" do 846 | _ = Process.flag(:trap_exit, true) 847 | {:ok, producer} = Producer.start_link() 848 | {:ok, sup} = Consumer.start_link() 849 | opts = [to: producer, cancel: :temporary, max_demand: 2, min_demand: 0] 850 | assert {:ok, _} = 
GenStage.sync_subscribe(sup, opts) 851 | 852 | Producer.sync_queue(producer, [:ok2, :ok2]) 853 | assert_receive {:child_started, child1} 854 | assert_receive {:child_started, child2} 855 | assert %{workers: 2, active: 2} = ConsumerSupervisor.count_children(sup) 856 | 857 | assert_kill producer, :shutdown 858 | 859 | assert %{workers: 2, active: 2} = ConsumerSupervisor.count_children(sup) 860 | assert_kill child1, :shutdown 861 | assert [{:undefined, ^child2, :worker, [Consumer]}] = ConsumerSupervisor.which_children(sup) 862 | 863 | assert ConsumerSupervisor.terminate_child(sup, child2) == :ok 864 | assert %{workers: 0, active: 0} = ConsumerSupervisor.count_children(sup) 865 | assert [] = ConsumerSupervisor.which_children(sup) 866 | end 867 | 868 | test "ask for more events when count reaches min_demand (high)" do 869 | _ = Process.flag(:trap_exit, true) 870 | {:ok, producer} = Producer.start_link() 871 | {:ok, sup} = Consumer.start_link() 872 | opts = [to: producer, cancel: :temporary, max_demand: 3, min_demand: 2] 873 | assert {:ok, _} = GenStage.sync_subscribe(sup, opts) 874 | 875 | Producer.sync_queue(producer, [:ok2, :ok2, :ok2]) 876 | assert_receive {:child_started, child1} 877 | assert_receive {:child_started, child2} 878 | assert_receive {:child_started, _child3} 879 | assert %{workers: 3, active: 3} = ConsumerSupervisor.count_children(sup) 880 | 881 | assert_kill child1, :shutdown 882 | assert_kill child2, :shutdown 883 | 884 | assert %{workers: 1, active: 1} = ConsumerSupervisor.count_children(sup) 885 | 886 | Producer.sync_queue(producer, [:ok2]) 887 | assert_receive {:child_started, _child4} 888 | 889 | Producer.sync_queue(producer, [:ok2]) 890 | assert_receive {:child_started, _child5} 891 | 892 | assert %{workers: 3, active: 3} = ConsumerSupervisor.count_children(sup) 893 | 894 | Producer.sync_queue(producer, [:ok2]) 895 | refute_received {:child_started, _child6} 896 | end 897 | 898 | test "ask for more events when count reaches min_demand (low)" do 899 | {:ok, producer} = 900 | Task.start_link(fn -> 901 | receive do 902 | {:"$gen_producer", _, {:subscribe, _, _}} -> 903 | receive do 904 | {:"$gen_producer", {pid, ref}, {:ask, 3}} -> 905 | send(pid, {:"$gen_consumer", {pid, ref}, [:ok2, :ok2]}) 906 | 907 | receive do 908 | {:"$gen_producer", _, {:ask, _}} -> 909 | raise "oops" 910 | end 911 | end 912 | end 913 | end) 914 | 915 | {:ok, sup} = Consumer.start_link() 916 | opts = [to: producer, cancel: :temporary, max_demand: 3, min_demand: 0] 917 | assert {:ok, _} = GenStage.sync_subscribe(sup, opts) 918 | assert_receive {:child_started, _child1} 919 | assert_receive {:child_started, _child2} 920 | refute_received {:child_started, _child3} 921 | end 922 | end 923 | 924 | defp worker(mod, args, extra \\ []) do 925 | extra |> Enum.into(%{id: mod, start: {mod, :start_link, args}}) 926 | end 927 | 928 | defp assert_kill(pid, reason) do 929 | ref = Process.monitor(pid) 930 | Process.exit(pid, reason) 931 | assert_receive {:DOWN, ^ref, _, _, _} 932 | end 933 | end 934 | -------------------------------------------------------------------------------- /test/gen_stage/broadcast_dispatcher_test.exs: -------------------------------------------------------------------------------- 1 | defmodule GenStage.BroadcastDispatcherTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias GenStage.BroadcastDispatcher, as: D 5 | 6 | defp dispatcher(opts) do 7 | {:ok, {[], 0, _subscribers} = state} = D.init(opts) 8 | state 9 | end 10 | 11 | test "subscribes and cancels" do 12 | pid = self() 13 | ref = 
make_ref() 14 | disp = dispatcher([]) 15 | expected_subscribers = MapSet.new([pid]) 16 | 17 | {:ok, 0, disp} = D.subscribe([], {pid, ref}, disp) 18 | assert disp == {[{0, pid, ref, nil}], 0, expected_subscribers} 19 | 20 | {:ok, 0, disp} = D.cancel({pid, ref}, disp) 21 | assert disp == {[], 0, MapSet.new()} 22 | end 23 | 24 | test "subscribes, asks, and cancels" do 25 | pid = self() 26 | ref = make_ref() 27 | disp = dispatcher([]) 28 | expected_subscribers = MapSet.new([pid]) 29 | 30 | {:ok, 0, disp} = D.subscribe([], {pid, ref}, disp) 31 | assert disp == {[{0, pid, ref, nil}], 0, expected_subscribers} 32 | 33 | {:ok, 10, disp} = D.ask(10, {pid, ref}, disp) 34 | assert disp == {[{0, pid, ref, nil}], 10, expected_subscribers} 35 | 36 | {:ok, 0, disp} = D.cancel({pid, ref}, disp) 37 | assert disp == {[], 0, MapSet.new()} 38 | end 39 | 40 | test "multiple subscriptions with early demand" do 41 | pid1 = self() 42 | pid2 = spawn(fn -> :ok end) 43 | ref1 = make_ref() 44 | ref2 = make_ref() 45 | disp = dispatcher([]) 46 | 47 | expected_subscribers = MapSet.new([pid1]) 48 | 49 | {:ok, 0, disp} = D.subscribe([], {pid1, ref1}, disp) 50 | assert disp == {[{0, pid1, ref1, nil}], 0, expected_subscribers} 51 | 52 | {:ok, 10, disp} = D.ask(10, {pid1, ref1}, disp) 53 | assert disp == {[{0, pid1, ref1, nil}], 10, expected_subscribers} 54 | 55 | expected_subscribers = MapSet.put(expected_subscribers, pid2) 56 | 57 | {:ok, 0, disp} = D.subscribe([], {pid2, ref2}, disp) 58 | assert disp == {[{0, pid2, ref2, nil}, {10, pid1, ref1, nil}], 0, expected_subscribers} 59 | 60 | expected_subscribers = MapSet.delete(expected_subscribers, pid1) 61 | 62 | {:ok, 0, disp} = D.cancel({pid1, ref1}, disp) 63 | assert disp == {[{0, pid2, ref2, nil}], 0, expected_subscribers} 64 | 65 | {:ok, 10, disp} = D.ask(10, {pid2, ref2}, disp) 66 | assert disp == {[{0, pid2, ref2, nil}], 10, expected_subscribers} 67 | end 68 | 69 | test "multiple subscriptions with late demand" do 70 | pid1 = self() 71 | pid2 = spawn_forwarder() 72 | ref1 = make_ref() 73 | ref2 = make_ref() 74 | disp = dispatcher([]) 75 | 76 | expected_subscribers = MapSet.new([pid1]) 77 | 78 | {:ok, 0, disp} = D.subscribe([], {pid1, ref1}, disp) 79 | assert disp == {[{0, pid1, ref1, nil}], 0, expected_subscribers} 80 | 81 | expected_subscribers = MapSet.put(expected_subscribers, pid2) 82 | 83 | {:ok, 0, disp} = D.subscribe([], {pid2, ref2}, disp) 84 | assert disp == {[{0, pid2, ref2, nil}, {0, pid1, ref1, nil}], 0, expected_subscribers} 85 | 86 | {:ok, 0, disp} = D.ask(10, {pid1, ref1}, disp) 87 | assert disp == {[{10, pid1, ref1, nil}, {0, pid2, ref2, nil}], 0, expected_subscribers} 88 | 89 | expected_subscribers = MapSet.delete(expected_subscribers, pid2) 90 | 91 | {:ok, 10, disp} = D.cancel({pid2, ref2}, disp) 92 | assert disp == {[{0, pid1, ref1, nil}], 10, expected_subscribers} 93 | 94 | {:ok, 10, disp} = D.ask(10, {pid1, ref1}, disp) 95 | assert disp == {[{0, pid1, ref1, nil}], 20, expected_subscribers} 96 | end 97 | 98 | test "subscribes, asks and dispatches to multiple consumers" do 99 | pid1 = spawn_forwarder() 100 | pid2 = spawn_forwarder() 101 | pid3 = spawn_forwarder() 102 | ref1 = make_ref() 103 | ref2 = make_ref() 104 | ref3 = make_ref() 105 | disp = dispatcher([]) 106 | 107 | {:ok, 0, disp} = D.subscribe([], {pid1, ref1}, disp) 108 | {:ok, 0, disp} = D.subscribe([], {pid2, ref2}, disp) 109 | 110 | {:ok, 0, disp} = D.ask(3, {pid1, ref1}, disp) 111 | {:ok, 2, disp} = D.ask(2, {pid2, ref2}, disp) 112 | 113 | expected_subscribers = MapSet.new([pid1, pid2]) 
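# Added commentary: the state asserted on below has the shape
# {demands, waiting, subscribers}. Each entry {excess, pid, ref, selector}
# records how far that consumer's demand is ahead of the minimum shared by all
# consumers, `waiting` is the demand that can currently be broadcast to every
# subscriber, and `subscribers` is the MapSet used to keep one subscription
# per process.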
114 | 115 | assert disp == {[{0, pid2, ref2, nil}, {1, pid1, ref1, nil}], 2, expected_subscribers} 116 | 117 | # One batch fits all 118 | {:ok, [], disp} = D.dispatch([:a, :b], 2, disp) 119 | assert disp == {[{0, pid2, ref2, nil}, {1, pid1, ref1, nil}], 0, expected_subscribers} 120 | 121 | assert_receive {:"$gen_consumer", {_, ^ref1}, [:a, :b]} 122 | assert_receive {:"$gen_consumer", {_, ^ref2}, [:a, :b]} 123 | 124 | # A batch with left-over 125 | {:ok, 1, disp} = D.ask(2, {pid2, ref2}, disp) 126 | 127 | {:ok, [:d], disp} = D.dispatch([:c, :d], 2, disp) 128 | assert disp == {[{1, pid2, ref2, nil}, {0, pid1, ref1, nil}], 0, expected_subscribers} 129 | assert_receive {:"$gen_consumer", {_, ^ref1}, [:c]} 130 | assert_receive {:"$gen_consumer", {_, ^ref2}, [:c]} 131 | 132 | # A batch with no demand 133 | {:ok, [:d], disp} = D.dispatch([:d], 1, disp) 134 | assert disp == {[{1, pid2, ref2, nil}, {0, pid1, ref1, nil}], 0, expected_subscribers} 135 | refute_received {:"$gen_consumer", {_, _}, _} 136 | 137 | # Add a late subscriber 138 | {:ok, 1, disp} = D.ask(1, {pid1, ref1}, disp) 139 | {:ok, 0, disp} = D.subscribe([], {pid3, ref3}, disp) 140 | {:ok, [:d, :e], disp} = D.dispatch([:d, :e], 2, disp) 141 | 142 | expected_subscribers = MapSet.put(expected_subscribers, pid3) 143 | 144 | assert disp == 145 | {[{0, pid3, ref3, nil}, {1, pid1, ref1, nil}, {1, pid2, ref2, nil}], 0, 146 | expected_subscribers} 147 | 148 | # Even out 149 | {:ok, 0, disp} = D.ask(2, {pid1, ref1}, disp) 150 | {:ok, 0, disp} = D.ask(2, {pid2, ref2}, disp) 151 | {:ok, 3, disp} = D.ask(3, {pid3, ref3}, disp) 152 | {:ok, [], disp} = D.dispatch([:d, :e, :f], 3, disp) 153 | 154 | assert disp == 155 | {[{0, pid3, ref3, nil}, {0, pid2, ref2, nil}, {0, pid1, ref1, nil}], 0, 156 | expected_subscribers} 157 | 158 | assert_receive {:"$gen_consumer", {_, ^ref1}, [:d, :e, :f]} 159 | assert_receive {:"$gen_consumer", {_, ^ref2}, [:d, :e, :f]} 160 | assert_receive {:"$gen_consumer", {_, ^ref3}, [:d, :e, :f]} 161 | end 162 | 163 | test "subscribing with a selector function" do 164 | pid1 = spawn_forwarder() 165 | pid2 = spawn_forwarder() 166 | ref1 = make_ref() 167 | ref2 = make_ref() 168 | disp = dispatcher([]) 169 | selector1 = fn %{key: key} -> String.starts_with?(key, "pre") end 170 | selector2 = fn %{key: key} -> String.starts_with?(key, "pref") end 171 | 172 | {:ok, 0, disp} = D.subscribe([selector: selector1], {pid1, ref1}, disp) 173 | {:ok, 0, disp} = D.subscribe([selector: selector2], {pid2, ref2}, disp) 174 | assert {[{0, ^pid2, ^ref2, _selector2}, {0, ^pid1, ^ref1, _selector1}], 0, _} = disp 175 | 176 | {:ok, 0, disp} = D.ask(4, {pid2, ref2}, disp) 177 | {:ok, 4, disp} = D.ask(4, {pid1, ref1}, disp) 178 | 179 | events = [%{key: "pref-1234"}, %{key: "pref-5678"}, %{key: "pre0000"}, %{key: "foo0000"}] 180 | {:ok, [], _disp} = D.dispatch(events, 4, disp) 181 | 182 | assert_receive {:"$gen_producer", {_, ^ref1}, {:ask, 1}} 183 | assert_receive {:"$gen_producer", {_, ^ref2}, {:ask, 2}} 184 | 185 | assert_receive {:"$gen_consumer", {_, ^ref1}, 186 | [%{key: "pref-1234"}, %{key: "pref-5678"}, %{key: "pre0000"}]} 187 | 188 | assert_receive {:"$gen_consumer", {_, ^ref2}, [%{key: "pref-1234"}, %{key: "pref-5678"}]} 189 | end 190 | 191 | test "delivers info to current process" do 192 | pid1 = spawn_forwarder() 193 | pid2 = spawn_forwarder() 194 | ref1 = make_ref() 195 | ref2 = make_ref() 196 | disp = dispatcher([]) 197 | 198 | {:ok, 0, disp} = D.subscribe([], {pid1, ref1}, disp) 199 | {:ok, 0, disp} = D.subscribe([], {pid2, ref2}, disp) 200 | 
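# Added commentary: the test below checks that D.info/2 sends the message back
# to the producer process (self() in these tests) and leaves the dispatcher
# state untouched; contrast with the "queues info to backed up consumers" test
# in the PartitionDispatcher suite further below.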
{:ok, 0, disp} = D.ask(3, {pid1, ref1}, disp) 201 | 202 | {:ok, notify_disp} = D.info(:hello, disp) 203 | assert disp == notify_disp 204 | assert_receive :hello 205 | end 206 | 207 | test "subscribing is idempotent" do 208 | pid = self() 209 | ref1 = make_ref() 210 | ref2 = make_ref() 211 | disp = dispatcher([]) 212 | expected_subscribers = MapSet.new([pid]) 213 | 214 | {:ok, 0, disp} = D.subscribe([], {pid, ref1}, disp) 215 | 216 | assert ExUnit.CaptureLog.capture_log(fn -> 217 | assert {:error, _} = D.subscribe([], {pid, ref2}, disp) 218 | assert disp == {[{0, pid, ref1, nil}], 0, expected_subscribers} 219 | end) =~ "already registered" 220 | end 221 | 222 | defp spawn_forwarder do 223 | parent = self() 224 | 225 | spawn_link(fn -> forwarder_loop(parent) end) 226 | end 227 | 228 | defp forwarder_loop(parent) do 229 | receive do 230 | msg -> 231 | send(parent, msg) 232 | forwarder_loop(parent) 233 | end 234 | end 235 | end 236 | -------------------------------------------------------------------------------- /test/gen_stage/demand_dispatcher_test.exs: -------------------------------------------------------------------------------- 1 | defmodule GenStage.DemandDispatcherTest do 2 | use ExUnit.Case, async: true 3 | 4 | import ExUnit.CaptureLog 5 | alias GenStage.DemandDispatcher, as: D 6 | 7 | @default_shuffle_flag false 8 | 9 | defp dispatcher(opts) do 10 | shuffle_demand = Keyword.get(opts, :shuffle_demands_on_first_dispatch, false) 11 | max_demand = Keyword.get(opts, :max_demand) 12 | {:ok, {[], 0, ^max_demand, ^shuffle_demand} = state} = D.init(opts) 13 | state 14 | end 15 | 16 | test "subscribes and cancels" do 17 | pid = self() 18 | ref = make_ref() 19 | disp = dispatcher([]) 20 | 21 | {:ok, 0, disp} = D.subscribe([], {pid, ref}, disp) 22 | assert disp == {[{0, pid, ref}], 0, nil, @default_shuffle_flag} 23 | 24 | {:ok, 0, disp} = D.cancel({pid, ref}, disp) 25 | assert disp == {[], 0, nil, @default_shuffle_flag} 26 | end 27 | 28 | test "subscribes, asks and cancels" do 29 | pid = self() 30 | ref = make_ref() 31 | disp = dispatcher([]) 32 | 33 | # Subscribe, ask and cancel and leave some demand 34 | {:ok, 0, disp} = D.subscribe([], {pid, ref}, disp) 35 | assert disp == {[{0, pid, ref}], 0, nil, @default_shuffle_flag} 36 | 37 | {:ok, 10, disp} = D.ask(10, {pid, ref}, disp) 38 | assert disp == {[{10, pid, ref}], 0, 10, @default_shuffle_flag} 39 | 40 | {:ok, 0, disp} = D.cancel({pid, ref}, disp) 41 | assert disp == {[], 10, 10, @default_shuffle_flag} 42 | 43 | # Subscribe, ask and cancel and leave the same demand 44 | {:ok, 0, disp} = D.subscribe([], {pid, ref}, disp) 45 | assert disp == {[{0, pid, ref}], 10, 10, @default_shuffle_flag} 46 | 47 | {:ok, 0, disp} = D.ask(5, {pid, ref}, disp) 48 | assert disp == {[{5, pid, ref}], 5, 10, @default_shuffle_flag} 49 | 50 | {:ok, 0, disp} = D.cancel({pid, ref}, disp) 51 | assert disp == {[], 10, 10, @default_shuffle_flag} 52 | end 53 | 54 | test "subscribes, asks and dispatches" do 55 | pid = self() 56 | ref = make_ref() 57 | disp = dispatcher([]) 58 | {:ok, 0, disp} = D.subscribe([], {pid, ref}, disp) 59 | 60 | {:ok, 3, disp} = D.ask(3, {pid, ref}, disp) 61 | assert disp == {[{3, pid, ref}], 0, 3, @default_shuffle_flag} 62 | 63 | {:ok, [], disp} = D.dispatch([:a], 1, disp) 64 | assert disp == {[{2, pid, ref}], 0, 3, @default_shuffle_flag} 65 | assert_received {:"$gen_consumer", {_, ^ref}, [:a]} 66 | 67 | {:ok, 3, disp} = D.ask(3, {pid, ref}, disp) 68 | assert disp == {[{5, pid, ref}], 0, 3, @default_shuffle_flag} 69 | 70 | {:ok, [:g, :h], disp} = 
D.dispatch([:b, :c, :d, :e, :f, :g, :h], 7, disp) 71 | assert disp == {[{0, pid, ref}], 0, 3, @default_shuffle_flag} 72 | assert_received {:"$gen_consumer", {_, ^ref}, [:b, :c, :d, :e, :f]} 73 | 74 | {:ok, [:i, :j], disp} = D.dispatch([:i, :j], 2, disp) 75 | assert disp == {[{0, pid, ref}], 0, 3, @default_shuffle_flag} 76 | refute_received {:"$gen_consumer", {_, ^ref}, _} 77 | end 78 | 79 | test "subscribes, asks multiple consumers" do 80 | pid = self() 81 | ref1 = make_ref() 82 | ref2 = make_ref() 83 | ref3 = make_ref() 84 | disp = dispatcher([]) 85 | 86 | {:ok, 0, disp} = D.subscribe([], {pid, ref1}, disp) 87 | {:ok, 0, disp} = D.subscribe([], {pid, ref2}, disp) 88 | {:ok, 0, disp} = D.subscribe([], {pid, ref3}, disp) 89 | 90 | {:ok, 4, disp} = D.ask(4, {pid, ref1}, disp) 91 | {:ok, 2, disp} = D.ask(2, {pid, ref2}, disp) 92 | {:ok, 3, disp} = D.ask(3, {pid, ref3}, disp) 93 | assert disp == {[{4, pid, ref1}, {3, pid, ref3}, {2, pid, ref2}], 0, 4, @default_shuffle_flag} 94 | 95 | {:ok, 2, disp} = D.ask(2, {pid, ref3}, disp) 96 | assert disp == {[{5, pid, ref3}, {4, pid, ref1}, {2, pid, ref2}], 0, 4, @default_shuffle_flag} 97 | 98 | {:ok, 4, disp} = D.ask(4, {pid, ref2}, disp) 99 | assert disp == {[{6, pid, ref2}, {5, pid, ref3}, {4, pid, ref1}], 0, 4, @default_shuffle_flag} 100 | end 101 | 102 | test "subscribes, asks and dispatches to multiple consumers" do 103 | pid = self() 104 | ref1 = make_ref() 105 | ref2 = make_ref() 106 | disp = dispatcher([]) 107 | 108 | {:ok, 0, disp} = D.subscribe([], {pid, ref1}, disp) 109 | {:ok, 0, disp} = D.subscribe([], {pid, ref2}, disp) 110 | 111 | {:ok, 3, disp} = D.ask(3, {pid, ref1}, disp) 112 | {:ok, 2, disp} = D.ask(2, {pid, ref2}, disp) 113 | assert disp == {[{3, pid, ref1}, {2, pid, ref2}], 0, 3, @default_shuffle_flag} 114 | 115 | # One batch fits all 116 | {:ok, [], disp} = D.dispatch([:a, :b, :c, :d, :e], 5, disp) 117 | 118 | assert_received {:"$gen_consumer", {_, ^ref1}, [:a, :b, :c]} 119 | assert_received {:"$gen_consumer", {_, ^ref2}, [:d, :e]} 120 | 121 | {:ok, [:a, :b, :c], disp} = D.dispatch([:a, :b, :c], 3, disp) 122 | assert disp == {[{0, pid, ref1}, {0, pid, ref2}], 0, 3, @default_shuffle_flag} 123 | refute_received {:"$gen_consumer", {_, _}, _} 124 | 125 | # Two batches with left over 126 | {:ok, 3, disp} = D.ask(3, {pid, ref1}, disp) 127 | {:ok, 3, disp} = D.ask(3, {pid, ref2}, disp) 128 | assert disp == {[{3, pid, ref1}, {3, pid, ref2}], 0, 3, @default_shuffle_flag} 129 | 130 | {:ok, [], disp} = D.dispatch([:a, :b], 2, disp) 131 | assert disp == {[{3, pid, ref2}, {1, pid, ref1}], 0, 3, @default_shuffle_flag} 132 | assert_received {:"$gen_consumer", {_, ^ref1}, [:a, :b]} 133 | 134 | {:ok, [], disp} = D.dispatch([:c, :d], 2, disp) 135 | assert disp == {[{1, pid, ref1}, {1, pid, ref2}], 0, 3, @default_shuffle_flag} 136 | assert_received {:"$gen_consumer", {_, ^ref2}, [:c, :d]} 137 | 138 | # Eliminate the left-over 139 | {:ok, [:g], disp} = D.dispatch([:e, :f, :g], 3, disp) 140 | assert disp == {[{0, pid, ref1}, {0, pid, ref2}], 0, 3, @default_shuffle_flag} 141 | assert_received {:"$gen_consumer", {_, ^ref1}, [:e]} 142 | assert_received {:"$gen_consumer", {_, ^ref2}, [:f]} 143 | end 144 | 145 | test "subscribes, asks and dispatches to multiple consumers with shuffled demands" do 146 | pid = self() 147 | ref1 = make_ref() 148 | ref2 = make_ref() 149 | disp = dispatcher(shuffle_demands_on_first_dispatch: true) 150 | 151 | {:ok, 0, disp} = D.subscribe([], {pid, ref1}, disp) 152 | {:ok, 0, disp} = D.subscribe([], {pid, ref2}, disp) 153 | 154 
| {:ok, 3, disp} = D.ask(3, {pid, ref1}, disp) 155 | {:ok, 2, disp} = D.ask(2, {pid, ref2}, disp) 156 | assert disp == {[{3, pid, ref1}, {2, pid, ref2}], 0, 3, true} 157 | 158 | # demands should be shuffled after first dispatch 159 | {:ok, [], disp} = D.dispatch([:a, :b, :c, :d, :e], 5, disp) 160 | 161 | # shuffled flag is reset to false after first dispatch 162 | {[{0, pid, ref1_actual}, {0, pid, ref2_actual}], 0, 3, @default_shuffle_flag} = disp 163 | 164 | if ref1_actual == ref1 do 165 | assert ref2_actual == ref2 166 | assert_received {:"$gen_consumer", {_, ^ref1}, [:a, :b, :c]} 167 | assert_received {:"$gen_consumer", {_, ^ref2}, [:d, :e]} 168 | else 169 | assert ref1_actual == ref2 170 | assert ref2_actual == ref1 171 | assert_received {:"$gen_consumer", {_, ^ref2}, [:a, :b]} 172 | assert_received {:"$gen_consumer", {_, ^ref1}, [:c, :d, :e]} 173 | end 174 | end 175 | 176 | test "delivers info to current process" do 177 | pid = self() 178 | ref1 = make_ref() 179 | ref2 = make_ref() 180 | disp = dispatcher([]) 181 | 182 | {:ok, 0, disp} = D.subscribe([], {pid, ref1}, disp) 183 | {:ok, 0, disp} = D.subscribe([], {pid, ref2}, disp) 184 | {:ok, 3, disp} = D.ask(3, {pid, ref1}, disp) 185 | 186 | {:ok, notify_disp} = D.info(:hello, disp) 187 | assert disp == notify_disp 188 | assert_received :hello 189 | end 190 | 191 | test "warns on demand mismatch" do 192 | pid = self() 193 | ref1 = make_ref() 194 | ref2 = make_ref() 195 | disp = dispatcher(max_demand: 3) 196 | 197 | {:ok, 0, disp} = D.subscribe([], {pid, ref1}, disp) 198 | {:ok, 0, disp} = D.subscribe([], {pid, ref2}, disp) 199 | 200 | log = 201 | capture_log(fn -> 202 | {:ok, 4, disp} = D.ask(4, {pid, ref2}, disp) 203 | disp 204 | end) 205 | 206 | assert log =~ "GenStage producer DemandDispatcher expects a maximum demand of 3" 207 | end 208 | end 209 | -------------------------------------------------------------------------------- /test/gen_stage/partition_dispatcher_test.exs: -------------------------------------------------------------------------------- 1 | defmodule GenStage.PartitionDispatcherTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias GenStage.PartitionDispatcher, as: D 5 | 6 | defp dispatcher(opts) do 7 | {:ok, state} = D.init(opts) 8 | state 9 | end 10 | 11 | defp waiting_and_pending({_, _, waiting, pending, _, _, _}) do 12 | {waiting, pending} 13 | end 14 | 15 | test "subscribes, asks and cancels" do 16 | pid = self() 17 | ref = make_ref() 18 | disp = dispatcher(partitions: 2) 19 | 20 | # Subscribe, ask and cancel and leave some demand 21 | {:ok, 0, disp} = D.subscribe([partition: 0], {pid, ref}, disp) 22 | {:ok, 10, disp} = D.ask(10, {pid, ref}, disp) 23 | assert {10, 0} = waiting_and_pending(disp) 24 | {:ok, 0, disp} = D.cancel({pid, ref}, disp) 25 | assert {10, 10} = waiting_and_pending(disp) 26 | 27 | # Subscribe again and the same demand is back 28 | {:ok, 0, disp} = D.subscribe([partition: 1], {pid, ref}, disp) 29 | {:ok, 0, disp} = D.ask(5, {pid, ref}, disp) 30 | assert {10, 5} = waiting_and_pending(disp) 31 | {:ok, 0, disp} = D.cancel({pid, ref}, disp) 32 | assert {10, 10} = waiting_and_pending(disp) 33 | end 34 | 35 | test "subscribes, asks and dispatches" do 36 | pid = self() 37 | ref = make_ref() 38 | disp = dispatcher(partitions: 1) 39 | {:ok, 0, disp} = D.subscribe([partition: 0], {pid, ref}, disp) 40 | 41 | {:ok, 3, disp} = D.ask(3, {pid, ref}, disp) 42 | {:ok, [], disp} = D.dispatch([1], 1, disp) 43 | assert {2, 0} = waiting_and_pending(disp) 44 | assert_received {:"$gen_consumer", {_, ^ref}, [1]} 45 
| 46 | {:ok, 3, disp} = D.ask(3, {pid, ref}, disp) 47 | assert {5, 0} = waiting_and_pending(disp) 48 | 49 | {:ok, [9, 11], disp} = D.dispatch([2, 5, 6, 7, 8, 9, 11], 7, disp) 50 | assert {0, 0} = waiting_and_pending(disp) 51 | assert_received {:"$gen_consumer", {_, ^ref}, [2, 5, 6, 7, 8]} 52 | end 53 | 54 | test "subscribes, asks and dispatches to custom partitions" do 55 | pid = self() 56 | ref = make_ref() 57 | 58 | hash_fun = fn event -> 59 | {event, if(rem(event, 2) == 0, do: :even, else: :odd)} 60 | end 61 | 62 | disp = dispatcher(partitions: [:odd, :even], hash: hash_fun) 63 | 64 | {:ok, 0, disp} = D.subscribe([partition: :odd], {pid, ref}, disp) 65 | 66 | {:ok, 3, disp} = D.ask(3, {pid, ref}, disp) 67 | {:ok, [], disp} = D.dispatch([1], 1, disp) 68 | assert {2, 0} = waiting_and_pending(disp) 69 | assert_received {:"$gen_consumer", {_, ^ref}, [1]} 70 | 71 | {:ok, 3, disp} = D.ask(3, {pid, ref}, disp) 72 | assert {5, 0} = waiting_and_pending(disp) 73 | 74 | {:ok, [15, 17], disp} = D.dispatch([5, 7, 9, 11, 13, 15, 17], 7, disp) 75 | assert {0, 0} = waiting_and_pending(disp) 76 | assert_received {:"$gen_consumer", {_, ^ref}, [5, 7, 9, 11, 13]} 77 | end 78 | 79 | test "subscribes, asks and dispatches to partitions or none" do 80 | pid = self() 81 | even_ref = make_ref() 82 | odd_ref = make_ref() 83 | 84 | hash_fun = fn event -> 85 | cond do 86 | rem(event, 3) == 0 -> :none 87 | rem(event, 2) == 0 -> {event, :even} 88 | true -> {event, :odd} 89 | end 90 | end 91 | 92 | disp = dispatcher(partitions: [:odd, :even], hash: hash_fun) 93 | 94 | {:ok, 0, disp} = D.subscribe([partition: :even], {pid, even_ref}, disp) 95 | {:ok, 0, disp} = D.subscribe([partition: :odd], {pid, odd_ref}, disp) 96 | 97 | {:ok, 4, disp} = D.ask(4, {pid, even_ref}, disp) 98 | {:ok, 4, disp} = D.ask(4, {pid, odd_ref}, disp) 99 | {:ok, [12], disp} = D.dispatch([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 12, disp) 100 | 101 | assert_received {:"$gen_consumer", {_, ^even_ref}, [2, 4, 8, 10]} 102 | assert_received {:"$gen_consumer", {_, ^odd_ref}, [1, 5, 7, 11]} 103 | assert {0, 0} = waiting_and_pending(disp) 104 | end 105 | 106 | test "buffers events before subscription" do 107 | disp = dispatcher(partitions: 2) 108 | 109 | # Use one subscription to queue 110 | pid = self() 111 | ref = make_ref() 112 | {:ok, 0, disp} = D.subscribe([partition: 1], {pid, ref}, disp) 113 | 114 | {:ok, 5, disp} = D.ask(5, {pid, ref}, disp) 115 | {:ok, [], disp} = D.dispatch([1, 2, 5, 6, 7], 5, disp) 116 | assert {0, 0} = waiting_and_pending(disp) 117 | refute_received {:"$gen_consumer", {_, ^ref}, _} 118 | 119 | {:ok, [8, 9], disp} = D.dispatch([8, 9], 2, disp) 120 | assert {0, 0} = waiting_and_pending(disp) 121 | 122 | # Use another subscription to get events back 123 | pid = self() 124 | ref = make_ref() 125 | {:ok, 0, disp} = D.subscribe([partition: 0], {pid, ref}, disp) 126 | {:ok, 5, disp} = D.ask(5, {pid, ref}, disp) 127 | assert {5, 0} = waiting_and_pending(disp) 128 | assert_received {:"$gen_consumer", {_, ^ref}, [1, 2, 5, 6, 7]} 129 | 130 | {:ok, [], disp} = D.dispatch([1, 2], 2, disp) 131 | assert {3, 0} = waiting_and_pending(disp) 132 | end 133 | 134 | test "buffers events after subscription" do 135 | disp = dispatcher(partitions: 2) 136 | 137 | pid0 = self() 138 | ref0 = make_ref() 139 | {:ok, 0, disp} = D.subscribe([partition: 0], {pid0, ref0}, disp) 140 | {:ok, 5, disp} = D.ask(5, {pid0, ref0}, disp) 141 | assert {5, 0} = waiting_and_pending(disp) 142 | 143 | pid1 = self() 144 | ref1 = make_ref() 145 | {:ok, 0, disp} = 
D.subscribe([partition: 1], {pid1, ref1}, disp) 146 | {:ok, 5, disp} = D.ask(5, {pid1, ref1}, disp) 147 | assert {10, 0} = waiting_and_pending(disp) 148 | 149 | # Send all events to the same partition, half of them will be buffered 150 | {:ok, [], disp} = D.dispatch([1, 2], 2, disp) 151 | {:ok, [], disp} = D.dispatch([5, 6, 7, 1, 2, 5, 6, 7], 8, disp) 152 | assert {0, 0} = waiting_and_pending(disp) 153 | assert_received {:"$gen_consumer", {_, ^ref0}, [1, 2]} 154 | assert_received {:"$gen_consumer", {_, ^ref0}, [5, 6, 7]} 155 | 156 | {:ok, 5, disp} = D.ask(5, {pid0, ref0}, disp) 157 | assert {5, 0} = waiting_and_pending(disp) 158 | assert_received {:"$gen_consumer", {_, ^ref0}, [1, 2, 5, 6, 7]} 159 | end 160 | 161 | test "subscribes, asks and cancels with buffer" do 162 | disp = dispatcher(partitions: 2) 163 | 164 | pid1 = self() 165 | ref1 = make_ref() 166 | {:ok, 0, disp} = D.subscribe([partition: 1], {pid1, ref1}, disp) 167 | {:ok, 5, disp} = D.ask(5, {pid1, ref1}, disp) 168 | assert {5, 0} = waiting_and_pending(disp) 169 | 170 | pid0 = self() 171 | ref0 = make_ref() 172 | {:ok, 0, disp} = D.subscribe([partition: 0], {pid0, ref0}, disp) 173 | {:ok, [], disp} = D.dispatch([1, 2, 5, 6, 7], 5, disp) 174 | assert {0, 0} = waiting_and_pending(disp) 175 | refute_received {:"$gen_consumer", {_, ^ref0}, _} 176 | 177 | # The notification should not count as an event 178 | {:ok, disp} = D.info(:hello, disp) 179 | {:ok, 5, disp} = D.cancel({pid0, ref0}, disp) 180 | assert {5, 0} = waiting_and_pending(disp) 181 | assert_received :hello 182 | end 183 | 184 | test "delivers info to current process" do 185 | pid0 = self() 186 | ref0 = make_ref() 187 | pid1 = self() 188 | ref1 = make_ref() 189 | disp = dispatcher(partitions: 2) 190 | 191 | {:ok, 0, disp} = D.subscribe([partition: 0], {pid0, ref0}, disp) 192 | {:ok, 0, disp} = D.subscribe([partition: 1], {pid1, ref1}, disp) 193 | {:ok, 3, disp} = D.ask(3, {pid1, ref1}, disp) 194 | 195 | {:ok, notify_disp} = D.info(:hello, disp) 196 | assert disp == notify_disp 197 | assert_received :hello 198 | end 199 | 200 | test "does not queue info for non-existing consumers" do 201 | disp = dispatcher(partitions: 2) 202 | D.info(:hello, disp) 203 | assert_received :hello 204 | end 205 | 206 | test "queues info to backed up consumers" do 207 | pid0 = self() 208 | ref0 = make_ref() 209 | pid1 = self() 210 | ref1 = make_ref() 211 | disp = dispatcher(partitions: 2) 212 | 213 | {:ok, 0, disp} = D.subscribe([partition: 0], {pid0, ref0}, disp) 214 | {:ok, 0, disp} = D.subscribe([partition: 1], {pid0, ref1}, disp) 215 | {:ok, 3, disp} = D.ask(3, {pid1, ref1}, disp) 216 | {:ok, [], disp} = D.dispatch([1, 2, 5], 3, disp) 217 | 218 | {:ok, disp} = D.info(:hello, disp) 219 | refute_received :hello 220 | 221 | {:ok, 5, _} = D.ask(5, {pid0, ref0}, disp) 222 | assert_received :hello 223 | end 224 | 225 | test "raises on unknown partition" do 226 | pid = self() 227 | ref = make_ref() 228 | disp = dispatcher(partitions: [:foo, :bar], hash: &{&1, :oops}) 229 | 230 | {:ok, 0, disp} = D.subscribe([partition: :foo], {pid, ref}, disp) 231 | {:ok, 3, disp} = D.ask(3, {pid, ref}, disp) 232 | 233 | assert_raise RuntimeError, ~r/unknown partition :oops/, fn -> 234 | D.dispatch([1, 2, 5], 3, disp) 235 | end 236 | end 237 | 238 | test "errors if the :hash function returns a bad value" do 239 | pid = self() 240 | ref = make_ref() 241 | disp = dispatcher(partitions: [:foo, :bar], hash: fn _ -> :not_a_tuple end) 242 | {:ok, 0, disp} = D.subscribe([partition: :foo], {pid, ref}, disp) 243 | {:ok, 
3, disp} = D.ask(3, {pid, ref}, disp) 244 | 245 | assert_raise RuntimeError, ~r/the :hash function should return/, fn -> 246 | D.dispatch([1, 2, 5], 3, disp) 247 | end 248 | end 249 | 250 | test "errors on init" do 251 | assert_raise ArgumentError, ~r/the enumerable of :partitions is required/, fn -> 252 | dispatcher([]) 253 | end 254 | 255 | assert_raise ArgumentError, ~r/when :partitions contains partitions/, fn -> 256 | dispatcher(partitions: [:even, :odd]) 257 | end 258 | end 259 | 260 | test "errors on subscribe" do 261 | pid = self() 262 | ref = make_ref() 263 | disp = dispatcher(partitions: 2) 264 | 265 | assert_raise ArgumentError, ~r/the :partition option is required when subscribing/, fn -> 266 | D.subscribe([], {pid, ref}, disp) 267 | end 268 | 269 | assert_raise ArgumentError, ~r/the partition 0 is already taken by/, fn -> 270 | {:ok, _, disp} = D.subscribe([partition: 0], {pid, ref}, disp) 271 | D.subscribe([partition: 0], {pid, ref}, disp) 272 | end 273 | 274 | assert_raise ArgumentError, ~r/:partition must be one of \[0, 1]/, fn -> 275 | D.subscribe([partition: -1], {pid, ref}, disp) 276 | end 277 | 278 | assert_raise ArgumentError, ~r/:partition must be one of \[0, 1]/, fn -> 279 | D.subscribe([partition: 2], {pid, ref}, disp) 280 | end 281 | 282 | assert_raise ArgumentError, ~r/:partition must be one of \[0, 1]/, fn -> 283 | D.subscribe([partition: :oops], {pid, ref}, disp) 284 | end 285 | end 286 | end 287 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | ExUnit.start(assert_receive_timeout: 500) 2 | --------------------------------------------------------------------------------
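A minimal usage sketch tying the dispatcher tests above back to the public API (module names are hypothetical; this is not part of the repository): a producer selects GenStage.PartitionDispatcher with the same custom partitions and :hash function exercised in the partition tests, and consumers subscribe to a single partition.

defmodule MyApp.PartitionedProducer do
  use GenStage

  def start_link(opts \\ []) do
    GenStage.start_link(__MODULE__, :ok, opts)
  end

  @impl true
  def init(:ok) do
    # Route even numbers to the :even partition and odd numbers to :odd,
    # mirroring the hash function used in the PartitionDispatcher tests.
    hash = fn event -> {event, if(rem(event, 2) == 0, do: :even, else: :odd)} end

    {:producer, :ok,
     dispatcher: {GenStage.PartitionDispatcher, partitions: [:odd, :even], hash: hash}}
  end

  @impl true
  def handle_demand(_demand, state) do
    # A real producer would emit events here; this sketch emits none.
    {:noreply, [], state}
  end
end

# Each consumer then subscribes to exactly one partition, e.g.:
#
#     GenStage.sync_subscribe(consumer, to: MyApp.PartitionedProducer, partition: :even)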