├── .formatter.exs ├── .github └── workflows │ ├── ci.yml │ └── hex.yml ├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── bench ├── enqueue.exs └── heartbeat.exs ├── config ├── config.exs ├── dev.exs └── test.exs ├── lib ├── exq.ex ├── exq │ ├── adapters │ │ ├── queue.ex │ │ └── queue │ │ │ ├── mock.ex │ │ │ └── redis.ex │ ├── api.ex │ ├── api │ │ └── server.ex │ ├── backoff │ │ ├── behaviour.ex │ │ └── sidekiq_default.ex │ ├── dequeue │ │ ├── behaviour.ex │ │ └── local.ex │ ├── enqueue_api.ex │ ├── enqueuer.ex │ ├── enqueuer │ │ └── server.ex │ ├── heartbeat │ │ ├── monitor.ex │ │ └── server.ex │ ├── manager │ │ └── server.ex │ ├── middleware │ │ ├── behaviour.ex │ │ ├── job.ex │ │ ├── logger.ex │ │ ├── manager.ex │ │ ├── pipeline.ex │ │ ├── server.ex │ │ ├── stats.ex │ │ ├── telemetry.ex │ │ └── unique.ex │ ├── mock.ex │ ├── node │ │ └── server.ex │ ├── node_identifier │ │ ├── behaviour.ex │ │ └── hostname_identifier.ex │ ├── redis │ │ ├── connection.ex │ │ ├── heartbeat.ex │ │ ├── job_queue.ex │ │ ├── job_stat.ex │ │ └── script.ex │ ├── scheduler │ │ └── server.ex │ ├── serializers │ │ ├── behaviour.ex │ │ └── json_serializer.ex │ ├── stats │ │ └── server.ex │ ├── support │ │ ├── binary.ex │ │ ├── coercion.ex │ │ ├── config.ex │ │ ├── job.ex │ │ ├── mode.ex │ │ ├── node.ex │ │ ├── opts.ex │ │ ├── process.ex │ │ ├── randomize.ex │ │ ├── redis.ex │ │ └── time.ex │ ├── worker │ │ ├── metadata.ex │ │ ├── server.ex │ │ └── supervisor.ex │ └── worker_drainer │ │ └── server.ex └── mix │ └── tasks │ └── exq.run.ex ├── mix.exs ├── mix.lock └── test ├── api_test.exs ├── config_test.exs ├── exq └── heartbeat │ └── monitor_test.exs ├── exq_test.exs ├── failure_scenarios_test.exs ├── fake_mode_test.exs ├── flaky_connection_test.exs ├── inline_mode_test.exs ├── job_queue_test.exs ├── job_stat_test.exs ├── json_serializer_test.exs ├── metadata_test.exs ├── middleware_test.exs ├── mode_test.exs ├── performance_test.exs ├── readonly_reconnect_test.exs ├── 
redis_test.exs ├── test-redis-replica.conf ├── test-redis.conf ├── test-sentinel.conf ├── test_helper.exs └── worker_test.exs /.formatter.exs: -------------------------------------------------------------------------------- 1 | # Used by "mix format" 2 | [ 3 | inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] 4 | ] 5 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | push: 5 | pull_request: 6 | branches: 7 | - master 8 | 9 | jobs: 10 | check_duplicate_runs: 11 | name: Check for duplicate runs 12 | continue-on-error: true 13 | runs-on: ubuntu-latest 14 | outputs: 15 | should_skip: ${{ steps.skip_check.outputs.should_skip }} 16 | steps: 17 | - id: skip_check 18 | uses: fkirc/skip-duplicate-actions@master 19 | with: 20 | concurrent_skipping: always 21 | cancel_others: true 22 | skip_after_successful_duplicate: true 23 | paths_ignore: '["**/README.md", "**/CHANGELOG.md", "**/LICENSE"]' 24 | do_not_skip: '["pull_request"]' 25 | 26 | test: 27 | name: Elixir ${{ matrix.elixir }} / OTP ${{ matrix.otp }} 28 | runs-on: ubuntu-22.04 29 | needs: check_duplicate_runs 30 | if: ${{ needs.check_duplicate_runs.outputs.should_skip != 'true' }} 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | include: 36 | - elixir: 1.12 37 | otp: 24 38 | - elixir: 1.14 39 | otp: 25 40 | - elixir: 1.15 41 | otp: 25 42 | - elixir: 1.17 43 | otp: 27 44 | - elixir: 1.18 45 | otp: 27 46 | check_warnings: true 47 | check_format: true 48 | steps: 49 | - name: Checkout 50 | uses: actions/checkout@v3 51 | 52 | - name: Set up Elixir 53 | uses: erlef/setup-beam@v1 54 | with: 55 | elixir-version: ${{ matrix.elixir }} 56 | otp-version: ${{ matrix.otp }} 57 | 58 | - name: Set up Redis Server 59 | run: sudo apt-get install redis-server -y 60 | 61 | - name: Restore deps cache 62 | uses: actions/cache@v3 63 | with: 64 | 
path: | 65 | deps 66 | _build 67 | key: ${{ runner.os }}-deps-${{ matrix.otp }}-${{ matrix.elixir }}-${{ hashFiles('**/mix.lock') }}-git-${{ github.sha }} 68 | restore-keys: | 69 | ${{ runner.os }}-deps-${{ matrix.otp }}-${{ matrix.elixir }}-${{ hashFiles('**/mix.lock') }} 70 | ${{ runner.os }}-deps-${{ matrix.otp }}-${{ matrix.elixir }} 71 | 72 | - name: Install package dependencies 73 | run: mix deps.get 74 | 75 | - name: Remove compiled application files 76 | run: mix clean 77 | 78 | - name: Check Format 79 | run: mix format --check-formatted 80 | if: ${{ matrix.check_format }} 81 | 82 | - name: Compile dependencies 83 | run: mix compile 84 | env: 85 | MIX_ENV: test 86 | 87 | - name: Check warnings 88 | run: mix compile --force --warnings-as-errors 89 | if: ${{ matrix.check_warnings }} 90 | env: 91 | MIX_ENV: test 92 | 93 | - name: Run unit tests 94 | run: mix coveralls.github --no-start 95 | env: 96 | MIX_ENV: test 97 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 98 | -------------------------------------------------------------------------------- /.github/workflows/hex.yml: -------------------------------------------------------------------------------- 1 | env: 2 | ELIXIR_VERSION: "1.13.4" 3 | OTP_VERSION: "25.0.1" 4 | on: 5 | push: 6 | tags: 7 | - '*' 8 | jobs: 9 | publish: 10 | runs-on: ubuntu-22.04 11 | steps: 12 | - uses: actions/checkout@v3 13 | - name: Set up Elixir 14 | uses: erlef/setup-beam@v1 15 | with: 16 | elixir-version: ${{ env.ELIXIR_VERSION }} 17 | otp-version: ${{ env.OTP_VERSION }} 18 | - name: Publish to Hex 19 | uses: synchronal/hex-publish-action@v3 20 | with: 21 | name: exq 22 | key: ${{ secrets.HEX_PM_KEY }} 23 | tag-release: false 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 
2 | /_build/ 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover/ 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps/ 9 | 10 | # Where third-party dependencies like ExDoc output generated docs. 11 | /doc/ 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Ignore package tarball (built via "mix hex.build"). 23 | exq-*.tar 24 | 25 | # Temporary files for e.g. tests. 26 | /tmp/ 27 | 28 | # Misc. 29 | /ebin 30 | priv/tmp_downloads/ 31 | *.swp 32 | *.log 33 | .*.swp 34 | *.swo 35 | stdout 36 | dump.rdb 37 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). 5 | 6 | ## [Unreleased] 7 | 8 | ### Changed 9 | 10 | - Resolved deprecation warnings [#499](https://github.com/akira/exq/pull/499) by @ananthakumaran. 11 | - Added the ability to clear unique tokens when deleting a job [#497](https://github.com/akira/exq/pull/497) by @dmitrykleymenov 12 | 13 | ### Removed 14 | 15 | - Dropped support for Elixir versions prior to 1.12. 16 | 17 | ## [0.19.0] - 2023-07-01 18 | 19 | ### Changed 20 | - Relax poison dependency #485 by @frahugo 21 | 22 | ## [0.18.0] - 2023-06-01 23 | 24 | ### Added 25 | - Add enqueue_all for enqueuing a batch of jobs atomically #483 by @meysius 26 | 27 | ### Fixed 28 | - Fix namespaced keys in scripts for ACL redis #481 by @korialis 29 | 30 | ## [0.17.0] - 2022-11-25 31 | 32 | ### Added 33 | - Add Exq unique job feature, allow to enforce a single instance of job! 
#469 by @ananthakumaran 34 | 35 | ### Fixed 36 | - Setup default redis timeout config #475 by @ananthakumaran 37 | 38 | ### Changed 39 | - Use erlef/setup-beam and bump versions #476 by @kianmeng 40 | 41 | ## [0.16.2] - 2022-04-15 42 | 43 | ### Added 44 | - API to send signal to worker nodes by @ananthakumaran 45 | ### Changed 46 | 47 | ### Fixed 48 | - Fixed typos #464 by @kianmeng 49 | 50 | ## [0.16.1] - 2021-12-13 51 | 52 | ### Added 53 | 54 | ### Changed 55 | 56 | ### Fixed 57 | - Fix @doc redefined warnings #463 by @deepfryed 58 | 59 | ## [0.16.0] - 2021-12-12 60 | 61 | NOTE: Please read PR #458 for upgrade instructions. 62 | 63 | ### Added 64 | - Add retried_at field for Sidekiq compatibility #450 by @ananthakumaran 65 | - Add apis to support exq_ui #452 by @ananthakumaran 66 | - Add documentation about mode: :enqueuer and Exq.Enqueuer.queue_in #456 by @dbernheisel 67 | - Add api to immediately enqueue jobs from retry/scheduled queue #461 by @ananthakumaran 68 | - Add api to re-enqueue dead job #462 by @ananthakumaran 69 | 70 | ### Changed 71 | - Add Sidekiq 5 compatibility #458 by @ananthakumaran 72 | - Use latest Phoenix child spec style #459 by @vovayartsev 73 | - Replace deprecated supervisor calls #453 by @vkuznetsov 74 | 75 | ### Fixed 76 | - Handle timeouts on middleware pipeline #444 by @ananthakumaran 77 | - Use the correct scheduled time for enqueued_at field for mock #449 by @ananthakumaran 78 | 79 | 80 | ## [0.15.0] - 2021-07-19 81 | 82 | ### Added 83 | - Add dequeue behavior for ability to implement things like concurrency control #421 by @ananthakumaran 84 | - Api Module Documentation #440 by @kevin-j-m 85 | 86 | ### Changed 87 | - Use Lua script to schedule job for better performance and memory leak fix #427 by @ananthakumaran 88 | - Logging fixes #429 by @rraub 89 | - Relax poison dependency #431 by @ananthakumaran 90 | - Use github actions instead of Travis #433 by @ananthakumaran 91 | - Use the same module conversion logic in mock as
well #434 by @ananthakumaran 92 | - use Task instead of spawn_link for starting workers #436 by @mitchellhenke 93 | 94 | ### Fixed 95 | - re-enqueue unfinished jobs to the beginning of queue on restart #424 by @ananthakumaran 96 | - Fix for sentinel 0.11.0+ #428 by @ananthakumaran 97 | - Fixes for generated HTML docs by #442 @kianmeng 98 | 99 | 100 | ## [0.14.0] - 2020-08-08 101 | 102 | ### Added 103 | - Node heartbeat functionality for dynamic environments #392 by @ananthakumaran (disabled by default). 104 | - Exq telemetry events #414 by @hez 105 | - Allow custom job IDs #417 by @bradediger 106 | 107 | ### Changed 108 | - Don't log Redis disconnects #420 by @isaacsanders 109 | 110 | ### Fixed 111 | - exq.run mix task starts dependent apps as well #408 by @ananthakumaran 112 | - Cast queue level concurrency #401 by @ananthakumaran 113 | - Fix documentation typo #423 by @LionsHead 114 | - Fix conflicting unit in docs #419 by @JamesFerguson 115 | 116 | ## [0.13.5] - 2020-01-01 117 | 118 | ### Added 119 | - Queue adapter for mock testing @ananthakumaran and @samidarko 120 | 121 | ## [0.13.4] - 2019-11-03 122 | 123 | ### Fixed 124 | - Remove unnecessary serialization of enqueue calls #390 by @ananthakumaran and @sb8244 125 | - Fix warnings by @hkrutzer #394 126 | - Start all the apps during test by @ananthakumaran #391 127 | - Replace KEYS with a cursored call to SCAN for realtime stats by @neslinesli93 #384 128 | 129 | ## [0.13.3] - 2019-06-16 130 | 131 | ### Added 132 | - Handle AWS Elasticache Redis DNS failover. This ensures persistent connections are shutdown, forcing a reconnect in scenarios where a Redis node in a HA cluster is switched to READONLY mode by @deepfryed. 133 | 134 | ## [0.13.2] - 2019-03-15 135 | 136 | ### Fixed 137 | - Fix json_library issue #369 needing addition to config file. Add default value. 138 | 139 | ## [0.13.1] - 2019-02-24 140 | 141 | ### Added 142 | - Support for configurable JSON parser, with Jason as default by @chulkilee. 
143 | 144 | ### Fixed 145 | - Remove redundant time output for worker log by @akira. 146 | - Fix deprecated time warning by @gvl. 147 | 148 | ## [0.13.0] - 2019-01-21 149 | 150 | ### Removed 151 | - Due to library dependencies, support for Elixir 1.3, Elixir 1.4 and OTP 18.0, OTP 19.0 has been removed. 152 | - Redix version older than 0.8.1 is no longer supported. 153 | - Config options `reconnect_on_sleep` and `redis_timeout` are now removed. 154 | 155 | ### Added 156 | - Support for Redix >= 0.8.1 by @ryansch and @ananthakumaran. 157 | - Configuration for Mix Format by @chulkilee. 158 | - Use :microsecond vs :microseconds by @KalvinHom. 159 | 160 | ### Changed 161 | - Redis options are now passed in via `redis_options` by @ryansch and @ananthakumaran. 162 | - Removed redix_sentinel dependency, now supported by new Redix version by @ananthakumaran. 163 | 164 | ## [0.12.2] - 2018-10-14 165 | 166 | ### Fixed 167 | - Don't assume redis_opts is enumerable by @ryansch. 168 | 169 | ### Added 170 | - Add {:system, VAR} format support for more config params by @LysanderGG 171 | - Allow setting mode to both [:enqueuer, :api] by @buob 172 | 173 | ### Changed 174 | - Specify less than 0.8.0 on redix version in mix.exs by @buob 175 | 176 | ## [0.12.1] - 2018-07-13 177 | 178 | ### Fixed 179 | - Cleanup packaging for `elixir_uuid` change. 180 | 181 | ## [0.12.0] - 2018-07-12 182 | 183 | ### Fixed 184 | - Change `uuid` to `elixir_uuid` which has been renamed. This will prevent future namespace clashes by @tzilist. 185 | 186 | ## [0.11.0] - 2018-05-12 187 | 188 | ### Added 189 | - Trim dead jobs queue after certain size by @ananthakumaran. 190 | - Add an api to list all subscriptions (active queues) by @robobakery. 191 | - Have top supervisor wait for worker drainer to gracefully terminate @frahugo. 192 | 193 | ## [0.10.1] - 2018-02-11 194 | 195 | ### Fixed 196 | - Fix retry for Sidekiq job format using retry => true by @deepfryed. 
197 | 198 | ## [0.10.0] - 2018-02-11 199 | 200 | ### Fixed 201 | - Remove Password logging by @akira. 202 | 203 | ### Added 204 | - Redis Sentinel support by @ananthakumaran. 205 | - Make redis module name and start_link args configurable @ananthakumaran. 206 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2014 Alex Kira 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | -------------------------------------------------------------------------------- /bench/enqueue.exs: -------------------------------------------------------------------------------- 1 | defmodule BenchmarkWorker do 2 | def perform() do 3 | end 4 | end 5 | 6 | {:ok, _} = Application.ensure_all_started(:exq) 7 | # :warn is deprecated as a Logger level (use :warning on Elixir 1.15+, which CI targets) 8 | Logger.configure(level: :warning) 9 | 10 | Benchee.run( 11 | %{ 12 | "enqueue" => fn -> {:ok, _} = Exq.enqueue(Exq, "default", BenchmarkWorker, []) end 13 | }, 14 | parallel: 100 15 | ) 16 | -------------------------------------------------------------------------------- /bench/heartbeat.exs: -------------------------------------------------------------------------------- 1 | {:ok, _} = Application.ensure_all_started(:redix) 2 | 3 | defmodule FastWorker do 4 | def perform() do 5 | end 6 | end 7 | 8 | defmodule SlowWorker do 9 | def perform() do 10 | Process.sleep(60_000) 11 | end 12 | end 13 | 14 | defmodule LoadGenerator do 15 | def generate(exq) do 16 | {:ok, _} = Exq.enqueue(Exq, "default", FastWorker, []) 17 | {:ok, _} = Exq.enqueue(Exq, "default", SlowWorker, []) 18 | Process.sleep(1000) 19 | generate(exq) 20 | end 21 | end 22 | 23 | defmodule NodeIdentifier.UUID do 24 | def node_id do 25 | Agent.get(:agent, & &1) 26 | end 27 | end 28 | 29 | Application.put_env(:exq, :node_identifier, NodeIdentifier.UUID) 30 | {:ok, _} = Agent.start_link(fn -> "controller" end, name: :agent) 31 | 32 | {:ok, controller} = 33 | Exq.start_link(name: Exq, concurrency: 0, heartbeat_enable: true, heartbeat_interval: 1000) 34 | 35 | spawn(fn -> 36 | LoadGenerator.generate(controller) 37 | end) 38 | 39 | _workers = 40 | for _i <- 1..50 do 41 | id = UUID.uuid4() 42 | Agent.update(:agent, fn _ -> id end) 43 | 44 | {:ok, worker} = 45 | Exq.start_link( 46 | name: String.to_atom(id), 47 | concurrency: 10, 48 | heartbeat_enable: true, 49 | heartbeat_interval: 1000 50 | ) 51 | 52 | worker 53 | end 54 | 55 | Process.sleep(:infinity) 56 |
-------------------------------------------------------------------------------- /config/config.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :logger, :console, format: "\n$date $time [$level]: $message \n" 4 | 5 | config :exq, 6 | name: Exq, 7 | host: "127.0.0.1", 8 | port: 6379, 9 | database: 0, 10 | redis_options: [], 11 | namespace: "exq", 12 | queues: ["default"], 13 | scheduler_enable: true, 14 | concurrency: 100, 15 | scheduler_poll_timeout: 200, 16 | poll_timeout: 100, 17 | genserver_timeout: 5000, 18 | shutdown_timeout: 5000, 19 | dead_max_jobs: 10_000, 20 | # 6 months 21 | dead_timeout_in_seconds: 180 * 24 * 60 * 60, 22 | max_retries: 25, 23 | json_library: Jason, 24 | middleware: [ 25 | Exq.Middleware.Stats, 26 | Exq.Middleware.Job, 27 | Exq.Middleware.Manager, 28 | Exq.Middleware.Unique, 29 | Exq.Middleware.Logger 30 | ] 31 | 32 | import_config "#{Mix.env()}.exs" 33 | -------------------------------------------------------------------------------- /config/dev.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | -------------------------------------------------------------------------------- /config/test.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | config :logger, :console, format: "\n$date $time [$level]: $message \n" 4 | 5 | config :exq, 6 | name: Exq, 7 | host: System.get_env("REDIS_HOST") || "127.0.0.1", 8 | port: System.get_env("REDIS_PORT") || 6555, 9 | url: nil, 10 | namespace: "test", 11 | queues: ["default"], 12 | heartbeat_enable: true, 13 | heartbeat_interval: 500, 14 | missed_heartbeats_allowed: 3, 15 | concurrency: :infinite, 16 | scheduler_enable: false, 17 | scheduler_poll_timeout: 20, 18 | poll_timeout: 10, 19 | redis_timeout: 5000, 20 | genserver_timeout: 5000, 21 | test_with_local_redis: true, 22 | max_retries: 0, 23 | stats_flush_interval: 5, 
24 | stats_batch_size: 1, 25 | middleware: [ 26 | Exq.Middleware.Stats, 27 | Exq.Middleware.Job, 28 | Exq.Middleware.Manager, 29 | Exq.Middleware.Unique, 30 | Exq.Middleware.Telemetry 31 | ], 32 | queue_adapter: Exq.Adapters.Queue.Mock 33 | -------------------------------------------------------------------------------- /lib/exq.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq do 2 | require Logger 3 | use Application 4 | 5 | import Exq.Support.Opts, only: [top_supervisor: 1] 6 | alias Exq.Worker.Metadata 7 | alias Exq.Support.Config 8 | 9 | # Mixin Enqueue API 10 | use Exq.Enqueuer.EnqueueApi 11 | 12 | def child_spec(exq_options \\ []) do 13 | %{ 14 | id: __MODULE__, 15 | type: :supervisor, 16 | start: {__MODULE__, :start_link, [exq_options]} 17 | } 18 | end 19 | 20 | # See http://elixir-lang.org/docs/stable/elixir/Application.html 21 | # for more information on OTP Applications 22 | def start(_type, _args) do 23 | if Config.get(:start_on_application) do 24 | start_link() 25 | else 26 | # Don't start Exq 27 | Supervisor.start_link([], strategy: :one_for_one) 28 | end 29 | end 30 | 31 | # Exq methods 32 | def start_link(opts \\ []) do 33 | children = Exq.Support.Mode.children(opts) 34 | 35 | Supervisor.start_link(children, 36 | name: top_supervisor(opts[:name]), 37 | strategy: :one_for_one, 38 | max_restarts: 20, 39 | max_seconds: 5 40 | ) 41 | end 42 | 43 | def stop(nil), do: :ok 44 | def stop(pid) when is_pid(pid), do: Process.exit(pid, :shutdown) 45 | 46 | def stop(name) do 47 | name 48 | |> whereis 49 | |> stop 50 | end 51 | 52 | def whereis(name) do 53 | name 54 | |> top_supervisor 55 | |> Process.whereis() 56 | end 57 | 58 | @doc """ 59 | List all subscriptions(active queues) 60 | * `pid` - PID for Exq Manager or Enqueuer to handle this 61 | """ 62 | def subscriptions(pid) do 63 | GenServer.call(pid, :subscriptions) 64 | end 65 | 66 | @doc """ 67 | Subscribe to a queue - ie. 
listen to queue for jobs 68 | * `pid` - PID for Exq Manager or Enqueuer to handle this 69 | * `queue` - Name of queue 70 | * `concurrency` - Optional argument specifying max concurrency for queue 71 | """ 72 | def subscribe(pid, queue) do 73 | GenServer.call(pid, {:subscribe, queue}) 74 | end 75 | 76 | def subscribe(pid, queue, concurrency) do 77 | GenServer.call(pid, {:subscribe, queue, concurrency}) 78 | end 79 | 80 | @doc """ 81 | Unsubscribe from a queue - ie. stop listening to queue for jobs 82 | * `pid` - PID for Exq Manager or Enqueuer to handle this 83 | * `queue` - Name of queue 84 | """ 85 | def unsubscribe(pid, queue) do 86 | GenServer.call(pid, {:unsubscribe, queue}) 87 | end 88 | 89 | @doc """ 90 | Unsubscribe from all queues - ie. stop listening for jobs 91 | * `pid` - PID for Exq Manager or Enqueuer to handle this 92 | """ 93 | def unsubscribe_all(pid) do 94 | GenServer.call(pid, :unsubscribe_all) 95 | end 96 | 97 | @doc """ 98 | Get the job metadata 99 | * `name` - registered name of Exq. Only necessary if the custom 100 | name option is used when starting Exq. Defaults to Exq 101 | * `pid` - pid of the worker. Defaults to self(). 102 | """ 103 | def worker_job(name \\ nil, pid \\ self()) do 104 | metadata = Metadata.server_name(name) 105 | Metadata.lookup(metadata, pid) 106 | end 107 | end 108 | -------------------------------------------------------------------------------- /lib/exq/adapters/queue.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Adapters.Queue do 2 | @moduledoc ~S""" 3 | Behaviour for creating Exq queue adapters. 
4 | 5 | ## Example 6 | 7 | defmodule Exq.Adapters.Queue.CustomAdapter do 8 | @behaviour Exq.Adapters.Queue 9 | def enqueue(pid, queue, worker, args, options) do 10 | {:ok, apply(worker, :perform, args)} 11 | end 12 | 13 | def enqueue_at(pid, queue, time, worker, args, options) do 14 | enqueue_somehow(pid, queue, time, worker, args, options) 15 | end 16 | 17 | def enqueue_in(pid, queue, offset, worker, args, options) do 18 | enqueue_in_somehow(pid, queue, offset, worker, args, options) 19 | end 20 | end 21 | 22 | """ 23 | 24 | @typedoc "The GenServer name" 25 | @type name :: atom | {:global, term} | {:via, module, term} 26 | 27 | @typedoc "The server reference" 28 | @type server :: pid | name | {atom, node} 29 | 30 | @callback enqueue(server, String.t(), module(), list(), list()) :: tuple() 31 | @callback enqueue_at(server, String.t(), DateTime.t(), module(), list(), list()) :: tuple() 32 | @callback enqueue_in(server, String.t(), integer(), module(), list(), list()) :: tuple() 33 | @callback enqueue_all(server, list()) :: tuple() 34 | end 35 | -------------------------------------------------------------------------------- /lib/exq/adapters/queue/mock.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Adapters.Queue.Mock do 2 | @moduledoc """ 3 | Mock queue. 4 | 5 | Designed to be used when testing your application. 
6 | """ 7 | 8 | @behaviour Exq.Adapters.Queue 9 | 10 | defdelegate enqueue(pid, queue, worker, args, options), to: Exq.Mock 11 | 12 | defdelegate enqueue_at(pid, queue, time, worker, args, options), to: Exq.Mock 13 | 14 | defdelegate enqueue_in(pid, queue, offset, worker, args, options), to: Exq.Mock 15 | 16 | defdelegate enqueue_all(pid, jobs), to: Exq.Mock 17 | end 18 | -------------------------------------------------------------------------------- /lib/exq/adapters/queue/redis.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Adapters.Queue.Redis do 2 | @moduledoc """ 3 | Redis based Asynchronous queue. 4 | 5 | Enqueue the job by using the GenServer API. Default queue. Designed to be used in production. 6 | """ 7 | alias Exq.Support.Config 8 | alias Exq.Redis.JobQueue 9 | 10 | @behaviour Exq.Adapters.Queue 11 | 12 | def enqueue(pid, queue, worker, args, options) do 13 | {redis, namespace} = GenServer.call(pid, :redis, Config.get(:genserver_timeout)) 14 | JobQueue.enqueue(redis, namespace, queue, worker, args, options) 15 | end 16 | 17 | def enqueue_at(pid, queue, time, worker, args, options) do 18 | {redis, namespace} = GenServer.call(pid, :redis, Config.get(:genserver_timeout)) 19 | JobQueue.enqueue_at(redis, namespace, queue, time, worker, args, options) 20 | end 21 | 22 | def enqueue_in(pid, queue, offset, worker, args, options) do 23 | {redis, namespace} = GenServer.call(pid, :redis, Config.get(:genserver_timeout)) 24 | JobQueue.enqueue_in(redis, namespace, queue, offset, worker, args, options) 25 | end 26 | 27 | def enqueue_all(pid, jobs) do 28 | {redis, namespace} = GenServer.call(pid, :redis, Config.get(:genserver_timeout)) 29 | JobQueue.enqueue_all(redis, namespace, jobs) 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /lib/exq/api/server.ex: -------------------------------------------------------------------------------- 1 | defmodule 
Exq.Api.Server do 2 | @moduledoc """ 3 | The API deals with getting current stats for the UI / API. 4 | """ 5 | 6 | alias Exq.Support.Config 7 | alias Exq.Redis.JobQueue 8 | alias Exq.Redis.JobStat 9 | 10 | use GenServer 11 | 12 | defmodule State do 13 | defstruct redis: nil, namespace: nil 14 | end 15 | 16 | def start_link(opts \\ []) do 17 | GenServer.start_link(__MODULE__, opts, name: server_name(opts[:name])) 18 | end 19 | 20 | ## =========================================================== 21 | ## GenServer callbacks 22 | ## =========================================================== 23 | 24 | def init(opts) do 25 | {:ok, %State{redis: opts[:redis], namespace: opts[:namespace]}} 26 | end 27 | 28 | def handle_call(:processes, _from, state) do 29 | processes = JobStat.processes(state.redis, state.namespace) 30 | {:reply, {:ok, processes}, state} 31 | end 32 | 33 | def handle_call(:busy, _from, state) do 34 | count = JobStat.busy(state.redis, state.namespace) 35 | {:reply, {:ok, count}, state} 36 | end 37 | 38 | def handle_call(:nodes, _from, state) do 39 | nodes = JobStat.nodes(state.redis, state.namespace) 40 | {:reply, {:ok, nodes}, state} 41 | end 42 | 43 | def handle_call({:stats, key}, _from, state) do 44 | count = JobStat.get_count(state.redis, state.namespace, key) 45 | {:reply, {:ok, count}, state} 46 | end 47 | 48 | def handle_call({:stats, key, dates}, _from, state) do 49 | counts = JobStat.get_counts(state.redis, state.namespace, Enum.map(dates, &"#{key}:#{&1}")) 50 | {:reply, {:ok, counts}, state} 51 | end 52 | 53 | def handle_call(:queues, _from, state) do 54 | queues = JobQueue.list_queues(state.redis, state.namespace) 55 | {:reply, {:ok, queues}, state} 56 | end 57 | 58 | def handle_call({:failed, options}, _from, state) do 59 | jobs = JobQueue.failed(state.redis, state.namespace, options) 60 | {:reply, {:ok, jobs}, state} 61 | end 62 | 63 | def handle_call({:retries, options}, _from, state) do 64 | jobs = JobQueue.scheduled_jobs(state.redis, 
state.namespace, "retry", options) 65 | {:reply, {:ok, jobs}, state} 66 | end 67 | 68 | def handle_call(:jobs, _from, state) do 69 | jobs = JobQueue.jobs(state.redis, state.namespace) 70 | {:reply, {:ok, jobs}, state} 71 | end 72 | 73 | def handle_call({:jobs, :scheduled, options}, _from, state) do 74 | jobs = JobQueue.scheduled_jobs(state.redis, state.namespace, "schedule", options) 75 | {:reply, {:ok, jobs}, state} 76 | end 77 | 78 | def handle_call({:jobs, :scheduled_with_scores}, _from, state) do 79 | jobs = JobQueue.scheduled_jobs_with_scores(state.redis, state.namespace, "schedule") 80 | {:reply, {:ok, jobs}, state} 81 | end 82 | 83 | def handle_call({:jobs, queue, options}, _from, state) do 84 | jobs = JobQueue.jobs(state.redis, state.namespace, queue, options) 85 | {:reply, {:ok, jobs}, state} 86 | end 87 | 88 | def handle_call(:queue_size, _from, state) do 89 | sizes = JobQueue.queue_size(state.redis, state.namespace) 90 | {:reply, {:ok, sizes}, state} 91 | end 92 | 93 | def handle_call({:queue_size, queue}, _from, state) do 94 | size = JobQueue.queue_size(state.redis, state.namespace, queue) 95 | {:reply, {:ok, size}, state} 96 | end 97 | 98 | def handle_call(:scheduled_size, _from, state) do 99 | size = JobQueue.scheduled_size(state.redis, state.namespace) 100 | {:reply, {:ok, size}, state} 101 | end 102 | 103 | def handle_call(:retry_size, _from, state) do 104 | size = JobQueue.retry_size(state.redis, state.namespace) 105 | {:reply, {:ok, size}, state} 106 | end 107 | 108 | def handle_call(:failed_size, _from, state) do 109 | size = JobQueue.failed_size(state.redis, state.namespace) 110 | {:reply, {:ok, size}, state} 111 | end 112 | 113 | def handle_call({:find_failed, jid}, _from, state) do 114 | {:ok, job} = JobStat.find_failed(state.redis, state.namespace, jid) 115 | {:reply, {:ok, job}, state} 116 | end 117 | 118 | def handle_call({:find_failed, score, jid, options}, _from, state) do 119 | {:ok, job} = JobStat.find_failed(state.redis, 
state.namespace, score, jid, options) 120 | {:reply, {:ok, job}, state} 121 | end 122 | 123 | def handle_call({:find_job, queue, jid}, _from, state) do 124 | response = JobQueue.find_job(state.redis, state.namespace, jid, queue) 125 | {:reply, response, state} 126 | end 127 | 128 | def handle_call({:find_scheduled, jid}, _from, state) do 129 | {:ok, job} = JobQueue.find_job(state.redis, state.namespace, jid, :scheduled) 130 | {:reply, {:ok, job}, state} 131 | end 132 | 133 | def handle_call({:find_scheduled, score, jid, options}, _from, state) do 134 | {:ok, job} = JobStat.find_scheduled(state.redis, state.namespace, score, jid, options) 135 | {:reply, {:ok, job}, state} 136 | end 137 | 138 | def handle_call({:find_retry, jid}, _from, state) do 139 | {:ok, job} = JobQueue.find_job(state.redis, state.namespace, jid, :retry) 140 | {:reply, {:ok, job}, state} 141 | end 142 | 143 | def handle_call({:find_retry, score, jid, options}, _from, state) do 144 | {:ok, job} = JobStat.find_retry(state.redis, state.namespace, score, jid, options) 145 | {:reply, {:ok, job}, state} 146 | end 147 | 148 | def handle_call({:remove_queue, queue}, _from, state) do 149 | JobStat.remove_queue(state.redis, state.namespace, queue) 150 | {:reply, :ok, state} 151 | end 152 | 153 | def handle_call({:remove_job, queue, jid}, _from, state) do 154 | JobQueue.remove_job(state.redis, state.namespace, queue, jid) 155 | {:reply, :ok, state} 156 | end 157 | 158 | def handle_call({:remove_enqueued_jobs, queue, raw_jobs, options}, _from, state) do 159 | JobQueue.remove_enqueued_jobs(state.redis, state.namespace, queue, raw_jobs) 160 | 161 | if Keyword.get(options, :clear_unique_tokens, false) do 162 | JobQueue.unlock_jobs(state.redis, state.namespace, raw_jobs) 163 | end 164 | 165 | {:reply, :ok, state} 166 | end 167 | 168 | def handle_call({:remove_retry, jid}, _from, state) do 169 | JobQueue.remove_retry(state.redis, state.namespace, jid) 170 | {:reply, :ok, state} 171 | end 172 | 173 | def 
handle_call({:remove_retry_jobs, raw_jobs, options}, _from, state) do 174 | JobQueue.remove_retry_jobs(state.redis, state.namespace, raw_jobs) 175 | 176 | if Keyword.get(options, :clear_unique_tokens, false) do 177 | JobQueue.unlock_jobs(state.redis, state.namespace, raw_jobs) 178 | end 179 | 180 | {:reply, :ok, state} 181 | end 182 | 183 | def handle_call({:dequeue_retry_jobs, raw_jobs}, _from, state) do 184 | result = JobQueue.dequeue_retry_jobs(state.redis, state.namespace, raw_jobs) 185 | {:reply, result, state} 186 | end 187 | 188 | def handle_call({:remove_scheduled, jid}, _from, state) do 189 | JobQueue.remove_scheduled(state.redis, state.namespace, jid) 190 | {:reply, :ok, state} 191 | end 192 | 193 | def handle_call({:remove_scheduled_jobs, raw_jobs, options}, _from, state) do 194 | JobQueue.remove_scheduled_jobs(state.redis, state.namespace, raw_jobs) 195 | 196 | if Keyword.get(options, :clear_unique_tokens, false) do 197 | JobQueue.unlock_jobs(state.redis, state.namespace, raw_jobs) 198 | end 199 | 200 | {:reply, :ok, state} 201 | end 202 | 203 | def handle_call({:dequeue_scheduled_jobs, raw_jobs}, _from, state) do 204 | result = JobQueue.dequeue_scheduled_jobs(state.redis, state.namespace, raw_jobs) 205 | {:reply, result, state} 206 | end 207 | 208 | def handle_call({:remove_failed, jid}, _from, state) do 209 | JobStat.remove_failed(state.redis, state.namespace, jid) 210 | {:reply, :ok, state} 211 | end 212 | 213 | def handle_call({:remove_failed_jobs, raw_jobs, options}, _from, state) do 214 | JobQueue.remove_failed_jobs(state.redis, state.namespace, raw_jobs) 215 | 216 | if Keyword.get(options, :clear_unique_tokens, false) do 217 | JobQueue.unlock_jobs(state.redis, state.namespace, raw_jobs) 218 | end 219 | 220 | {:reply, :ok, state} 221 | end 222 | 223 | def handle_call(:clear_failed, _from, state) do 224 | JobStat.clear_failed(state.redis, state.namespace) 225 | {:reply, :ok, state} 226 | end 227 | 228 | def handle_call({:dequeue_failed_jobs, 
raw_jobs}, _from, state) do 229 | result = JobQueue.dequeue_failed_jobs(state.redis, state.namespace, raw_jobs) 230 | {:reply, result, state} 231 | end 232 | 233 | def handle_call(:clear_processes, _from, state) do 234 | JobStat.clear_processes(state.redis, state.namespace) 235 | {:reply, :ok, state} 236 | end 237 | 238 | def handle_call(:clear_scheduled, _from, state) do 239 | JobQueue.delete_queue(state.redis, state.namespace, "schedule") 240 | {:reply, :ok, state} 241 | end 242 | 243 | def handle_call(:clear_retries, _from, state) do 244 | JobQueue.delete_queue(state.redis, state.namespace, "retry") 245 | {:reply, :ok, state} 246 | end 247 | 248 | def handle_call(:realtime_stats, _from, state) do 249 | {:ok, failures, successes} = JobStat.realtime_stats(state.redis, state.namespace) 250 | {:reply, {:ok, failures, successes}, state} 251 | end 252 | 253 | def handle_call({:retry_job, jid}, _from, state) do 254 | {:ok, job} = JobQueue.find_job(state.redis, state.namespace, jid, :retry) 255 | JobQueue.retry_job(state.redis, state.namespace, job) 256 | {:reply, :ok, state} 257 | end 258 | 259 | def handle_call({:send_signal, node_id, signal_name}, _from, state) do 260 | result = JobStat.node_signal(state.redis, state.namespace, node_id, signal_name) 261 | {:reply, result, state} 262 | end 263 | 264 | def terminate(_reason, _state) do 265 | :ok 266 | end 267 | 268 | def server_name(name) do 269 | name = name || Config.get(:name) 270 | "#{name}.Api" |> String.to_atom() 271 | end 272 | end 273 | -------------------------------------------------------------------------------- /lib/exq/backoff/behaviour.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Backoff.Behaviour do 2 | @callback offset(job :: %Exq.Support.Job{}) :: number 3 | end 4 | -------------------------------------------------------------------------------- /lib/exq/backoff/sidekiq_default.ex: 
--------------------------------------------------------------------------------
defmodule Exq.Backoff.SidekiqDefault do
  @moduledoc """
  Default retry backoff strategy, mirroring Sidekiq's formula:
  `retry_count^4 + 15 + rand(30) * (retry_count + 1)`.
  """

  @behaviour Exq.Backoff.Behaviour
  alias Exq.Support.Randomize

  # Polynomial growth with retry_count, plus a random jitter term so that
  # many jobs failing at once do not all retry at the same instant.
  def offset(job) do
    :math.pow(job.retry_count, 4) + 15 + Randomize.random(30) * (job.retry_count + 1)
  end
end
--------------------------------------------------------------------------------
/lib/exq/dequeue/behaviour.ex:
--------------------------------------------------------------------------------
defmodule Exq.Dequeue.Behaviour do
  @moduledoc """
  Custom concurrency or rate limiting at a queue level can be achieved
  by implementing the Dequeue behaviour.

  The following config can be used to customize dequeue behaviour for a queue:

      config :exq,
        queues: [{"default", {RateLimiter, options}}]

  RateLimiter module should implement `Exq.Dequeue.Behaviour`. The
  options supplied here would be passed as the second argument to the
  `c:init/2` function.

  ### Life cycle

  `c:init/2` will be invoked on initialization. The first argument will contain info
  like queue and the second argument is user configurable.

  `c:available?/1` will be invoked before each poll. If the
  returned value contains `false` as the second element of the tuple,
  the queue will not be polled.

  `c:dispatched/1` will be invoked once a job is dispatched to the worker.

  `c:processed/1` will be invoked if a job completed successfully.

  `c:failed/1` will be invoked if a job failed.

  `c:stop/1` will be invoked when a queue is unsubscribed or before the
  node terminates. Note: there is no guarantee this will be invoked if
  the node terminates abruptly.
  """

  @callback init(info :: %{queue: String.t()}, args :: term) :: {:ok, term}
  @callback stop(state :: term) :: :ok
  @callback available?(state :: term) :: {:ok, boolean, term}
  @callback dispatched(state :: term) :: {:ok, term}
  @callback processed(state :: term) :: {:ok, term}
  @callback failed(state :: term) :: {:ok, term}
end
--------------------------------------------------------------------------------
/lib/exq/dequeue/local.ex:
--------------------------------------------------------------------------------
defmodule Exq.Dequeue.Local do
  @moduledoc """
  Default `Exq.Dequeue.Behaviour` implementation: a simple concurrency
  limiter that counts in-flight jobs and allows polling only while the
  count is below the configured `:concurrency`.
  """

  @behaviour Exq.Dequeue.Behaviour

  defmodule State do
    @moduledoc false

    # max: configured concurrency limit; current: number of in-flight jobs
    defstruct max: nil, current: 0
  end

  @impl true
  def init(_, options) do
    {:ok, %State{max: Keyword.fetch!(options, :concurrency)}}
  end

  @impl true
  def stop(_), do: :ok

  # Polling is allowed only while fewer than `max` jobs are in flight.
  @impl true
  def available?(state), do: {:ok, state.current < state.max, state}

  @impl true
  def dispatched(state), do: {:ok, %{state | current: state.current + 1}}

  @impl true
  def processed(state), do: {:ok, %{state | current: state.current - 1}}

  @impl true
  def failed(state), do: {:ok, %{state | current: state.current - 1}}
end
--------------------------------------------------------------------------------
/lib/exq/enqueue_api.ex:
--------------------------------------------------------------------------------
defmodule Exq.Enqueuer.EnqueueApi do
  @moduledoc """
  Enqueue API.

  Expects an Exq.Manager.Server or Exq.Enqueuer.Server process as first arg.
  """

  # Injects the enqueue/enqueue_at/enqueue_in/enqueue_all functions into the
  # caller. All calls are dispatched through the configured :queue_adapter,
  # which allows tests to swap in Exq.Adapters.Queue.Mock.
  @doc false
  defmacro __using__(_) do
    quote location: :keep do
      alias Exq.Support.Config

      @options_doc """
      * `options`: Following job options are supported
        * `max_retries` (integer) - max retry count
        * `jid` (string) - user supplied jid value
        * `unique_for` (integer) - lock expiration duration in seconds
        * `unique_token` (string) - unique lock token. By default the token is computed based on the queue, class and args.
        * `unique_until` (atom) - defaults to `:success`. Supported values are
          * `:success` - unlock on job success
          * `:start` - unlock on job first execution
          * `:expiry` - unlock when the lock is expired. Depends on `unique_for` value.
      """

      @default_options []
      @doc """
      Enqueue a job immediately.

      Expected args:
        * `pid` - PID for Exq Manager or Enqueuer to handle this
        * `queue` - Name of queue to use
        * `worker` - Worker module to target
        * `args` - Array of args to send to worker
      #{@options_doc}

      Returns:
        * `{:ok, jid}` if the job was enqueued successfully, with `jid` = Job ID.
        * `{:error, reason}` if there was an error enqueueing job

      """
      def enqueue(pid, queue, worker, args),
        do: enqueue(pid, queue, worker, args, @default_options)

      def enqueue(pid, queue, worker, args, options) do
        queue_adapter = Config.get(:queue_adapter)
        queue_adapter.enqueue(pid, queue, worker, args, options)
      end

      @doc """
      Schedule a job to be enqueued at a specific time in the future.

      Expected args:
        * `pid` - PID for Exq Manager or Enqueuer to handle this
        * `queue` - name of queue to use
        * `time` - Time to enqueue
        * `worker` - Worker module to target
        * `args` - Array of args to send to worker
      #{@options_doc}

      If Exq is running in `mode: [:enqueuer]`, then you will need to use the Enqueuer
      to schedule jobs, for example:

      ```elixir
      time = Timex.now() |> Timex.shift(days: 8)
      Exq.Enqueuer.enqueue_at(Exq.Enqueuer, "default", time, MyWorker, ["foo"])
      ```
      """
      def enqueue_at(pid, queue, time, worker, args),
        do: enqueue_at(pid, queue, time, worker, args, @default_options)

      def enqueue_at(pid, queue, time, worker, args, options) do
        queue_adapter = Config.get(:queue_adapter)
        queue_adapter.enqueue_at(pid, queue, time, worker, args, options)
      end

      @doc """
      Schedule a job to be enqueued in the future, given an offset in seconds.

      Expected args:
        * `pid` - PID for Exq Manager or Enqueuer to handle this
        * `queue` - Name of queue to use
        * `offset` - Offset in seconds in the future to enqueue
        * `worker` - Worker module to target
        * `args` - Array of args to send to worker
      #{@options_doc}

      If Exq is running in `mode: [:enqueuer]`, then you will need to use the Enqueuer
      to schedule jobs, for example:

      ```elixir
      Exq.Enqueuer.enqueue_in(Exq.Enqueuer, "default", 5000, MyWorker, ["foo"])
      ```
      """
      def enqueue_in(pid, queue, offset, worker, args),
        do: enqueue_in(pid, queue, offset, worker, args, @default_options)

      def enqueue_in(pid, queue, offset, worker, args, options) do
        queue_adapter = Config.get(:queue_adapter)
        queue_adapter.enqueue_in(pid, queue, offset, worker, args, options)
      end

      @doc """
      Schedule multiple jobs to be atomically enqueued at specific times.

      Expected args:
        * `pid` - PID for Exq Manager or Enqueuer to handle this
        * `jobs` - List of jobs each defined as `[queue, worker, args, options]`
          * `queue` - Name of queue to use
          * `worker` - Worker module to target
          * `args` - Array of args to send to worker
          * `options`: Following job options are supported
            * `max_retries` (integer) - max retry count
            * `jid` (string) - user supplied jid value
            * `unique_for` (integer) - lock expiration duration in seconds
            * `unique_token` (string) - unique lock token. By default the token is computed based on the queue, class and args.
            * `unique_until` (atom) - defaults to `:success`. Supported values are
              * `:success` - unlock on job success
              * `:start` - unlock on job first execution
              * `:expiry` - unlock when the lock is expired. Depends on `unique_for` value.
            * `schedule` - (optional) - used to schedule the job for future. If not present, job will be enqueued immediately by default.
              * `{:in, seconds_from_now}`
              * `{:at, datetime}`

      """
      def enqueue_all(pid, jobs) do
        queue_adapter = Config.get(:queue_adapter)
        queue_adapter.enqueue_all(pid, jobs)
      end
    end
  end
end
--------------------------------------------------------------------------------
/lib/exq/enqueuer.ex:
--------------------------------------------------------------------------------
defmodule Exq.Enqueuer do
  @moduledoc """
  Enqueuer.
  """

  # Mixin EnqueueApi
  use Exq.Enqueuer.EnqueueApi

  # Starts a standalone Exq instance running in :enqueuer mode only.
  def start_link(opts \\ []) do
    Exq.start_link(Keyword.put(opts, :mode, :enqueuer))
  end
end
--------------------------------------------------------------------------------
/lib/exq/enqueuer/server.ex:
--------------------------------------------------------------------------------
defmodule Exq.Enqueuer.Server do
  @moduledoc """
  The Enqueuer is responsible for enqueueing jobs into Redis.

  It can either be called directly by the client, or instantiated as a standalone process.

  It supports enqueuing immediate jobs, or scheduling jobs in the future.

  ## Initialization:
    * `:name` - Name of target registered process
    * `:namespace` - Redis namespace to store all data under. Defaults to "exq".
    * `:queues` - Array of currently active queues (TODO: Remove, I suspect it's not needed).
    * `:redis` - pid of Redis process.
    * `:scheduler_poll_timeout` - How often to poll Redis for scheduled / retry jobs.

  """

  require Logger

  alias Exq.Support.Config
  use GenServer

  defmodule State do
    @moduledoc false
    defstruct redis: nil, namespace: nil
  end

  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, opts, name: server_name(opts[:name]))
  end

  ## ===========================================================
  ## GenServer callbacks
  ## ===========================================================

  def init(opts) do
    {:ok, %State{redis: opts[:redis], namespace: opts[:namespace]}}
  end

  # Exposes the redis connection and namespace so callers can talk to
  # Redis directly instead of serializing work through this process.
  def handle_call(:redis, _from, state) do
    {:reply, {state.redis, state.namespace}, state}
  end

  def terminate(_reason, _state) do
    :ok
  end

  # Internal Functions

  # NOTE(review): String.to_atom/1 on a config-derived name; atoms are not
  # garbage collected, but the set of configured names is bounded in practice.
  def server_name(name) do
    name = name || Config.get(:name)
    "#{name}.Enqueuer" |> String.to_atom()
  end
end
--------------------------------------------------------------------------------
/lib/exq/heartbeat/monitor.ex:
--------------------------------------------------------------------------------
defmodule Exq.Heartbeat.Monitor do
  @moduledoc """
  Periodically checks Redis for nodes whose heartbeats have stopped.

  For each dead node it re-enqueues that node's in-progress (backup) jobs
  across all known queues, cleans up the node's stats, and unregisters it.
  """

  use GenServer
  require Logger
  alias Exq.Redis.Heartbeat
  alias Exq.Support.Config

  defmodule State do
    @moduledoc false
    defstruct [
      :namespace,
      :redis,
      :interval,
      :queues,
      :node_id,
      :missed_heartbeats_allowed,
      :stats
    ]
  end

  def start_link(options) do
    GenServer.start_link(
      __MODULE__,
      %State{
        namespace: Keyword.fetch!(options, :namespace),
        redis: Keyword.fetch!(options, :redis),
        interval: Keyword.fetch!(options, :heartbeat_interval),
        node_id: Keyword.get(options, :node_id, Config.node_identifier().node_id()),
        queues: Keyword.fetch!(options, :queues),
        stats: Keyword.get(options, :stats),
        missed_heartbeats_allowed: Keyword.fetch!(options, :missed_heartbeats_allowed)
      },
      []
    )
  end

  def init(state) do
    :ok = schedule_verify(state.interval)
    {:ok, state}
  end

  # One verification pass; always reschedules itself, even when the
  # dead-node lookup fails (errors are swallowed and retried next tick).
  def handle_info(:verify, state) do
    case Heartbeat.dead_nodes(
           state.redis,
           state.namespace,
           state.interval,
           state.missed_heartbeats_allowed
         ) do
      {:ok, node_ids} ->
        Enum.each(node_ids, fn {node_id, score} ->
          :ok = re_enqueue_backup(state, node_id, score)
        end)

      _error ->
        :ok
    end

    :ok = schedule_verify(state.interval)
    {:noreply, state}
  end

  def handle_info(msg, state) do
    Logger.error("Received unexpected info message in #{__MODULE__} #{inspect(msg)}")
    {:noreply, state}
  end

  defp schedule_verify(interval) do
    _reference = Process.send_after(self(), :verify, interval)
    :ok
  end

  # Recovers a dead node: re-enqueues its backup jobs for every queue seen in
  # Redis plus the locally configured ones, clears its stats (if a stats
  # server is configured), then removes its heartbeat registration.
  defp re_enqueue_backup(state, node_id, score) do
    Logger.info(
      "#{node_id} missed the last #{state.missed_heartbeats_allowed} heartbeats. Re-enqueing jobs from backup and cleaning up stats."
    )

    Enum.uniq(Exq.Redis.JobQueue.list_queues(state.redis, state.namespace) ++ state.queues)
    |> Enum.each(fn queue ->
      Heartbeat.re_enqueue_backup(state.redis, state.namespace, node_id, queue, score)
    end)

    if state.stats do
      :ok = Exq.Stats.Server.cleanup_host_stats(state.stats, state.namespace, node_id)
    end

    # Unregister result is intentionally ignored; a failure here will be
    # retried when the node shows up as dead on the next verify pass.
    _ = Heartbeat.unregister(state.redis, state.namespace, node_id)
    :ok
  end
end
--------------------------------------------------------------------------------
/lib/exq/heartbeat/server.ex:
--------------------------------------------------------------------------------
defmodule Exq.Heartbeat.Server do
  @moduledoc """
  Periodically registers this node's heartbeat in Redis.

  On a failed registration the next attempt is scheduled sooner —
  after `min(interval, 5000)` ms — instead of waiting a full interval.
  """

  use GenServer
  require Logger
  alias Exq.Support.Config
  alias Exq.Redis.Heartbeat

  defmodule State do
    @moduledoc false
    defstruct [:namespace, :node_id, :redis, :interval]
  end

  def start_link(options) do
    GenServer.start_link(
      __MODULE__,
      %State{
        namespace: Keyword.fetch!(options, :namespace),
        node_id: Keyword.get(options, :node_id, Config.node_identifier().node_id()),
        redis: Keyword.fetch!(options, :redis),
        interval: Keyword.fetch!(options, :heartbeat_interval)
      },
      []
    )
  end

  def init(state) do
    # First ping fires immediately; subsequent pings use the interval.
    :ok = schedule_ping(0)
    {:ok, state}
  end

  def handle_info(:ping, state) do
    case Heartbeat.register(state.redis, state.namespace, state.node_id) do
      :ok ->
        :ok = schedule_ping(state.interval)

      _error ->
        # Retry sooner than the normal interval (capped at 5s).
        :ok = schedule_ping(Enum.min([state.interval, 5000]))
    end

    {:noreply, state}
  end

  def handle_info(msg, state) do
    Logger.error("Received unexpected info message in #{__MODULE__} #{inspect(msg)}")
    {:noreply, state}
  end

  defp schedule_ping(interval) do
    _reference = Process.send_after(self(), :ping, interval)
    :ok
  end
end
--------------------------------------------------------------------------------
/lib/exq/middleware/behaviour.ex:
--------------------------------------------------------------------------------
defmodule Exq.Middleware.Behaviour do
  @moduledoc """
  Contract for Exq middleware.

  Each callback receives an `Exq.Middleware.Pipeline` struct and must
  return a (possibly updated) pipeline.
  """

  alias Exq.Middleware.Pipeline

  @callback before_work(%Pipeline{}) :: %Pipeline{}
  @callback after_processed_work(%Pipeline{}) :: %Pipeline{}
  @callback after_failed_work(%Pipeline{}) :: %Pipeline{}
end
--------------------------------------------------------------------------------
/lib/exq/middleware/job.ex:
--------------------------------------------------------------------------------
defmodule Exq.Middleware.Job do
  @moduledoc """
  Middleware that decodes the serialized job before work and removes the
  job's backup entry once work is done — retrying or failing the job
  first when the work did not succeed.
  """

  @behaviour Exq.Middleware.Behaviour
  alias Exq.Redis.JobQueue
  alias Exq.Middleware.Pipeline
  import Pipeline

  # Decode the serialized payload and expose the job struct plus its
  # resolved worker module to downstream middleware.
  def before_work(pipeline) do
    decoded = Exq.Support.Job.decode(pipeline.assigns.job_serialized)

    pipeline
    |> assign(:job, decoded)
    |> assign(:worker_module, Exq.Support.Coercion.to_module(decoded.class))
  end

  def after_processed_work(pipeline), do: remove_job_from_backup(pipeline)

  def after_failed_work(pipeline) do
    pipeline
    |> retry_or_fail_job()
    |> remove_job_from_backup()
  end

  # Schedules a retry (or marks the job dead) when a decoded job is present;
  # a pipeline without a job passes through untouched.
  defp retry_or_fail_job(%Pipeline{assigns: assigns} = pipeline) do
    if job = assigns.job do
      JobQueue.retry_or_fail_job(
        assigns.redis,
        assigns.namespace,
        job,
        to_string(assigns.error_message)
      )
    end

    pipeline
  end

  # Drops the serialized job from the per-host backup queue.
  def remove_job_from_backup(%Pipeline{assigns: assigns} = pipeline) do
    JobQueue.remove_job_from_backup(
      assigns.redis,
      assigns.namespace,
      assigns.host,
      assigns.queue,
      assigns.job_serialized
    )

    pipeline
  end
end
--------------------------------------------------------------------------------
/lib/exq/middleware/logger.ex:
-------------------------------------------------------------------------------- 1 | defmodule Exq.Middleware.Logger do 2 | @behaviour Exq.Middleware.Behaviour 3 | 4 | alias Exq.Middleware.Pipeline 5 | import Pipeline 6 | require Logger 7 | 8 | def before_work(pipeline) do 9 | Logger.info("#{log_context(pipeline)} start") 10 | assign(pipeline, :started_at, DateTime.utc_now()) 11 | end 12 | 13 | def after_processed_work(pipeline) do 14 | Logger.info("#{log_context(pipeline)} done: #{formatted_diff(delta(pipeline))}") 15 | pipeline 16 | end 17 | 18 | def after_failed_work(pipeline) do 19 | Logger.warning(to_string(pipeline.assigns.error_message)) 20 | Logger.warning("#{log_context(pipeline)} fail: #{formatted_diff(delta(pipeline))}") 21 | pipeline 22 | end 23 | 24 | defp delta(%Pipeline{assigns: assigns}) do 25 | now_usecs = DateTime.utc_now() |> DateTime.to_unix(:microsecond) 26 | started_usecs = assigns.started_at |> DateTime.to_unix(:microsecond) 27 | now_usecs - started_usecs 28 | end 29 | 30 | defp log_context(%Pipeline{assigns: assigns}) do 31 | "#{assigns.worker_module}[#{assigns.job.jid}]" 32 | end 33 | 34 | defp formatted_diff(diff) when diff > 1000, do: [diff |> div(1000) |> Integer.to_string(), "ms"] 35 | defp formatted_diff(diff), do: [diff |> Integer.to_string(), "µs"] 36 | end 37 | -------------------------------------------------------------------------------- /lib/exq/middleware/manager.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Middleware.Manager do 2 | @behaviour Exq.Middleware.Behaviour 3 | require Logger 4 | alias Exq.Manager.Server, as: Manager 5 | alias Exq.Middleware.Pipeline 6 | 7 | def before_work(pipeline) do 8 | pipeline 9 | end 10 | 11 | def after_processed_work(pipeline) do 12 | pipeline |> notify(true) 13 | end 14 | 15 | def after_failed_work(pipeline) do 16 | pipeline |> notify(false) 17 | end 18 | 19 | defp notify(%Pipeline{assigns: assigns} = pipeline, success) do 20 | 
Manager.job_terminated( 21 | assigns.manager, 22 | assigns.queue, 23 | success 24 | ) 25 | 26 | pipeline 27 | end 28 | end 29 | -------------------------------------------------------------------------------- /lib/exq/middleware/pipeline.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Middleware.Pipeline do 2 | @moduledoc """ 3 | Pipeline is a structure that is used as an argument in functions of module with 4 | `Exq.Middleware.Behaviour` behaviour. 5 | 6 | This structure must be returned by particular function to be used in the next 7 | middleware based on defined middleware chain. 8 | 9 | Pipeline contains the following options: 10 | 11 | * `assigns` - map that contains shared data across the whole job lifecycle 12 | * `worker_pid` - process id of `Exq.Worker.Server` 13 | * `event` - name of current middleware function, possible values are: `before_work`, 14 | `after_processed_work` and `after_failed_work` 15 | * `halted` - flag indicating whether pipeline was halted, defaults to `false` 16 | * `terminated` - flag indicating whether worker and pipeline were halted, If 17 | the flag was set to true, the job will not be dispatched and all after_*_work/1 18 | will be skipped. 
For each specific middleware: 19 | - Exq.Middleware.Job: Will NOT remove the backup from job queue 20 | - Exq.Middleware.Logger: Will NOT record job as done or failed with timestamp 21 | - Exq.Middleware.Manager: Will NOT update worker counter 22 | - Exq.Middleware.Unique: Will NOT clear unique lock 23 | - Exq.Middleware.Stats: Will NOT remove job from processes queue 24 | 25 | """ 26 | 27 | defstruct assigns: %{}, 28 | halted: false, 29 | terminated: false, 30 | worker_pid: nil, 31 | event: nil 32 | 33 | alias Exq.Middleware.Pipeline 34 | 35 | @doc """ 36 | Puts the `key` with value equal to `value` into `assigns` map 37 | """ 38 | def assign(%Pipeline{assigns: assigns} = pipeline, key, value) when is_atom(key) do 39 | %{pipeline | assigns: Map.put(assigns, key, value)} 40 | end 41 | 42 | @doc """ 43 | Sets `halted` to true 44 | """ 45 | def halt(%Pipeline{} = pipeline) do 46 | %{pipeline | halted: true} 47 | end 48 | 49 | @doc """ 50 | Sets `terminated` to true 51 | """ 52 | def terminate(%Pipeline{} = pipeline) do 53 | %{pipeline | terminated: true} 54 | end 55 | 56 | @doc """ 57 | Puts a state of `Exq.Worker.Server` into `assigns` map 58 | """ 59 | def assign_worker_state(pipeline, worker_state) do 60 | pipeline 61 | |> assign(:redis, worker_state.redis) 62 | |> assign(:host, worker_state.host) 63 | |> assign(:namespace, worker_state.namespace) 64 | |> assign(:queue, worker_state.queue) 65 | |> assign(:manager, worker_state.manager) 66 | |> assign(:stats, worker_state.stats) 67 | |> assign(:job_serialized, worker_state.job_serialized) 68 | end 69 | 70 | @doc """ 71 | Implements middleware chain: sequential call of function with `pipeline.event` name inside `module` module 72 | """ 73 | def chain(pipeline, []) do 74 | pipeline 75 | end 76 | 77 | def chain(%Pipeline{halted: true} = pipeline, _modules) do 78 | pipeline 79 | end 80 | 81 | def chain(%Pipeline{terminated: true} = pipeline, _modules) do 82 | pipeline 83 | end 84 | 85 | def chain(pipeline, [module | 
modules]) do 86 | chain(apply(module, pipeline.event, [pipeline]), modules) 87 | end 88 | end 89 | -------------------------------------------------------------------------------- /lib/exq/middleware/server.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Middleware.Server do 2 | @moduledoc """ 3 | Middleware Server is responsible for storing middleware chain that is evaluated 4 | when performing particular job. 5 | 6 | Middleware chain defaults to Stats, Job and Manager middlewares. 7 | 8 | To push new middleware you must create module with common interface. Interface is similar to `Plug` 9 | implementation. It has three functions, every function receives `Exq.Middlewares.Pipeline` structure 10 | and every function must return the same structure, modified or not. 11 | 12 | Basically, `before_work/1` function may update worker state, while `after_processed_work/1` and 13 | `after_failed_work/1` are for cleanup and notification stuff. 14 | 15 | For example, here is a valid middleware module: 16 | 17 | defmodule MyMiddleware do 18 | @behaiour Exq.Middleware.Behaviour 19 | 20 | def before_work(pipeline) do 21 | # some functionality goes here... 22 | pipeline 23 | end 24 | 25 | def after_processed_work(pipeline) do 26 | # some functionality goes here... 27 | pipeline 28 | end 29 | 30 | def after_failed_work(pipeline) do 31 | # some functionality goes here... 32 | pipeline 33 | end 34 | end 35 | 36 | To add this module to middleware chain: 37 | 38 | Exq.Middleware.Server.push(middleware_server_pid, MyMiddleware) 39 | 40 | """ 41 | 42 | use GenServer 43 | 44 | @doc """ 45 | Starts middleware server. 46 | """ 47 | def start_link(opts \\ []) do 48 | GenServer.start_link(__MODULE__, default_middleware(opts), name: server_name(opts[:name])) 49 | end 50 | 51 | @doc """ 52 | Adds specified `middleware` module into the end of middleware list. 53 | 54 | `middleware` should have `Exq.Middleware.Behaviour` behaviour. 
55 | """ 56 | def push(pid, middleware) do 57 | GenServer.cast(pid, {:push, middleware}) 58 | end 59 | 60 | @doc """ 61 | Retrieves list of middleware modules. 62 | """ 63 | def all(pid) do 64 | GenServer.call(pid, :all) 65 | end 66 | 67 | @doc """ 68 | Returns middleware server name. 69 | """ 70 | def server_name(name) do 71 | name = name || Exq.Support.Config.get(:name) 72 | "#{name}.Middleware.Server" |> String.to_atom() 73 | end 74 | 75 | @doc false 76 | def terminate(_reason, _state) do 77 | :ok 78 | end 79 | 80 | ## =========================================================== 81 | ## GenServer callbacks 82 | ## =========================================================== 83 | 84 | def handle_cast({:push, middleware}, state) do 85 | {:noreply, List.insert_at(state, -1, middleware)} 86 | end 87 | 88 | def handle_call(:all, _from, state) do 89 | {:reply, state, state} 90 | end 91 | 92 | def init(args) do 93 | {:ok, args} 94 | end 95 | 96 | ## =========================================================== 97 | ## Internal Functions 98 | ## =========================================================== 99 | 100 | defp default_middleware([]), do: [] 101 | defp default_middleware(opts), do: opts[:default_middleware] 102 | end 103 | -------------------------------------------------------------------------------- /lib/exq/middleware/stats.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Middleware.Stats do 2 | @behaviour Exq.Middleware.Behaviour 3 | require Logger 4 | alias Exq.Stats.Server, as: Stats 5 | alias Exq.Middleware.Pipeline 6 | import Pipeline 7 | 8 | def before_work(pipeline) do 9 | {:ok, info} = add_process(pipeline) 10 | assign(pipeline, :process_info, info) 11 | end 12 | 13 | def after_processed_work(pipeline) do 14 | pipeline |> process_terminated |> record_processed 15 | end 16 | 17 | def after_failed_work(pipeline) do 18 | pipeline |> process_terminated |> record_failure 19 | end 20 | 21 | defp 
add_process(%Pipeline{assigns: assigns, worker_pid: worker_pid}) do 22 | Stats.add_process( 23 | assigns.stats, 24 | assigns.namespace, 25 | worker_pid, 26 | assigns.host, 27 | assigns.queue, 28 | assigns.job_serialized 29 | ) 30 | end 31 | 32 | defp process_terminated(%Pipeline{assigns: assigns} = pipeline) do 33 | Stats.process_terminated(assigns.stats, assigns.namespace, assigns.process_info) 34 | pipeline 35 | end 36 | 37 | defp record_processed(%Pipeline{assigns: assigns} = pipeline) do 38 | Stats.record_processed(assigns.stats, assigns.namespace, assigns.job) 39 | pipeline 40 | end 41 | 42 | defp record_failure(%Pipeline{assigns: assigns} = pipeline) do 43 | Stats.record_failure( 44 | assigns.stats, 45 | assigns.namespace, 46 | to_string(assigns.error_message), 47 | assigns.job 48 | ) 49 | 50 | pipeline 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /lib/exq/middleware/telemetry.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Middleware.Telemetry do 2 | @moduledoc """ 3 | This middleware allows you to subscribe to the telemetry events and 4 | collect metrics about your jobs. 5 | 6 | ### Exq telemetry events 7 | 8 | The middleware emits three events, same as what `:telemetry.span/3` emits. 9 | * `[:exq, :job, :start]` - Is invoked whenever a job starts. 10 | 11 | ** Measurements ** 12 | 13 | - `system_time` (integer) - System time when the job started 14 | 15 | * `[:exq, :job, :stop]` - Is invoked whenever a job completes successfully. 16 | 17 | ** Measurements ** 18 | 19 | - `duration` (integer) - Duration of the job execution in native unit 20 | 21 | * `[:exq, :job, :exception]` - Is invoked whenever a job fails. 22 | 23 | ** Measurements ** 24 | 25 | - `duration` (integer) - Duration of the job execution in native unit 26 | 27 | ** Metadata ** 28 | 29 | In addition to the common metadata, exception event will have the following fields. 
30 | 31 | - `kind` (exit | error) - either `exit` or `error` 32 | - `reason` (term) - could be an `Exception.t/0` or term 33 | - `stacktrace` (list) - Stacktrace of the error. Will be empty if the kind is `exit`. 34 | 35 | ** Metadata ** 36 | 37 | Each event has the following common metadata: 38 | * `enqueued_at` (`DateTime.t/0`) - datetime the job was enqueued 39 | * `queue` (`String.t/0`) - the name of the queue the job was executed in 40 | * `class` (`String.t/0`) - the job's class 41 | * `jid` (`String.t/0`) - the job's jid 42 | * `retry_count` (integer) - number of times this job has failed so far 43 | 44 | 45 | ### Examples 46 | 47 | defmodule MyApp.Application do 48 | def start(_type, _args) do 49 | children = [ 50 | # ..... 51 | {Telemetry.Metrics.ConsoleReporter, metrics: metrics()} 52 | ] 53 | 54 | opts = [strategy: :one_for_one, name: MyApp.Supervisor] 55 | Supervisor.start_link(children, opts) 56 | end 57 | 58 | defp metrics do 59 | [ 60 | counter("exq.job.stop.duration"), 61 | counter("exq.job.exception.duration"), 62 | distribution("exq.job.stop.duration", 63 | buckets: [0.1, 0.2, 0.3, 0.5, 0.75, 1, 2, 3, 5, 10], 64 | unit: {:native, :millisecond} 65 | ), 66 | distribution("exq.job.exception.duration", 67 | buckets: [0.1, 0.2, 0.3, 0.5, 0.75, 1, 2, 3, 5, 10], 68 | unit: {:native, :millisecond} 69 | ), 70 | summary("exq.job.stop.duration", unit: {:native, :millisecond}), 71 | summary("exq.job.exception.duration", unit: {:native, :millisecond}) 72 | ] 73 | end 74 | end 75 | 76 | """ 77 | 78 | @behaviour Exq.Middleware.Behaviour 79 | alias Exq.Middleware.Pipeline 80 | import Pipeline 81 | 82 | defguardp is_stacktrace(stacktrace) 83 | when is_list(stacktrace) and length(stacktrace) > 0 and is_tuple(hd(stacktrace)) and 84 | (tuple_size(hd(stacktrace)) == 3 or tuple_size(hd(stacktrace)) == 4) 85 | 86 | @impl true 87 | def after_failed_work(pipeline) do 88 | duration = System.monotonic_time() - pipeline.assigns.telemetry_start_time 89 | 90 | error_map = 91 | 
case pipeline.assigns.error do 92 | {reason, stacktrace} when is_stacktrace(stacktrace) -> 93 | %{kind: :error, reason: reason, stacktrace: stacktrace} 94 | 95 | reason -> 96 | %{kind: :exit, reason: reason, stacktrace: []} 97 | end 98 | 99 | :telemetry.execute( 100 | [:exq, :job, :exception], 101 | %{duration: duration}, 102 | Map.merge(metadata(pipeline.assigns.job), error_map) 103 | ) 104 | 105 | pipeline 106 | end 107 | 108 | @impl true 109 | def after_processed_work(pipeline) do 110 | duration = System.monotonic_time() - pipeline.assigns.telemetry_start_time 111 | 112 | :telemetry.execute( 113 | [:exq, :job, :stop], 114 | %{duration: duration}, 115 | metadata(pipeline.assigns.job) 116 | ) 117 | 118 | pipeline 119 | end 120 | 121 | @impl true 122 | def before_work(pipeline) do 123 | :telemetry.execute( 124 | [:exq, :job, :start], 125 | %{system_time: System.system_time()}, 126 | metadata(pipeline.assigns.job) 127 | ) 128 | 129 | assign(pipeline, :telemetry_start_time, System.monotonic_time()) 130 | end 131 | 132 | defp metadata(job), 133 | do: %{ 134 | enqueued_at: DateTime.from_unix!(round(job.enqueued_at * 1000), :millisecond), 135 | queue: job.queue, 136 | class: job.class, 137 | jid: job.jid, 138 | retry_count: job.retry_count || 0 139 | } 140 | end 141 | -------------------------------------------------------------------------------- /lib/exq/middleware/unique.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Middleware.Unique do 2 | @behaviour Exq.Middleware.Behaviour 3 | alias Exq.Redis.JobQueue 4 | alias Exq.Middleware.Pipeline 5 | 6 | def before_work( 7 | %Pipeline{assigns: %{job_serialized: job_serialized, redis: redis, namespace: namespace}} = 8 | pipeline 9 | ) do 10 | job = Exq.Support.Job.decode(job_serialized) 11 | 12 | case job do 13 | %{unique_until: "start", unique_token: unique_token, retry_count: retry_count} 14 | when retry_count in [0, nil] -> 15 | {:ok, _} = JobQueue.unlock(redis, 
namespace, unique_token) 16 | 17 | _ -> 18 | :ok 19 | end 20 | 21 | pipeline 22 | end 23 | 24 | def after_processed_work( 25 | %Pipeline{assigns: %{job_serialized: job_serialized, redis: redis, namespace: namespace}} = 26 | pipeline 27 | ) do 28 | job = Exq.Support.Job.decode(job_serialized) 29 | 30 | case job do 31 | %{unique_until: "success", unique_token: unique_token} -> 32 | {:ok, _} = JobQueue.unlock(redis, namespace, unique_token) 33 | 34 | _ -> 35 | :ok 36 | end 37 | 38 | pipeline 39 | end 40 | 41 | def after_failed_work( 42 | %Pipeline{assigns: %{job_serialized: job_serialized, redis: redis, namespace: namespace}} = 43 | pipeline 44 | ) do 45 | job = Exq.Support.Job.decode(job_serialized) 46 | 47 | case job do 48 | %{unique_until: "success", unique_token: unique_token} -> 49 | if JobQueue.dead?(job) do 50 | {:ok, _} = JobQueue.unlock(redis, namespace, unique_token) 51 | end 52 | 53 | _ -> 54 | :ok 55 | end 56 | 57 | pipeline 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /lib/exq/mock.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Mock do 2 | alias Exq.Support.Config 3 | alias Exq.Adapters.Queue.Redis 4 | alias Exq.Support.Job 5 | alias Exq.Support.Time 6 | alias Exq.Support.Coercion 7 | use GenServer 8 | @timeout 30000 9 | 10 | defmodule State do 11 | @moduledoc false 12 | defstruct default_mode: :redis, jobs: %{}, modes: %{} 13 | end 14 | 15 | ### Public api 16 | 17 | @doc """ 18 | Start Mock server 19 | 20 | * `mode` - The default mode that's used for all tests. See `set_mode/1` for details. 

  """
  def start_link(options \\ []) do
    queue_adapter = Config.get(:queue_adapter)

    # Fail fast with a config hint: the mock is useless unless enqueues are
    # routed through the Mock queue adapter.
    if queue_adapter != Exq.Adapters.Queue.Mock do
      raise RuntimeError, """
      Exq.Mock can only work if queue_adapter is set to Exq.Adapters.Queue.Mock
      Add the following to your test config
      config :exq, queue_adapter: Exq.Adapters.Queue.Mock
      """
    end

    GenServer.start_link(__MODULE__, options, name: __MODULE__)
  end

  @doc """
  Set the mode for current test

  * `:redis` - jobs get enqueued and processed via redis.
  * `:fake` - jobs get enqueued in a local queue
  * `:inline` - jobs get executed in the same process

  """
  def set_mode(mode) when mode in [:redis, :inline, :fake] do
    # Modes are tracked per calling (test) process pid.
    GenServer.call(__MODULE__, {:mode, self(), mode}, @timeout)
  end

  @doc """
  List of enqueued jobs.

  This only works if the mode is set to `:fake`
  """
  def jobs do
    GenServer.call(__MODULE__, {:jobs, self()}, @timeout)
  end

  ### Private

  @impl true
  def init(options) do
    {:ok, %State{default_mode: Keyword.get(options, :mode, :redis)}}
  end

  @doc false
  def enqueue(pid, queue, worker, args, options) do
    # The server only *builds* the runnable (recording the job when faking);
    # it is invoked here so inline jobs execute in the caller's process.
    {:ok, runnable} =
      GenServer.call(
        __MODULE__,
        {:enqueue, self(), :enqueue, [pid, queue, worker, args, options]},
        @timeout
      )

    runnable.()
  end

  @doc false
  def enqueue_at(pid, queue, time, worker, args, options) do
    {:ok, runnable} =
      GenServer.call(
        __MODULE__,
        {:enqueue, self(), :enqueue_at, [pid, queue, time, worker, args, options]},
        @timeout
      )

    runnable.()
  end

  @doc false
  def enqueue_in(pid, queue, offset, worker, args, options) do
    {:ok, runnable} =
      GenServer.call(
        __MODULE__,
        {:enqueue, self(), :enqueue_in, [pid, queue, offset, worker, args, options]},
        @timeout
      )

    runnable.()
  end

  @doc false
  def enqueue_all(pid, jobs) do
    {:ok, runnable} = GenServer.call(__MODULE__, {:enqueue_all, self(), pid, jobs}, @timeout)
    runnable.()
  end

  @impl true
  def handle_call({:enqueue, owner_pid, type, args}, _from, state) do
    state = maybe_add_and_monitor_pid(state, owner_pid, state.default_mode)
    {state, runnable} = to_runnable(owner_pid, type, args, state)
    {:reply, {:ok, runnable}, state}
  end

  @impl true
  def handle_call({:enqueue_all, owner_pid, pid, jobs}, _from, state) do
    state = maybe_add_and_monitor_pid(state, owner_pid, state.default_mode)

    {state, runnable} =
      if state.modes[owner_pid] == :redis do
        # Redis mode: delegate the whole batch to the real adapter.
        to_runnable(owner_pid, :enqueue_all, [pid, jobs], state)
      else
        # Fake/inline modes: expand each batch entry into the matching
        # single-job enqueue based on its :schedule option.
        {state, runnables} =
          Enum.reduce(jobs, {state, []}, fn [queue, worker, args, options], {state, runnables} ->
            {type, args} =
              case options[:schedule] do
                {:at, at_time} ->
                  {:enqueue_at, [pid, queue, at_time, worker, args, options]}

                {:in, offset} ->
                  {:enqueue_in, [pid, queue, offset, worker, args, options]}

                _ ->
                  {:enqueue, [pid, queue, worker, args, options]}
              end

            {state, runnable} = to_runnable(owner_pid, type, args, state)
            {state, [runnable | runnables]}
          end)

        # Restore submission order (reduce accumulated in reverse).
        runnables = Enum.reverse(runnables)

        runnable = fn ->
          {:ok, Enum.map(runnables, fn f -> f.() end)}
        end

        {state, runnable}
      end

    {:reply, {:ok, runnable}, state}
  end

  def handle_call({:mode, owner_pid, mode}, _from, state) do
    state = maybe_add_and_monitor_pid(state, owner_pid, mode)
    {:reply, :ok, state}
  end

  def handle_call({:jobs, owner_pid}, _from, state) do
    jobs = state.jobs[owner_pid] || []
    {:reply, jobs, state}
  end

  @impl true
  def handle_info({:DOWN, _, _, pid, _}, state) do
    # A monitored test process exited: drop its mode override and its
    # recorded fake jobs.
    {_, state} = pop_in(state.modes[pid])
    {_, state} = pop_in(state.jobs[pid])
    {:noreply, state}
  end

  # Builds the closure that performs the enqueue according to the owner's
  # mode. In :fake mode the job is recorded in state as a side effect.
  defp to_runnable(owner_pid, type, args, state) do
    case state.modes[owner_pid] do
      :redis ->
        runnable = fn -> apply(Redis, type, args) end
        {state, runnable}

      :inline ->
        runnable = fn ->
          job = to_job(type, args)
          apply(Coercion.to_module(job.class), :perform, job.args)
          {:ok, job.jid}
        end

        {state, runnable}

      :fake ->
        job = to_job(type, args)
        state = update_in(state.jobs[owner_pid], &((&1 || []) ++ [job]))

        runnable = fn ->
          {:ok, job.jid}
        end

        {state, runnable}
    end
  end

  # Immediate enqueue (5 args after the pid): enqueued now.
  defp to_job(_, [_pid, queue, worker, args, options]) do
    %Job{
      jid: Keyword.get_lazy(options, :jid, fn -> UUID.uuid4() end),
      queue: queue,
      class: worker,
      args: args,
      enqueued_at: DateTime.utc_now()
    }
  end

  # Scheduled enqueue: :enqueue_at carries an absolute time, :enqueue_in a
  # relative offset converted via Time.offset_from_now/1.
  defp to_job(type, [_pid, queue, time_or_offset, worker, args, options]) do
    scheduled_at =
      case type do
        :enqueue_at -> time_or_offset
        :enqueue_in -> Time.offset_from_now(time_or_offset)
      end

    %Job{
      jid: Keyword.get_lazy(options, :jid, fn -> UUID.uuid4() end),
      queue: queue,
      class: worker,
      args: args,
      enqueued_at: scheduled_at
    }
  end

  # Registers the pid's mode on first contact and monitors it so state can be
  # cleaned up in handle_info({:DOWN, ...}) when the test process exits.
  defp maybe_add_and_monitor_pid(state, pid, mode) do
    case state.modes do
      %{^pid => _mode} ->
        state

      _ ->
        Process.monitor(pid)
        state = put_in(state.modes[pid], mode)
        state
    end
  end
end
--------------------------------------------------------------------------------
/lib/exq/node/server.ex:
--------------------------------------------------------------------------------
defmodule Exq.Node.Server do
  use GenServer
  require Logger
  alias
Exq.Support.Config
  alias Exq.Support.Time
  alias Exq.Redis.JobStat
  alias Exq.Support.Node

  defmodule State do
    # State for the periodic node ping loop.
    defstruct [
      :node,
      :interval,
      :namespace,
      :redis,
      :node_id,
      :manager,
      :workers_sup,
      ping_count: 0
    ]
  end

  def start_link(options) do
    node_id = Keyword.get(options, :node_id, Config.node_identifier().node_id())

    GenServer.start_link(
      __MODULE__,
      %State{
        manager: Keyword.fetch!(options, :manager),
        workers_sup: Keyword.fetch!(options, :workers_sup),
        node_id: node_id,
        node: build_node(node_id),
        namespace: Keyword.fetch!(options, :namespace),
        redis: Keyword.fetch!(options, :redis),
        # Ping interval in milliseconds.
        interval: 5000
      },
      []
    )
  end

  def init(state) do
    :ok = schedule_ping(state.interval)
    {:ok, state}
  end

  # Periodic ping: publish this node's current queue subscriptions and busy
  # worker count, then act on any signal returned by the ping.
  def handle_info(
        :ping,
        %{
          node: node,
          namespace: namespace,
          redis: redis,
          manager: manager,
          workers_sup: workers_sup
        } = state
      ) do
    {:ok, queues} = Exq.subscriptions(manager)
    busy = Exq.Worker.Supervisor.workers_count(workers_sup)
    node = %{node | queues: queues, busy: busy, quiet: Enum.empty?(queues)}

    :ok =
      JobStat.node_ping(redis, namespace, node)
      |> process_signal(state)

    # Prune dead nodes only on every 10th ping to limit extra Redis work.
    if Integer.mod(state.ping_count, 10) == 0 do
      JobStat.prune_dead_nodes(redis, namespace)
    end

    :ok = schedule_ping(state.interval)
    {:noreply, %{state | ping_count: state.ping_count + 1}}
  end

  def handle_info(msg, state) do
    Logger.error("Received unexpected info message in #{__MODULE__} #{inspect(msg)}")
    {:noreply, state}
  end

  # nil signal: nothing to do.
  defp process_signal(nil, _), do: :ok

  # TSTP (Sidekiq-style "quiet" signal): stop picking up new work.
  defp process_signal("TSTP", state) do
    Logger.info("Received TSTP, unsubscribing from all queues")
    :ok = Exq.unsubscribe_all(state.manager)
  end

  defp process_signal(unknown, _) do
Logger.warning("Received unsupported signal #{unknown}") 85 | :ok 86 | end 87 | 88 | defp schedule_ping(interval) do 89 | _reference = Process.send_after(self(), :ping, interval) 90 | :ok 91 | end 92 | 93 | defp build_node(node_id) do 94 | {:ok, hostname} = :inet.gethostname() 95 | 96 | %Node{ 97 | hostname: to_string(hostname), 98 | started_at: Time.unix_seconds(), 99 | pid: List.to_string(:os.getpid()), 100 | identity: node_id 101 | } 102 | end 103 | end 104 | -------------------------------------------------------------------------------- /lib/exq/node_identifier/behaviour.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.NodeIdentifier.Behaviour do 2 | @callback node_id() :: String.t() 3 | end 4 | -------------------------------------------------------------------------------- /lib/exq/node_identifier/hostname_identifier.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.NodeIdentifier.HostnameIdentifier do 2 | @behaviour Exq.NodeIdentifier.Behaviour 3 | 4 | def node_id do 5 | {:ok, hostname} = :inet.gethostname() 6 | to_string(hostname) 7 | end 8 | end 9 | -------------------------------------------------------------------------------- /lib/exq/redis/connection.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Redis.Connection do 2 | @moduledoc """ 3 | The Connection module encapsulates interaction with a live Redis connection or pool. 

  """
  require Logger

  alias Exq.Support.Config
  alias Exq.Support.Redis

  # --- Key / counter commands ---------------------------------------------
  # Bang variants raise (via a failed {:ok, _} match) on any Redis error.

  def flushdb!(redis) do
    {:ok, res} = q(redis, ["flushdb"])
    res
  end

  def decr!(redis, key) do
    {:ok, count} = q(redis, ["DECR", key])
    count
  end

  def incr!(redis, key) do
    {:ok, count} = q(redis, ["INCR", key])
    count
  end

  def get!(redis, key) do
    {:ok, val} = q(redis, ["GET", key])
    val
  end

  def set!(redis, key, val \\ 0) do
    q(redis, ["SET", key, val])
  end

  def del!(redis, key, options \\ []) do
    q(redis, ["DEL", key], options)
  end

  def expire!(redis, key, time \\ 10) do
    q(redis, ["EXPIRE", key, time])
  end

  def llen!(redis, list) do
    {:ok, len} = q(redis, ["LLEN", list])
    len
  end

  def keys!(redis, search \\ "*") do
    {:ok, keys} = q(redis, ["KEYS", search])
    keys
  end

  def scan!(redis, cursor, search, count) do
    {:ok, keys} = q(redis, ["SCAN", cursor, "MATCH", search, "COUNT", count])
    keys
  end

  # --- Set commands -------------------------------------------------------

  def scard!(redis, set) do
    {:ok, count} = q(redis, ["SCARD", set])
    count
  end

  def smembers!(redis, set) do
    {:ok, members} = q(redis, ["SMEMBERS", set])
    members
  end

  def sadd!(redis, set, member) do
    {:ok, res} = q(redis, ["SADD", set, member])
    res
  end

  def srem!(redis, set, member) do
    {:ok, res} = q(redis, ["SREM", set, member])
    res
  end

  def sismember!(redis, set, member) do
    {:ok, res} = q(redis, ["SISMEMBER", set, member])
    res
  end

  # --- List commands ------------------------------------------------------

  def lrange!(redis, list, range_start \\ "0", range_end \\ "-1") do
    {:ok, items} = q(redis, ["LRANGE", list, range_start, range_end])
    items
  end

  # Remove value(s) from a list. A list of values is removed in one pipelined
  # round trip; each value uses the same removal `count`.
  def lrem!(redis, list, value, count \\ 1, options \\ []) do
    {:ok, res} =
      if is_list(value) do
        commands = Enum.map(value, fn v -> ["LREM", list, count, v] end)
        qp(redis, commands, options)
      else
        q(redis, ["LREM", list, count, value], options)
      end

    res
  end

  def rpush!(redis, key, value) do
    {:ok, res} = q(redis, ["RPUSH", key, value])
    res
  end

  def lpush!(redis, key, value) do
    {:ok, res} = q(redis, ["LPUSH", key, value])
    res
  end

  def lpop(redis, key) do
    q(redis, ["LPOP", key])
  end

  # --- Sorted-set commands ------------------------------------------------

  def zadd(redis, set, score, member, options \\ []) do
    q(redis, ["ZADD", set, score, member], options)
  end

  def zadd!(redis, set, score, member) do
    {:ok, res} = q(redis, ["ZADD", set, score, member])
    res
  end

  def zcard!(redis, set) do
    {:ok, count} = q(redis, ["ZCARD", set])
    count
  end

  def zcount!(redis, set, min \\ "-inf", max \\ "+inf") do
    {:ok, count} = q(redis, ["ZCOUNT", set, min, max])
    count
  end

  def zrangebyscore!(redis, set, min \\ "0", max \\ "+inf") do
    {:ok, items} = q(redis, ["ZRANGEBYSCORE", set, min, max])
    items
  end

  def zrangebyscorewithlimit!(redis, set, offset, size, min \\ "0", max \\ "+inf") do
    {:ok, items} = q(redis, ["ZRANGEBYSCORE", set, min, max, "LIMIT", offset, size])
    items
  end

  def zrangebyscore(redis, set, min \\ "0", max \\ "+inf") do
    q(redis, ["ZRANGEBYSCORE", set, min, max])
  end

  def zrangebyscorewithscore!(redis, set, min \\ "0", max \\ "+inf") do
    {:ok, items} = q(redis, ["ZRANGEBYSCORE", set, min, max, "WITHSCORES"])
    items
  end

  def zrangebyscorewithscoreandlimit!(redis, set, offset, size, min \\ "0", max \\ "+inf") do
    {:ok, items} = q(redis, ["ZRANGEBYSCORE", set, min, max, "WITHSCORES", "LIMIT", offset, size])
    items
  end

  def zrangebyscorewithscore(redis, set, min \\ "0", max \\ "+inf") do
    q(redis, ["ZRANGEBYSCORE", set, min, max, "WITHSCORES"])
  end

  # Note: ZREVRANGEBYSCORE takes max before min.
  def zrevrangebyscorewithlimit!(redis, set, offset, size, min \\ "0", max \\ "+inf") do
    {:ok, items} = q(redis, ["ZREVRANGEBYSCORE", set, max, min, "LIMIT", offset, size])
    items
  end

  def zrevrangebyscorewithscoreandlimit!(redis, set, offset, size, min \\ "0", max \\ "+inf") do
    {:ok, items} =
      q(redis, ["ZREVRANGEBYSCORE", set, max, min, "WITHSCORES", "LIMIT", offset, size])

    items
  end

  def zrange!(redis, set, range_start \\ "0", range_end \\ "-1") do
    {:ok, items} = q(redis, ["ZRANGE", set, range_start, range_end])
    items
  end

  def zrem!(redis, set, members) when is_list(members) do
    {:ok, res} = q(redis, ["ZREM", set | members])
    res
  end

  def zrem!(redis, set, member) do
    {:ok, res} = q(redis, ["ZREM", set, member])
    res
  end

  def zrem(redis, set, member) do
    q(redis, ["ZREM", set, member])
  end

  # --- Core query helpers -------------------------------------------------

  # Run a single Redis command. `:retry_on_connection_error` in options gives
  # the number of retries on connection errors (default 0).
  def q(redis, command, options \\ []) do
    Redis.with_retry_on_connection_error(
      fn ->
        redis
        |> Redix.command(command, timeout: Config.get(:redis_timeout))
        |> handle_response(redis)
      end,
      Keyword.get(options, :retry_on_connection_error, 0)
    )
  end

  # Run a pipeline of commands with the same retry semantics as q/3.
  def qp(redis, command, options \\ []) do
    Redis.with_retry_on_connection_error(
      fn ->
        redis
        |> Redix.pipeline(command, timeout: Config.get(:redis_timeout))
        |> handle_responses(redis)
      end,
      Keyword.get(options, :retry_on_connection_error, 0)
    )
  end

  def qp!(redis, command, options \\ []) do
    Redis.with_retry_on_connection_error(
      fn ->
        redis
        |> Redix.pipeline!(command, timeout: Config.get(:redis_timeout))
        |> handle_responses(redis)
      end,
      Keyword.get(options, :retry_on_connection_error, 0)
    )
  end

  # A READONLY error indicates the connection points at a replica after a
  # failover; force a reconnect (see disconnect/1 below).
  defp handle_response({:error, %{message: "READONLY" <>
280 | :timer.sleep(100) 281 | end 282 | end 283 | end 284 | -------------------------------------------------------------------------------- /lib/exq/redis/heartbeat.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Redis.Heartbeat do 2 | require Logger 3 | alias Exq.Redis.Connection 4 | alias Exq.Redis.JobQueue 5 | alias Exq.Redis.Script 6 | 7 | def register(redis, namespace, node_id) do 8 | score = DateTime.to_unix(DateTime.utc_now(), :millisecond) / 1000 9 | 10 | case Connection.qp(redis, [ 11 | ["MULTI"], 12 | ["ZREM", sorted_set_key(namespace), node_id], 13 | ["ZADD", sorted_set_key(namespace), score, node_id], 14 | ["EXEC"] 15 | ]) do 16 | {:ok, ["OK", "QUEUED", "QUEUED", [_, 1]]} -> 17 | :ok 18 | 19 | error -> 20 | Logger.error("Failed to send heartbeat. Unexpected error from redis: #{inspect(error)}") 21 | error 22 | end 23 | end 24 | 25 | def unregister(redis, namespace, node_id) do 26 | case Connection.zrem(redis, sorted_set_key(namespace), node_id) do 27 | {:ok, _} -> 28 | :ok 29 | 30 | error -> 31 | Logger.error( 32 | "Failed to clear old heartbeat. 
Unexpected error from redis: #{inspect(error)}" 33 | ) 34 | 35 | error 36 | end 37 | end 38 | 39 | def re_enqueue_backup(redis, namespace, node_id, queue, current_score) do 40 | resp = 41 | Script.eval!( 42 | redis, 43 | :heartbeat_re_enqueue_backup, 44 | [ 45 | JobQueue.backup_queue_key(namespace, node_id, queue), 46 | JobQueue.queue_key(namespace, queue), 47 | sorted_set_key(namespace) 48 | ], 49 | [node_id, current_score, 10] 50 | ) 51 | 52 | case resp do 53 | {:ok, [remaining, moved]} -> 54 | if moved > 0 do 55 | Logger.info( 56 | "Re-enqueued #{moved} job(s) from backup for node_id [#{node_id}] and queue [#{queue}]" 57 | ) 58 | end 59 | 60 | if remaining > 0 do 61 | re_enqueue_backup(redis, namespace, node_id, queue, current_score) 62 | end 63 | 64 | _ -> 65 | nil 66 | end 67 | end 68 | 69 | def dead_nodes(redis, namespace, interval, missed_heartbeats_allowed) do 70 | score = DateTime.to_unix(DateTime.utc_now(), :millisecond) / 1000 71 | cutoff = score - interval / 1000 * (missed_heartbeats_allowed + 1) 72 | cutoff = Enum.max([0, cutoff]) 73 | 74 | with {:ok, results} <- 75 | Connection.zrangebyscorewithscore(redis, sorted_set_key(namespace), 0, cutoff) do 76 | {:ok, 77 | Enum.chunk_every(results, 2) 78 | |> Map.new(fn [k, v] -> {k, v} end)} 79 | end 80 | end 81 | 82 | defp sorted_set_key(namespace) do 83 | "#{namespace}:heartbeats" 84 | end 85 | end 86 | -------------------------------------------------------------------------------- /lib/exq/redis/script.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Redis.Script do 2 | alias Exq.Redis.Connection 3 | 4 | defmodule Prepare do 5 | def script(source) do 6 | hash = 7 | :crypto.hash(:sha, source) 8 | |> Base.encode16(case: :lower) 9 | 10 | {hash, source} 11 | end 12 | end 13 | 14 | @scripts %{ 15 | enqueue: 16 | Prepare.script(""" 17 | local queues_key, job_queue_key, unique_key = KEYS[1], KEYS[2], KEYS[3] 18 | local job_queue, job, jid, unlocks_in = 
ARGV[1], ARGV[2], ARGV[3], tonumber(ARGV[4]) 19 | local unlocked = true 20 | local conflict_jid = nil 21 | 22 | if unlocks_in then 23 | unlocked = redis.call("set", unique_key, jid, "px", unlocks_in, "nx") 24 | end 25 | 26 | if unlocked then 27 | redis.call('SADD', queues_key, job_queue) 28 | redis.call('LPUSH', job_queue_key, job) 29 | return 0 30 | else 31 | conflict_jid = redis.call("get", unique_key) 32 | return {1, conflict_jid} 33 | end 34 | """), 35 | enqueue_at: 36 | Prepare.script(""" 37 | local schedule_queue, unique_key = KEYS[1], KEYS[2] 38 | local job, score, jid, unlocks_in = ARGV[1], tonumber(ARGV[2]), ARGV[3], tonumber(ARGV[4]) 39 | local unlocked = true 40 | local conflict_jid = nil 41 | 42 | if unlocks_in then 43 | unlocked = redis.call("set", unique_key, jid, "px", unlocks_in, "nx") 44 | end 45 | 46 | if unlocked then 47 | redis.call('ZADD', schedule_queue, score, job) 48 | return 0 49 | else 50 | conflict_jid = redis.call("get", unique_key) 51 | return {1, conflict_jid} 52 | end 53 | """), 54 | enqueue_all: 55 | Prepare.script(""" 56 | local schedule_queue, queues_key = KEYS[1], KEYS[2] 57 | local i = 1 58 | local result = {} 59 | 60 | while i <= #(ARGV) / 5 do 61 | local keys_start = i * 2 62 | local args_start = (i - 1) * 5 63 | local unique_key, job_queue_key = KEYS[keys_start + 1], KEYS[keys_start + 2] 64 | local jid = ARGV[args_start + 1] 65 | local job_queue = ARGV[args_start + 2] 66 | local score = tonumber(ARGV[args_start + 3]) 67 | local job = ARGV[args_start + 4] 68 | local unlocks_in = tonumber(ARGV[args_start + 5]) 69 | local unlocked = true 70 | local conflict_jid = nil 71 | 72 | if unlocks_in then 73 | unlocked = redis.call("set", unique_key, jid, "px", unlocks_in, "nx") 74 | end 75 | 76 | if unlocked and score == 0 then 77 | redis.call('SADD', queues_key, job_queue) 78 | redis.call('LPUSH', job_queue_key, job) 79 | result[i] = {0, jid} 80 | elseif unlocked then 81 | redis.call('ZADD', schedule_queue, score, job) 82 | result[i] = 
{0, jid} 83 | else 84 | conflict_jid = redis.call("get", unique_key) 85 | result[i] = {1, conflict_jid} 86 | end 87 | 88 | i = i + 1 89 | end 90 | 91 | return result 92 | """), 93 | scheduler_dequeue_jobs: 94 | Prepare.script(""" 95 | local schedule_queue, namespace_prefix = KEYS[1], KEYS[2] 96 | local jobs = ARGV 97 | local dequeued = 0 98 | for _, job in ipairs(jobs) do 99 | local job_queue = cjson.decode(job)['queue'] 100 | local count = redis.call('ZREM', schedule_queue, job) 101 | if count == 1 then 102 | redis.call('SADD', namespace_prefix .. 'queues', job_queue) 103 | redis.call('LPUSH', namespace_prefix .. 'queue:' .. job_queue, job) 104 | dequeued = dequeued + 1 105 | end 106 | end 107 | return dequeued 108 | """), 109 | scheduler_dequeue: 110 | Prepare.script(""" 111 | local schedule_queue = KEYS[1] 112 | local limit, max_score, namespace_prefix = tonumber(ARGV[1]), tonumber(ARGV[2]), ARGV[3] 113 | local jobs = redis.call('ZRANGEBYSCORE', schedule_queue, 0, max_score, 'LIMIT', 0, limit) 114 | for _, job in ipairs(jobs) do 115 | local job_queue = cjson.decode(job)['queue'] 116 | redis.call('ZREM', schedule_queue, job) 117 | redis.call('SADD', namespace_prefix .. 'queues', job_queue) 118 | redis.call('LPUSH', namespace_prefix .. 'queue:' .. 
job_queue, job) 119 | end 120 | return #jobs 121 | """), 122 | mlpop_rpush: 123 | Prepare.script(""" 124 | local from, to = KEYS[1], KEYS[2] 125 | local limit = tonumber(ARGV[1]) 126 | local length = redis.call('LLEN', from) 127 | local value = nil 128 | local moved = 0 129 | while limit > 0 and length > 0 do 130 | value = redis.call('LPOP', from) 131 | redis.call('RPUSH', to, value) 132 | limit = limit - 1 133 | length = length - 1 134 | moved = moved + 1 135 | end 136 | return {length, moved} 137 | """), 138 | heartbeat_re_enqueue_backup: 139 | Prepare.script(""" 140 | local function contains(table, element) 141 | for _, value in pairs(table) do 142 | if value == element then 143 | return true 144 | end 145 | end 146 | return false 147 | end 148 | 149 | local backup_queue_key, queue_key, heartbeat_key = KEYS[1], KEYS[2], KEYS[3] 150 | local node_id, expected_score, limit = ARGV[1], ARGV[2], tonumber(ARGV[3]) 151 | local node_ids = redis.call('ZRANGEBYSCORE', heartbeat_key, expected_score, expected_score) 152 | if contains(node_ids, node_id) then 153 | local length = redis.call('LLEN', backup_queue_key) 154 | local value = nil 155 | local moved = 0 156 | while limit > 0 and length > 0 do 157 | value = redis.call('LPOP', backup_queue_key) 158 | redis.call('RPUSH', queue_key, value) 159 | limit = limit - 1 160 | length = length - 1 161 | moved = moved + 1 162 | end 163 | return {length, moved} 164 | else 165 | return {0, 0} 166 | end 167 | """) 168 | } 169 | 170 | def eval!(redis, script, keys, args) do 171 | {hash, source} = @scripts[script] 172 | 173 | case Connection.q(redis, ["EVALSHA", hash, length(keys)] ++ keys ++ args) do 174 | {:error, %Redix.Error{message: "NOSCRIPT" <> _}} -> 175 | Connection.q(redis, ["EVAL", source, length(keys)] ++ keys ++ args) 176 | 177 | result -> 178 | result 179 | end 180 | end 181 | end 182 | -------------------------------------------------------------------------------- /lib/exq/scheduler/server.ex: 
--------------------------------------------------------------------------------
defmodule Exq.Scheduler.Server do
  @moduledoc """
  The Scheduler is responsible for monitoring the `schedule` and `retry` queues.
  These queues use a Redis sorted set (term?) to schedule and pick off due jobs.
  Once a job is at or past its execution date, the Scheduler moves the job into the
  live execution queue.

  Runs on a timed loop according to `scheduler_poll_timeout`.

  ## Initialization:
    * `:name` - Name of target registered process
    * `:namespace` - Redis namespace to store all data under. Defaults to "exq".
    * `:queues` - Array of currently active queues (TODO: Remove, I suspect it's not needed).
    * `:redis` - pid of Redis process.
    * `:scheduler_poll_timeout` - How often to poll Redis for scheduled / retry jobs.
  """

  require Logger
  use GenServer

  defmodule State do
    defstruct redis: nil, namespace: nil, queues: nil, scheduler_poll_timeout: nil
  end

  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, opts, name: server_name(opts[:name]))
  end

  # Kick off the first dequeue cycle asynchronously.
  def start_timeout(pid) do
    GenServer.cast(pid, :start_timeout)
  end

  # Registered process name; derived from configuration, so the created atom
  # set is bounded.
  def server_name(name) do
    name = name || Exq.Support.Config.get(:name)
    "#{name}.Scheduler" |> String.to_atom()
  end

  ## ===========================================================
  ## gen server callbacks
  ## ===========================================================

  def init(opts) do
    state = %State{
      redis: opts[:redis],
      namespace: opts[:namespace],
      queues: opts[:queues],
      scheduler_poll_timeout: opts[:scheduler_poll_timeout]
    }

    start_timeout(self())

    {:ok, state}
  end

  # Delegates to the :timeout handler so the first cycle shares the same code
  # path as subsequent timer-driven cycles.
  def handle_cast(:start_timeout, state) do
    handle_info(:timeout, state)
  end

  def handle_info(:timeout, state) do
    {updated_state,
timeout} = dequeue(state) 61 | {:noreply, updated_state, timeout} 62 | end 63 | 64 | ## =========================================================== 65 | ## Internal Functions 66 | ## =========================================================== 67 | 68 | @doc """ 69 | Dequeue any active jobs in the scheduled and retry queues, and enqueue them to live queue. 70 | """ 71 | def dequeue(state) do 72 | Exq.Redis.JobQueue.scheduler_dequeue(state.redis, state.namespace) 73 | {state, state.scheduler_poll_timeout} 74 | end 75 | end 76 | -------------------------------------------------------------------------------- /lib/exq/serializers/behaviour.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Serializers.Behaviour do 2 | @callback decode(any) :: any 3 | @callback decode!(any) :: any 4 | @callback encode(any) :: any 5 | @callback encode!(any) :: any 6 | 7 | @callback encode_job(any) :: any 8 | @callback decode_job(any) :: any 9 | @callback encode_process(any) :: any 10 | @callback decode_process(any) :: any 11 | end 12 | -------------------------------------------------------------------------------- /lib/exq/serializers/json_serializer.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Serializers.JsonSerializer do 2 | @behaviour Exq.Serializers.Behaviour 3 | alias Exq.Support.Config 4 | 5 | defp json_library do 6 | Config.get(:json_library) 7 | end 8 | 9 | def decode(json) do 10 | json_library().decode(json) 11 | end 12 | 13 | def encode(e) do 14 | json_library().encode(e) 15 | end 16 | 17 | def decode!(json) do 18 | json_library().decode!(json) 19 | end 20 | 21 | def encode!(e) do 22 | json_library().encode!(e) 23 | end 24 | 25 | def decode_job(serialized) do 26 | deserialized = decode!(serialized) 27 | 28 | %Exq.Support.Job{ 29 | args: Map.get(deserialized, "args"), 30 | class: Map.get(deserialized, "class"), 31 | enqueued_at: Map.get(deserialized, "enqueued_at"), 32 
| error_message: Map.get(deserialized, "error_message"), 33 | error_class: Map.get(deserialized, "error_class"), 34 | failed_at: Map.get(deserialized, "failed_at"), 35 | retried_at: Map.get(deserialized, "retried_at"), 36 | finished_at: Map.get(deserialized, "finished_at"), 37 | jid: Map.get(deserialized, "jid"), 38 | processor: Map.get(deserialized, "processor"), 39 | queue: Map.get(deserialized, "queue"), 40 | retry: Map.get(deserialized, "retry"), 41 | retry_count: Map.get(deserialized, "retry_count"), 42 | unique_for: Map.get(deserialized, "unique_for"), 43 | unique_until: Map.get(deserialized, "unique_until"), 44 | unique_token: Map.get(deserialized, "unique_token"), 45 | unlocks_at: Map.get(deserialized, "unlocks_at") 46 | } 47 | end 48 | 49 | def encode_job(job) do 50 | deserialized = %{ 51 | args: job.args, 52 | class: job.class, 53 | enqueued_at: job.enqueued_at, 54 | error_message: job.error_message, 55 | error_class: job.error_class, 56 | failed_at: job.failed_at, 57 | retried_at: job.retried_at, 58 | finished_at: job.finished_at, 59 | jid: job.jid, 60 | processor: job.processor, 61 | queue: job.queue, 62 | retry: job.retry, 63 | retry_count: job.retry_count 64 | } 65 | 66 | deserialized = 67 | if job.unique_for do 68 | Map.merge(deserialized, %{ 69 | unique_for: job.unique_for, 70 | unique_until: job.unique_until, 71 | unique_token: job.unique_token, 72 | unlocks_at: job.unlocks_at 73 | }) 74 | else 75 | deserialized 76 | end 77 | 78 | encode!(deserialized) 79 | end 80 | 81 | def decode_process(serialized) do 82 | deserialized = decode!(serialized) 83 | 84 | %Exq.Support.Process{ 85 | pid: Map.get(deserialized, "pid"), 86 | host: Map.get(deserialized, "host"), 87 | payload: 88 | Map.get(deserialized, "payload") 89 | |> Exq.Support.Job.decode(), 90 | run_at: Map.get(deserialized, "run_at"), 91 | queue: Map.get(deserialized, "queue") 92 | } 93 | end 94 | 95 | def encode_process(process) do 96 | deserialized = 97 | Enum.into( 98 | [ 99 | pid: process.pid, 
100 | host: process.host, 101 | payload: process.payload, 102 | run_at: process.run_at, 103 | queue: process.queue 104 | ], 105 | Map.new() 106 | ) 107 | 108 | encode!(deserialized) 109 | end 110 | 111 | def encode_node(node) do 112 | encode!(Map.from_struct(node)) 113 | end 114 | 115 | def decode_node(serialized) do 116 | deserialized = decode!(serialized) 117 | 118 | %Exq.Support.Node{ 119 | hostname: Map.get(deserialized, "hostname"), 120 | identity: Map.get(deserialized, "identity"), 121 | started_at: Map.get(deserialized, "started_at"), 122 | pid: Map.get(deserialized, "pid"), 123 | queues: Map.get(deserialized, "queues"), 124 | labels: Map.get(deserialized, "labels"), 125 | tag: Map.get(deserialized, "tag"), 126 | busy: Map.get(deserialized, "busy"), 127 | quiet: Map.get(deserialized, "quiet"), 128 | concurrency: Map.get(deserialized, "concurrency") 129 | } 130 | end 131 | end 132 | -------------------------------------------------------------------------------- /lib/exq/stats/server.ex: -------------------------------------------------------------------------------- 1 | defmodule Exq.Stats.Server do 2 | @moduledoc """ 3 | Stats process is responsible for recording all stats into Redis. 4 | 5 | The stats format is compatible with the Sidekiq stats format, so that 6 | The Sidekiq UI can be also used to view Exq status as well, and Exq 7 | can run side by side with Sidekiq without breaking any of it's UI. 
defmodule Exq.Stats.Server do
  @moduledoc """
  Stats process is responsible for recording all stats into Redis.

  The stats format is compatible with the Sidekiq stats format, so that
  the Sidekiq UI can be also used to view Exq status as well, and Exq
  can run side by side with Sidekiq without breaking any of its UI.

  This includes job success/failure as well as in-progress jobs.
  """
  use GenServer
  alias Exq.Redis.JobStat
  alias Exq.Support.Config
  alias Exq.Support.Process
  alias Exq.Support.Time
  alias Exq.Redis.Connection

  require Logger

  defmodule State do
    @moduledoc false
    # `queue` buffers incoming stat messages between periodic Redis flushes.
    defstruct redis: nil, queue: :queue.new()
  end

  @doc """
  Add in progress worker process.
  """
  def add_process(stats, namespace, worker, host, queue, job_serialized) do
    process_info = %Process{
      pid: inspect(worker),
      host: host,
      queue: queue,
      # Consistency: use the module's existing aliases rather than the
      # fully-qualified Exq.Support.* names.
      payload: Config.serializer().decode_job(job_serialized),
      run_at: Time.unix_seconds()
    }

    serialized = Process.encode(process_info)
    GenServer.cast(stats, {:add_process, namespace, process_info, serialized})
    {:ok, process_info}
  end

  @doc """
  Remove in progress worker process.
  """
  def process_terminated(stats, namespace, process_info) do
    GenServer.cast(stats, {:process_terminated, namespace, process_info})
    :ok
  end

  @doc """
  Record job as successfully processed.
  """
  def record_processed(stats, namespace, job) do
    GenServer.cast(stats, {:record_processed, namespace, job})
    :ok
  end

  @doc """
  Record job as failed.
  """
  def record_failure(stats, namespace, error, job) do
    GenServer.cast(stats, {:record_failure, namespace, error, job})
    :ok
  end

  @doc """
  Cleanup stats on boot. This includes cleaning up busy workers.
  """
  def cleanup_host_stats(stats, namespace, host) do
    GenServer.call(stats, {:cleanup_host_stats, namespace, host})
    :ok
  end

  @doc """
  Registered name of the stats server for the given Exq instance name.
  """
  def server_name(name) do
    name = name || Config.get(:name)
    "#{name}.Stats" |> String.to_atom()
  end

  @doc """
  Synchronously flush all buffered stats to Redis.
  """
  def force_flush(stats) do
    GenServer.call(stats, :force_flush)
  end

  ## ===========================================================
  ## gen server callbacks
  ## ===========================================================

  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, opts, name: server_name(opts[:name]))
  end

  @impl true
  def init(opts) do
    # Trap exits so terminate/2 runs and pending stats are flushed on shutdown.
    Elixir.Process.flag(:trap_exit, true)
    Elixir.Process.send_after(self(), :flush, Config.get(:stats_flush_interval))
    {:ok, %State{redis: opts[:redis]}}
  end

  # All cast messages are buffered as-is; they are turned into Redis
  # commands by process_queue/4 on the next flush.
  @impl true
  def handle_cast(msg, state) do
    state = %{state | queue: :queue.in(msg, state.queue)}
    {:noreply, state}
  end

  @impl true
  def handle_call(:force_flush, _from, state) do
    queue = process_queue(state.queue, state, [])
    state = %{state | queue: queue}
    {:reply, :ok, state}
  end

  def handle_call({:cleanup_host_stats, namespace, host}, _from, state) do
    try do
      JobStat.cleanup_processes(state.redis, namespace, host)
    rescue
      e -> Logger.error("Error cleaning up processes - #{Kernel.inspect(e)}")
    end

    {:reply, :ok, state}
  end

  @impl true
  def handle_info(:flush, state) do
    queue = process_queue(state.queue, state, [])
    state = %{state | queue: queue}
    Elixir.Process.send_after(self(), :flush, Config.get(:stats_flush_interval))
    {:noreply, state}
  end

  @impl true
  def terminate(_reason, state) do
    # flush any pending stats
    process_queue(state.queue, state, [])
    :ok
  end

  ## ===========================================================
  ## Methods
  ## ===========================================================

  @doc """
  Drains the buffered message queue into batched Redis pipelines.

  Batches are capped at `:stats_batch_size` messages; a full batch is
  sent with `Connection.qp!/2` before a new one is started. Returns the
  (now empty) queue. The `redis_batch ++ …` append is bounded by the
  batch cap, so the quadratic append cost stays small per flush.
  """
  def process_queue(queue, state, redis_batch, size \\ 0) do
    case :queue.out(queue) do
      {:empty, q} ->
        if size > 0 do
          Connection.qp!(state.redis, redis_batch)
        end

        q

      {{:value, msg}, q} ->
        if size < Config.get(:stats_batch_size) do
          redis_batch = redis_batch ++ generate_instructions(msg)
          process_queue(q, state, redis_batch, size + 1)
        else
          Connection.qp!(state.redis, redis_batch)
          redis_batch = [] ++ generate_instructions(msg)
          process_queue(q, state, redis_batch, 1)
        end
    end
  end

  @doc """
  Translates one buffered stat message into a list of Redis commands.
  """
  def generate_instructions({:add_process, namespace, process_info, serialized}) do
    JobStat.add_process_commands(namespace, process_info, serialized)
  end

  def generate_instructions({:record_processed, namespace, job}) do
    JobStat.record_processed_commands(namespace, job)
  end

  def generate_instructions({:record_failure, namespace, error, job}) do
    JobStat.record_failure_commands(namespace, error, job)
  end

  def generate_instructions({:process_terminated, namespace, process}) do
    JobStat.remove_process_commands(namespace, process)
  end
end

defmodule Exq.Support.Binary do
  @moduledoc false

  # Strips `prefix` from the front of `full`; raises MatchError when
  # `full` does not start with `prefix`.
  def take_prefix(full, prefix) do
    base = byte_size(prefix)
    <<_::binary-size(base), rest::binary>> = full
    rest
  end
end
defmodule Exq.Support.Coercion do
  @moduledoc false

  # Strings (trimmed, case-insensitive) that coerce to `true`.
  @truthy_words ["true", "yes", "1"]

  @doc """
  Coerces `value` to an integer.

  Integers pass through; binaries must be fully numeric. Anything else
  raises `ArgumentError`.
  """
  def to_integer(value) when is_integer(value), do: value

  def to_integer(value) when is_binary(value) do
    case Integer.parse(value) do
      {parsed, ""} ->
        parsed

      _ ->
        raise ArgumentError,
          message: "Failed to parse #{inspect(value)} into an integer."
    end
  end

  def to_integer(value) do
    raise ArgumentError,
      message: "Failed to parse #{inspect(value)} into an integer."
  end

  @doc """
  Coerces `value` to a boolean.

  Booleans pass through; binaries are `true` only for "true"/"yes"/"1"
  (trimmed, case-insensitive). Anything else raises `ArgumentError`.
  """
  def to_boolean(value) when is_boolean(value), do: value

  def to_boolean(value) when is_binary(value) do
    normalized = value |> String.trim() |> String.downcase()
    normalized in @truthy_words
  end

  def to_boolean(value) do
    raise ArgumentError,
      message: "Failed to parse #{inspect(value)} into a boolean."
  end

  @doc """
  Coerces a class name (atom or string, Ruby `::` or Elixir `.` style,
  optional `/function` suffix) to an `Elixir.*` module atom.

  NOTE(review): creates atoms at runtime via `String.to_atom/1`; atoms
  are never garbage collected, so inputs must come from trusted,
  bounded configuration — never from untrusted payloads.
  """
  def to_module(class) when is_atom(class) do
    class |> to_string() |> to_module()
  end

  def to_module("Elixir." <> class), do: to_module(class)

  def to_module(class) do
    [module_name | _func_or_empty] =
      class
      |> String.replace("::", ".")
      |> String.split("/")

    String.to_atom("Elixir.#{module_name}")
  end
end
defmodule Exq.Support.Config do
  @moduledoc false

  # Compile-time defaults; every key can be overridden through the
  # `:exq` application environment (see get/2).
  @default_config %{
    name: Exq,
    mode: :default,
    host: "127.0.0.1",
    port: 6379,
    database: 0,
    redis_options: [],
    redis_timeout: 5000,
    namespace: "exq",
    queues: ["default"],
    json_library: Jason,
    heartbeat_enable: false,
    heartbeat_interval: 60_000,
    missed_heartbeats_allowed: 5,
    scheduler_enable: true,
    concurrency: 100,
    scheduler_poll_timeout: 200,
    scheduler_page_size: 10,
    poll_timeout: 100,
    genserver_timeout: 5000,
    shutdown_timeout: 5000,
    max_retries: 25,
    dead_max_jobs: 10_000,
    # 6 months
    dead_timeout_in_seconds: 180 * 24 * 60 * 60,
    stats_flush_interval: 1000,
    stats_batch_size: 2000,
    serializer: Exq.Serializers.JsonSerializer,
    node_identifier: Exq.NodeIdentifier.HostnameIdentifier,
    backoff: Exq.Backoff.SidekiqDefault,
    start_on_application: true,
    middleware: [
      Exq.Middleware.Stats,
      Exq.Middleware.Job,
      Exq.Middleware.Manager,
      Exq.Middleware.Unique,
      Exq.Middleware.Logger
    ],
    queue_adapter: Exq.Adapters.Queue.Redis
  }

  @doc """
  Fetches `key` from the `:exq` application env, falling back to the
  built-in default for that key.
  """
  def get(key) do
    get(key, Map.get(@default_config, key))
  end

  @doc """
  Fetches `key` from the `:exq` application env, falling back to
  `fallback`. `{:system, var}` / `{:system, var, default}` values are
  resolved against OS environment variables at call time.
  """
  def get(key, fallback) do
    :exq
    |> Application.get_env(key, fallback)
    |> resolve_system_tuple()
  end

  @doc "Configured serializer module."
  def serializer, do: get(:serializer)

  @doc "Configured node identifier module."
  def node_identifier, do: get(:node_identifier)

  @doc "Configured retry backoff module."
  def backoff, do: get(:backoff)

  defp resolve_system_tuple({:system, varname}), do: System.get_env(varname)
  defp resolve_system_tuple({:system, varname, default}), do: System.get_env(varname) || default
  defp resolve_system_tuple(value), do: value
end
defmodule Exq.Support.Job do
  @moduledoc """
  Serializable Job format used by Exq.
  """

  defstruct error_message: nil,
            error_class: nil,
            retried_at: nil,
            failed_at: nil,
            retry: false,
            retry_count: 0,
            processor: nil,
            queue: nil,
            class: nil,
            args: nil,
            jid: nil,
            finished_at: nil,
            enqueued_at: nil,
            unique_for: nil,
            unique_until: nil,
            unique_token: nil,
            unlocks_at: nil

  alias Exq.Support.Config

  @doc """
  Decodes a serialized payload into a `%Exq.Support.Job{}` using the
  configured serializer.
  """
  def decode(serialized) do
    Config.serializer().decode_job(serialized)
  end

  @doc """
  Encodes a job (or job-related value) with the configured serializer.

  Accepts:
    * `nil` - passed through unchanged
    * a `%Exq.Support.Job{}` - converted to a map and serialized
    * an exception struct - normalized to `%{message: ...}`
    * a plain map of job fields - serialized (with `:error_message`
      normalized first, when present)
    * any other value - passed through unchanged
  """
  def encode(nil), do: nil

  def encode(%__MODULE__{} = job) do
    encode(%{
      error_message: encode(job.error_message),
      error_class: job.error_class,
      failed_at: job.failed_at,
      retried_at: job.retried_at,
      retry: job.retry,
      retry_count: job.retry_count,
      processor: job.processor,
      queue: job.queue,
      class: job.class,
      args: job.args,
      jid: job.jid,
      finished_at: job.finished_at,
      enqueued_at: job.enqueued_at,
      unique_for: job.unique_for,
      unique_until: job.unique_until,
      unique_token: job.unique_token,
      unlocks_at: job.unlocks_at
    })
  end

  def encode(%RuntimeError{message: message}), do: %{message: message}

  # Generalization: previously only RuntimeError was normalized; any other
  # exception struct stored in `error_message` matched the `%{}` clause
  # below and could not be serialized. Normalize every exception struct.
  def encode(exception) when is_exception(exception),
    do: %{message: Exception.message(exception)}

  def encode(%{} = job_map) do
    job_map =
      case Map.fetch(job_map, :error_message) do
        {:ok, val} ->
          Map.put(job_map, :error_message, encode(val))

        :error ->
          job_map
      end

    Config.serializer().encode_job(job_map)
  end

  def encode(val), do: val
end
defmodule Exq.Support.Mode do
  @moduledoc """
  This module defines several modes in which Exq can be used.

  These modes are:
  * `default` - starts the default processes
  * `enqueuer` - starts processes which are responsible for job enqueueing
  * `api` - starts processes which are responsible for API usage

  """

  import Exq.Support.Opts, only: [redis_worker_opts: 1]

  @doc """
  Returns child list for the main Exq supervisor.

  The Redis connection child always comes first so it starts before any
  process that depends on it.
  """
  def children(opts) do
    {redis_module, redis_args, opts} = redis_worker_opts(opts)
    [worker_spec(redis_module, redis_args) | children(opts[:mode], opts)]
  end

  def children(:default, opts) do
    shutdown_timeout = Keyword.get(opts, :shutdown_timeout)

    base = [
      worker_spec(Exq.Worker.Metadata, [opts]),
      worker_spec(Exq.Middleware.Server, [opts]),
      worker_spec(Exq.Stats.Server, [opts]),
      worker_spec(Exq.Node.Server, [opts]),
      supervisor_spec(Exq.Worker.Supervisor, [opts]),
      worker_spec(Exq.Manager.Server, [opts]),
      worker_spec(Exq.WorkerDrainer.Server, [opts], shutdown: shutdown_timeout),
      worker_spec(Exq.Enqueuer.Server, [opts]),
      worker_spec(Exq.Api.Server, [opts])
    ]

    scheduler_children =
      if opts[:scheduler_enable] do
        [worker_spec(Exq.Scheduler.Server, [opts])]
      else
        []
      end

    heartbeat_children =
      if opts[:heartbeat_enable] do
        [worker_spec(Exq.Heartbeat.Server, [opts]), worker_spec(Exq.Heartbeat.Monitor, [opts])]
      else
        []
      end

    base ++ scheduler_children ++ heartbeat_children
  end

  def children(:enqueuer, opts) do
    [worker_spec(Exq.Enqueuer.Server, [opts])]
  end

  def children(:api, opts) do
    [worker_spec(Exq.Api.Server, [opts])]
  end

  def children([:enqueuer, :api], opts) do
    [
      worker_spec(Exq.Enqueuer.Server, [opts]),
      worker_spec(Exq.Api.Server, [opts])
    ]
  end

  def children([:api, :enqueuer], opts), do: children([:enqueuer, :api], opts)

  # Child spec with worker semantics.
  defp worker_spec(module, args, overrides \\ []) do
    build_child_spec(module, args, Keyword.put(overrides, :type, :worker))
  end

  # Child spec with supervisor semantics.
  defp supervisor_spec(module, args, overrides \\ []) do
    build_child_spec(module, args, Keyword.put(overrides, :type, :supervisor))
  end

  defp build_child_spec(module, args, overrides) do
    Supervisor.child_spec(%{id: module, start: {module, :start_link, args}}, overrides)
  end
end

defmodule Exq.Support.Node do
  @moduledoc """
  Struct for node.
  """
  defstruct hostname: nil,
            identity: nil,
            started_at: nil,
            pid: nil,
            queues: [],
            labels: [],
            tag: "",
            busy: 0,
            concurrency: 0,
            quiet: false

  alias Exq.Support.Config

  @doc """
  Serialize node to JSON.
  """
  def encode(%__MODULE__{} = node) do
    Config.serializer().encode_node(node)
  end

  @doc """
  Decode JSON into node.
  """
  def decode(serialized) do
    Config.serializer().decode_node(serialized)
  end
end
defmodule Exq.Support.Opts do
  @moduledoc """
  Exq supported options.
  """

  alias Exq.Support.Coercion
  alias Exq.Support.Config

  @doc """
  Returns top supervisor's name default is `Exq.Sup`.
  """
  def top_supervisor(name) do
    name = name || Config.get(:name)
    "#{name}.Sup" |> String.to_atom()
  end

  # Splits raw options into Redix connection args and per-mode server
  # options; the registered Redix client name is prepended to `opts` so
  # both halves agree on it.
  defp conform_opts(opts) do
    mode = opts[:mode] || Config.get(:mode)
    redis = redis_client_name(opts[:name])
    opts = [{:redis, redis} | opts]

    redis_opts = redis_opts(opts)
    server_opts = server_opts(mode, opts)
    {redis_opts, server_opts}
  end

  @doc """
  Registered name of the Redix client process for the given Exq name.
  """
  def redis_client_name(name) do
    name = name || Config.get(:name)
    "#{name}.Redis.Client" |> String.to_atom()
  end

  @doc """
  Returns the Redis connection args as a log-safe string: passwords
  (including sentinel passwords) are masked before inspecting.
  """
  def redis_inspect_opts(opts \\ []) do
    args = redis_opts(opts)

    case args do
      [url, options] -> [url, mask_password(options)]
      [options] -> [mask_password(options)]
    end
    |> inspect()
  end

  @doc """
  Builds the argument list for the Redix connection.

  Shapes returned:
    * `[url, options]` when a `:url` is configured
    * `[options]` for sentinel or host/port based configuration
  """
  def redis_opts(opts \\ []) do
    redis_options = opts[:redis_options] || Config.get(:redis_options)
    socket_opts = opts[:socket_opts] || Config.get(:socket_opts) || []

    # Explicit redis_options win over the generated name/socket_opts.
    redis_options =
      Keyword.merge(
        [name: opts[:redis], socket_opts: socket_opts],
        redis_options
      )

    if url = opts[:url] || Config.get(:url) do
      [url, redis_options]
    else
      if Keyword.has_key?(redis_options, :sentinel) do
        [redis_options]
      else
        host = opts[:host] || Config.get(:host)
        port = Coercion.to_integer(opts[:port] || Config.get(:port))
        database = Coercion.to_integer(opts[:database] || Config.get(:database))
        password = opts[:password] || Config.get(:password)

        [
          Keyword.merge(
            [host: host, port: port, database: database, password: password],
            redis_options
          )
        ]
      end
    end
  end

  @doc """
  Returns `{redis_module, redis_args, gen_server_opts}`.
  """
  def redis_worker_opts(opts) do
    {redis_opts, opts} = conform_opts(opts)
    {Redix, redis_opts, opts}
  end

  # Recursively masks `:password` values in a connection option list,
  # including the nested `:sentinel` config and each `:sentinels` entry.
  defp mask_password(options) do
    options =
      if Keyword.has_key?(options, :password) do
        Keyword.update!(options, :password, fn
          nil -> nil
          _ -> "*****"
        end)
      else
        options
      end

    options =
      if Keyword.has_key?(options, :sentinel) do
        Keyword.update!(options, :sentinel, &mask_password/1)
      else
        options
      end

    if Keyword.has_key?(options, :sentinels) do
      Keyword.update!(options, :sentinels, fn sentinels ->
        Enum.map(sentinels, &mask_password/1)
      end)
    else
      options
    end
  end

  # Full option set for `:default` mode: resolves every server setting
  # (opts first, application config as fallback) plus the registered
  # names of all collaborating processes.
  defp server_opts(:default, opts) do
    scheduler_enable =
      Coercion.to_boolean(opts[:scheduler_enable] || Config.get(:scheduler_enable))

    namespace = opts[:namespace] || Config.get(:namespace)

    scheduler_poll_timeout =
      Coercion.to_integer(opts[:scheduler_poll_timeout] || Config.get(:scheduler_poll_timeout))

    poll_timeout = Coercion.to_integer(opts[:poll_timeout] || Config.get(:poll_timeout))

    shutdown_timeout =
      Coercion.to_integer(opts[:shutdown_timeout] || Config.get(:shutdown_timeout))

    manager = Exq.Manager.Server.server_name(opts[:name])
    enqueuer = Exq.Enqueuer.Server.server_name(opts[:name])
    stats = Exq.Stats.Server.server_name(opts[:name])
    scheduler = Exq.Scheduler.Server.server_name(opts[:name])
    workers_sup = Exq.Worker.Supervisor.supervisor_name(opts[:name])
    middleware = Exq.Middleware.Server.server_name(opts[:name])
    metadata = Exq.Worker.Metadata.server_name(opts[:name])

    queue_configs = opts[:queues] || Config.get(:queues)
    per_queue_concurrency = cast_concurrency(opts[:concurrency] || Config.get(:concurrency))
    queues = get_queues(queue_configs)
    concurrency = get_concurrency(queue_configs, per_queue_concurrency)
    default_middleware = Config.get(:middleware)

    # Keyword.get (not `||`) so an explicit `heartbeat_enable: false`
    # in opts is respected rather than falling through to config.
    heartbeat_enable =
      Coercion.to_boolean(Keyword.get(opts, :heartbeat_enable, Config.get(:heartbeat_enable)))

    heartbeat_interval =
      Coercion.to_integer(opts[:heartbeat_interval] || Config.get(:heartbeat_interval))

    missed_heartbeats_allowed =
      Coercion.to_integer(
        opts[:missed_heartbeats_allowed] || Config.get(:missed_heartbeats_allowed)
      )

    [
      scheduler_enable: scheduler_enable,
      namespace: namespace,
      scheduler_poll_timeout: scheduler_poll_timeout,
      workers_sup: workers_sup,
      poll_timeout: poll_timeout,
      enqueuer: enqueuer,
      metadata: metadata,
      stats: stats,
      name: opts[:name],
      manager: manager,
      scheduler: scheduler,
      queues: queues,
      redis: opts[:redis],
      concurrency: concurrency,
      middleware: middleware,
      default_middleware: default_middleware,
      mode: :default,
      shutdown_timeout: shutdown_timeout,
      heartbeat_enable: heartbeat_enable,
      heartbeat_interval: heartbeat_interval,
      missed_heartbeats_allowed: missed_heartbeats_allowed
    ]
  end

  # Reduced option set for `:enqueuer` / `:api` (or combined) modes.
  defp server_opts(mode, opts) do
    namespace = opts[:namespace] || Config.get(:namespace)
    [name: opts[:name], namespace: namespace, redis: opts[:redis], mode: mode]
  end

  # Queue names with any per-queue `{queue, concurrency}` tuples stripped.
  defp get_queues(queue_configs) do
    Enum.map(queue_configs, fn queue_config ->
      case queue_config do
        {queue, _concurrency} -> queue
        queue -> queue
      end
    end)
  end

  # Per-queue `{queue, {dequeue_module, opts}}` pairs; queues without an
  # explicit setting use the global per-queue concurrency.
  defp get_concurrency(queue_configs, per_queue_concurrency) do
    Enum.map(queue_configs, fn queue_config ->
      case queue_config do
        {queue, concurrency} -> {queue, cast_concurrency(concurrency)}
        queue -> {queue, per_queue_concurrency}
      end
    end)
  end

  @doc """
  Normalizes a concurrency setting to `{dequeue_module, opts}`.

  Accepts a ready-made `{module, opts}` tuple, `:infinity`/`:infinite`
  (atoms or strings, case-insensitive), or an integer (also as string).
  """
  def cast_concurrency({module, options}), do: {module, options}
  def cast_concurrency(:infinity), do: {Exq.Dequeue.Local, [concurrency: :infinity]}
  def cast_concurrency(:infinite), do: {Exq.Dequeue.Local, [concurrency: :infinity]}
  def cast_concurrency(x) when is_integer(x), do: {Exq.Dequeue.Local, [concurrency: x]}

  def cast_concurrency(x) when is_binary(x) do
    case x |> String.trim() |> String.downcase() do
      "infinity" -> {Exq.Dequeue.Local, [concurrency: :infinity]}
      "infinite" -> {Exq.Dequeue.Local, [concurrency: :infinity]}
      x -> {Exq.Dequeue.Local, [concurrency: Coercion.to_integer(x)]}
    end
  end
end
defmodule Exq.Support.Process do
  @moduledoc """
  Struct for in progress worker.
  """
  defstruct pid: nil, host: nil, payload: nil, run_at: nil, queue: nil

  alias Exq.Support.Config

  @doc """
  Serialize process to JSON, encoding the embedded job payload first.
  """
  def encode(%__MODULE__{} = process) do
    encoded_payload = Exq.Support.Job.encode(process.payload)

    Config.serializer().encode_process(%{
      pid: process.pid,
      host: process.host,
      payload: encoded_payload,
      run_at: process.run_at,
      queue: process.queue
    })
  end

  @doc """
  Decode JSON into process.
  """
  def decode(serialized) do
    Config.serializer().decode_process(serialized)
  end
end

defmodule Exq.Support.Randomize do
  @moduledoc """
  Helper functions for random number.
  """

  @doc """
  Returns a uniformly random integer between 0 and `number`, inclusive.
  """
  def random(number) do
    Enum.random(0..number)
  end
end
defmodule Exq.Support.Redis do
  @moduledoc false

  require Logger

  @doc """
  Rescue GenServer timeout.

  Runs `f`; when it exits with `{:timeout, _}`, logs the event and
  returns `options[:timeout_return_value]` (default `nil`) instead of
  propagating the exit.
  """
  def rescue_timeout(f, options \\ []) do
    f.()
  catch
    :exit, {:timeout, info} ->
      Logger.info("Manager timeout occurred #{inspect(info)}")
      Keyword.get(options, :timeout_return_value, nil)
  end

  @doc """
  Runs `f`, retrying up to `times` times whenever it raises or returns a
  `Redix.ConnectionError`. The final attempt is made unguarded.
  """
  def with_retry_on_connection_error(f, times) when times <= 0, do: f.()

  def with_retry_on_connection_error(f, times) when times > 0 do
    case f.() do
      {:error, %Redix.ConnectionError{} = exception} ->
        Logger.error("Retrying redis connection error: #{inspect(exception)}")
        with_retry_on_connection_error(f, times - 1)

      result ->
        result
    end
  rescue
    exception in [Redix.ConnectionError] ->
      Logger.error("Retrying redis connection error: #{inspect(exception)}")
      with_retry_on_connection_error(f, times - 1)
  end
end
defmodule Exq.Support.Time do
  @moduledoc """
  Helper functions for date and time.
  """

  import DateTime, only: [utc_now: 0, to_unix: 2, from_unix!: 2]

  @doc """
  Returns the UTC `DateTime` that is `offset` seconds (int or float)
  from now.
  """
  def offset_from_now(offset) do
    offset_micros = offset * 1_000_000

    utc_now()
    |> to_unix(:microsecond)
    |> Kernel.+(offset_micros)
    |> round()
    |> from_unix!(:microsecond)
  end

  @doc """
  Formats a time as the score used for Redis sorted sets: unix seconds
  rendered as a float string.
  """
  def time_to_score(time \\ utc_now()) do
    time
    |> unix_seconds()
    |> Float.to_string()
  end

  @doc """
  Returns the time as float unix seconds with microsecond precision.
  """
  def unix_seconds(time \\ utc_now()) do
    to_unix(time, :microsecond) / 1_000_000.0
  end

  @doc """
  Returns `{date_time_string, date_string}` for the given `DateTime`,
  with the time part truncated to whole seconds.
  """
  def format_current_date(current_date) do
    truncated = %{current_date | microsecond: {0, 0}}

    {DateTime.to_string(truncated),
     current_date |> DateTime.to_date() |> Date.to_string()}
  end
end
defmodule Exq.Worker.Metadata do
  @moduledoc """
  Provides storage functionality for job metadata.

  The metadata is associated with the worker pid and automatically discarded
  when the worker process exits.
  """

  use GenServer
  alias Exq.Support.Config

  def start_link(opts) do
    GenServer.start_link(__MODULE__, opts, name: server_name(opts[:name]))
  end

  @doc """
  Stores `value` for `pid`; the entry is removed when `pid` exits.
  """
  def associate(server, pid, value) when is_pid(pid) do
    GenServer.call(server, {:associate, pid, value})
  end

  @doc """
  Reads the value previously associated with `pid`; raises if none.
  """
  def lookup(server, pid) when is_pid(pid) do
    :ets.lookup_element(server, pid, 3)
  end

  ## ===========================================================
  ## GenServer callbacks
  ## ===========================================================

  @impl true
  def init(opts) do
    # Named (protected) table so client processes can read entries
    # directly, without a server round-trip.
    table = :ets.new(server_name(opts[:name]), [:named_table])
    {:ok, table}
  end

  @impl true
  def handle_call({:associate, pid, value}, _from, table) do
    # Monitor so the entry can be purged once the worker terminates.
    monitor_ref = Process.monitor(pid)
    true = :ets.insert(table, {pid, monitor_ref, value})
    {:reply, :ok, table}
  end

  @impl true
  def handle_info({:DOWN, monitor_ref, _type, pid, _reason}, table) do
    # Assert the entry exists before deleting — a missing row is a bug.
    [{^pid, ^monitor_ref, _value}] = :ets.lookup(table, pid)
    true = :ets.delete(table, pid)
    {:noreply, table}
  end

  def handle_info(_msg, table) do
    {:noreply, table}
  end

  # Internal Functions

  def server_name(name) do
    :"#{name || Config.get(:name)}.Worker.Metadata"
  end
end
defmodule Exq.Worker.Server do
  @moduledoc """
  Worker process is responsible for the parsing and execution of a Job.

  It then broadcasts results to Stats / Manager.

  Currently uses the `terminate` callback to track job success/failure.

  ## Initialization:
    * `job_serialized` - Full JSON payload of the Job.
    * `manager` - Manager process pid.
    * `queue` - The queue the job came from.
    * `stats` - Stats process pid.
    * `namespace` - Redis namespace
    * `host` - Host name

  Expects :work message after initialization to kickoff work.
  """
  use GenServer

  alias Exq.Middleware.Server, as: Middleware
  alias Exq.Middleware.Pipeline
  alias Exq.Worker.Metadata

  defmodule State do
    @moduledoc false
    # Everything a single job run needs; `pipeline` carries the middleware
    # pipeline across before_work / after_*_work hooks.
    defstruct job_serialized: nil,
              manager: nil,
              queue: nil,
              namespace: nil,
              stats: nil,
              host: nil,
              redis: nil,
              middleware: nil,
              pipeline: nil,
              metadata: nil,
              middleware_state: nil
  end

  def start_link(
        job_serialized,
        manager,
        queue,
        stats,
        namespace,
        host,
        redis,
        middleware,
        metadata
      ) do
    GenServer.start_link(
      __MODULE__,
      {job_serialized, manager, queue, stats, namespace, host, redis, middleware, metadata},
      []
    )
  end

  @doc """
  Kickoff work associated with worker.
  """
  def work(pid) do
    GenServer.cast(pid, :work)
  end

  ## ===========================================================
  ## GenServer callbacks
  ## ===========================================================

  def init({job_serialized, manager, queue, stats, namespace, host, redis, middleware, metadata}) do
    {
      :ok,
      %State{
        job_serialized: job_serialized,
        manager: manager,
        queue: queue,
        stats: stats,
        namespace: namespace,
        host: host,
        redis: redis,
        middleware: middleware,
        metadata: metadata
      }
    }
  end

  @doc """
  Kickoff work associated with worker.

  This step handles:
  * Parsing of JSON object
  * Preparation of target module

  Calls :dispatch to then call target module.
  """
  def handle_cast(:work, state) do
    # Run the before_work middleware chain; a middleware may terminate the
    # pipeline (e.g. duplicate unique job), in which case we skip dispatch.
    state = %{state | middleware_state: Middleware.all(state.middleware)}
    state = %{state | pipeline: before_work(state)}

    case state |> Map.fetch!(:pipeline) |> Map.get(:terminated, false) do
      # case done to run the after hooks
      true -> nil
      _ -> GenServer.cast(self(), :dispatch)
    end

    {:noreply, state}
  end

  # Dispatch work to the target module (call :perform method of target).
  def handle_cast(:dispatch, state) do
    dispatch_work(
      state.pipeline.assigns.worker_module,
      state.pipeline.assigns.job,
      state.metadata
    )

    {:noreply, state}
  end

  # Worker done with normal termination message.
  def handle_cast({:done, result}, state) do
    state =
      if !has_pipeline_after_work_ran?(state.pipeline) do
        %{state | pipeline: pipeline_after_processed_work(state, result)}
      else
        state
      end

    {:stop, :normal, state}
  end

  # The task exited :normal WITHOUT first casting {:done, _} — the job
  # never reported a result, so treat it as a failure ("Worker shutdown").
  def handle_info({:DOWN, _, _, _, :normal}, state) do
    state =
      if !has_pipeline_after_work_ran?(state.pipeline) do
        error = "Worker shutdown"
        %{state | pipeline: pipeline_after_failed_work(state, error, error)}
      else
        state
      end

    {:stop, :normal, state}
  end

  # Task crashed: render the exit reason to a string for the error
  # message, keep the raw reason for the middleware.
  def handle_info({:DOWN, _, :process, _, error}, state) do
    error_message =
      error
      |> Inspect.Algebra.to_doc(%Inspect.Opts{})
      |> Inspect.Algebra.format(%Inspect.Opts{}.width)
      |> to_string

    state =
      if !has_pipeline_after_work_ran?(state.pipeline) do
        %{state | pipeline: pipeline_after_failed_work(state, error_message, error)}
      else
        state
      end

    {:stop, :normal, state}
  end

  def handle_info(_info, state) do
    {:noreply, state}
  end

  ## ===========================================================
  ## Internal Functions
  ## ===========================================================

  # Runs the job's perform/N in a linked+monitored Task; completion is
  # reported back either via the {:done, result} cast or, on crash or
  # silent exit, via the task's DOWN message.
  def dispatch_work(worker_module, job, metadata) do
    # trap exit so that link can still track dispatch without crashing
    Process.flag(:trap_exit, true)
    worker = self()

    {:ok, pid} =
      Task.start_link(fn ->
        :ok = Metadata.associate(metadata, self(), job)
        result = apply(worker_module, :perform, job.args)
        GenServer.cast(worker, {:done, result})
      end)

    Process.monitor(pid)
  end

  defp before_work(state) do
    %Pipeline{event: :before_work, worker_pid: self()}
    |> Pipeline.assign_worker_state(state)
    |> Pipeline.chain(state.middleware_state)
  end

  defp pipeline_after_processed_work(state, result) do
    %Pipeline{event: :after_processed_work, worker_pid: self(), assigns: state.pipeline.assigns}
    |> Pipeline.assign(:result, result)
    |> Pipeline.chain(state.middleware_state)
  end

  defp pipeline_after_failed_work(state, error_message, error) do
    %Pipeline{event: :after_failed_work, worker_pid: self(), assigns: state.pipeline.assigns}
    |> Pipeline.assign(:error_message, error_message)
    |> Pipeline.assign(:error, error)
    |> Pipeline.chain(state.middleware_state)
  end

  # NOTE(review): this checks for :result/:error keys on the %Pipeline{}
  # struct itself, while pipeline_after_*_work use Pipeline.assign/3
  # (which, judging by `state.pipeline.assigns` above, appears to write
  # into `assigns`). Verify these keys actually land at the struct's top
  # level — otherwise this would always return false.
  defp has_pipeline_after_work_ran?(pipeline) do
    Map.has_key?(pipeline, :result) || Map.has_key?(pipeline, :error)
  end
end
  """

  use DynamicSupervisor

  # Starts the DynamicSupervisor registered under "<name>.Worker.Sup".
  def start_link(opts) do
    DynamicSupervisor.start_link(__MODULE__, [], name: supervisor_name(opts[:name]))
  end

  def init([]) do
    DynamicSupervisor.init(strategy: :one_for_one)
  end

  # Derives the registered supervisor name from the Exq instance name
  # (falls back to the configured :name). Atom creation is bounded: names
  # come from application config, not user input.
  def supervisor_name(name) do
    name = name || Exq.Support.Config.get(:name)
    "#{name}.Worker.Sup" |> String.to_atom()
  end

  # Starts one Exq.Worker.Server child. Workers are :temporary — they are
  # never restarted; a replacement is dequeued instead.
  # NOTE(review): if opts lacks :shutdown_timeout this spec gets
  # `shutdown: nil`, which is not a valid child_spec shutdown value —
  # confirm all callers always supply it.
  def start_child(sup, args, opts) do
    shutdown_timeout = Keyword.get(opts, :shutdown_timeout)

    spec = %{
      id: Exq.Worker.Server,
      start: {Exq.Worker.Server, :start_link, args},
      restart: :temporary,
      shutdown: shutdown_timeout
    }

    DynamicSupervisor.start_child(sup, spec)
  end

  # Returns the raw which_children/1 tuples for all running workers.
  def workers(sup) do
    DynamicSupervisor.which_children(sup)
  end

  # Number of currently active worker processes.
  def workers_count(sup) do
    DynamicSupervisor.count_children(sup)
    |> Map.get(:active)
  end
end
--------------------------------------------------------------------------------
/lib/exq/worker_drainer/server.ex:
--------------------------------------------------------------------------------
defmodule Exq.WorkerDrainer.Server do
  @moduledoc """
  The WorkerDrainer server is responsible for gracefully draining
  workers when the application is shutting down.

  When shutdown starts it instructs the Manager to stop accepting new jobs and
  then waits for all currently in progress jobs to complete.

  If the jobs do not complete within an allowed timeout the WorkerDrainer
  will shut down, allowing the rest of the supervision tree (including the
  remaining workers) to then shut down.

  The length of the grace period can be configured with the
  `shutdown_timeout` option, which defaults to 5000 ms.
15 | """ 16 | 17 | use GenServer 18 | alias Exq.{Worker, Manager} 19 | 20 | defstruct name: Exq, 21 | shutdown_timeout: 5000 22 | 23 | def server_name(name) do 24 | name = name || Exq.Support.Config.get(:name) 25 | "#{name}.WorkerDrainer" |> String.to_atom() 26 | end 27 | 28 | ## =========================================================== 29 | ## GenServer callbacks 30 | ## =========================================================== 31 | 32 | def start_link(opts \\ []) do 33 | GenServer.start_link(__MODULE__, opts, name: server_name(opts[:name])) 34 | end 35 | 36 | def init(opts) do 37 | Process.flag(:trap_exit, true) 38 | state = struct(__MODULE__, opts) 39 | {:ok, state} 40 | end 41 | 42 | def terminate(:shutdown, state) do 43 | drain_workers(state) 44 | end 45 | 46 | def terminate({:shutdown, _}, state) do 47 | drain_workers(state) 48 | end 49 | 50 | def terminate(:normal, state) do 51 | drain_workers(state) 52 | end 53 | 54 | def terminate(_, _) do 55 | :ok 56 | end 57 | 58 | ## =========================================================== 59 | ## Internal Functions 60 | ## =========================================================== 61 | 62 | defp drain_workers(state) do 63 | timer_ref = :erlang.start_timer(state.shutdown_timeout, self(), :end_of_grace_period) 64 | 65 | :ok = 66 | state.name 67 | |> Manager.Server.server_name() 68 | |> Exq.unsubscribe_all() 69 | 70 | state.name 71 | |> Worker.Supervisor.supervisor_name() 72 | |> Worker.Supervisor.workers() 73 | |> Enum.map(&Process.monitor(elem(&1, 1))) 74 | |> Enum.into(MapSet.new()) 75 | |> await_workers(timer_ref) 76 | end 77 | 78 | defp await_workers(%{map: refs}, _) when map_size(refs) == 0 do 79 | :ok 80 | end 81 | 82 | defp await_workers(worker_refs, timer_ref) do 83 | receive do 84 | {:DOWN, downed_ref, _, _, _} -> 85 | worker_refs 86 | |> MapSet.delete(downed_ref) 87 | |> await_workers(timer_ref) 88 | 89 | # Not all workers finished within grace period 90 | {:timeout, ^timer_ref, :end_of_grace_period} -> 
        # Grace period expired: give up and let supervision kill the rest.
        :ok
    end
  end
end
--------------------------------------------------------------------------------
/lib/mix/tasks/exq.run.ex:
--------------------------------------------------------------------------------
defmodule Mix.Tasks.Exq.Run do
  use Mix.Task

  @shortdoc "Starts the Exq worker"

  # Boots :exq (and its dependency applications), then blocks forever so the
  # VM stays up and workers keep processing jobs.
  def run(_args) do
    {:ok, _} = Application.ensure_all_started(:exq)
    IO.puts("Started Exq")
    :timer.sleep(:infinity)
  end
end
--------------------------------------------------------------------------------
/mix.exs:
--------------------------------------------------------------------------------
defmodule Exq.Mixfile do
  use Mix.Project

  @source_url "https://github.com/akira/exq"
  @version "0.19.0"

  def project do
    [
      app: :exq,
      version: @version,
      elixir: "~> 1.11",
      elixirc_paths: ["lib"],
      test_coverage: [tool: ExCoveralls],
      deps: deps(),
      docs: docs(),
      package: package(),
      preferred_cli_env: [
        coveralls: :test,
        "coveralls.github": :test
      ]
    ]
  end

  def application do
    [
      mod: {Exq, []},
      extra_applications: [:logger]
    ]
  end

  defp deps do
    [
      {:elixir_uuid, ">= 1.2.0"},
      {:redix, ">= 0.9.0"},
      # JSON serializers: either poison or jason may be used; both optional.
      {:poison, ">= 1.2.0 and < 6.0.0", optional: true},
      {:jason, "~> 1.0", optional: true},

      # test
      {:excoveralls, "~> 0.18", only: :test},
      {:castore, "~> 1.0", only: :test},
      {:flaky_connection,
       git: "https://github.com/ananthakumaran/flaky_connection.git", only: :test},

      # docs
      {:ex_doc, ">= 0.0.0", only: :dev, runtime: false},
      {:benchee, "~> 1.0", only: :dev, runtime: false}
    ]
  end

  defp package do
    [
      description: """
      Exq is a job processing library compatible with Resque / Sidekiq for the
      Elixir language.
55 | """, 56 | maintainers: ["Alex Kira", "zhongwencool", "Anantha Kumaran"], 57 | licenses: ["Apache-2.0"], 58 | files: ~w(lib test) ++ ~w(LICENSE mix.exs CHANGELOG.md README.md), 59 | links: %{"GitHub" => @source_url} 60 | ] 61 | end 62 | 63 | defp docs do 64 | [ 65 | extras: ["CHANGELOG.md", "README.md"], 66 | main: "readme", 67 | formatters: ["html"], 68 | source_url: @source_url, 69 | source_ref: "v#{@version}" 70 | ] 71 | end 72 | end 73 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "benchee": {:hex, :benchee, "1.0.1", "66b211f9bfd84bd97e6d1beaddf8fc2312aaabe192f776e8931cb0c16f53a521", [:mix], [{:deep_merge, "~> 1.0", [hex: :deep_merge, repo: "hexpm", optional: false]}], "hexpm", "3ad58ae787e9c7c94dd7ceda3b587ec2c64604563e049b2a0e8baafae832addb"}, 3 | "castore": {:hex, :castore, "1.0.12", "053f0e32700cbec356280c0e835df425a3be4bc1e0627b714330ad9d0f05497f", [:mix], [], "hexpm", "3dca286b2186055ba0c9449b4e95b97bf1b57b47c1f2644555879e659960c224"}, 4 | "certifi": {:hex, :certifi, "2.9.0", "6f2a475689dd47f19fb74334859d460a2dc4e3252a3324bd2111b8f0429e7e21", [:rebar3], [], "hexpm", "266da46bdb06d6c6d35fde799bcb28d36d985d424ad7c08b5bb48f5b5cdd4641"}, 5 | "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, 6 | "earmark": {:hex, :earmark, "1.3.1", "73812f447f7a42358d3ba79283cfa3075a7580a3a2ed457616d6517ac3738cb9", [:mix], [], "hexpm", "000aaeff08919e95e7aea13e4af7b2b9734577b3e6a7c50ee31ee88cab6ec4fb"}, 7 | "earmark_parser": {:hex, :earmark_parser, "1.4.12", "b245e875ec0a311a342320da0551da407d9d2b65d98f7a9597ae078615af3449", [:mix], [], "hexpm", "711e2cc4d64abb7d566d43f54b78f7dc129308a63bc103fbd88550d2174b3160"}, 8 | "elixir_uuid": {:hex, :elixir_uuid, "1.2.0", 
"ff26e938f95830b1db152cb6e594d711c10c02c6391236900ddd070a6b01271d", [:mix], [], "hexpm", "e4d6e26434471761ed45a3545239da87af7b70904dd4442a55f87d06b137c56b"}, 9 | "ex_doc": {:hex, :ex_doc, "0.24.0", "2df14354835afaabdf87cb2971ea9485d8a36ff590e4b6c250b4f60c8fdf9143", [:mix], [{:earmark_parser, "~> 1.4.0", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", "a0f4bcff21ceebea48414e49885d2a3e542200f76a2facf3f8faa54935eeb721"}, 10 | "excoveralls": {:hex, :excoveralls, "0.18.5", "e229d0a65982613332ec30f07940038fe451a2e5b29bce2a5022165f0c9b157e", [:mix], [{:castore, "~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "523fe8a15603f86d64852aab2abe8ddbd78e68579c8525ae765facc5eae01562"}, 11 | "exjsx": {:hex, :exjsx, "3.2.1", "1bc5bf1e4fd249104178f0885030bcd75a4526f4d2a1e976f4b428d347614f0f", [:mix], [{:jsx, "~> 2.8.0", [hex: :jsx, repo: "hexpm", optional: false]}], "hexpm", "b55727b206dab96feb025267e5c122ddb448f55b6648f9156b8d481215d80290"}, 12 | "flaky_connection": {:git, "https://github.com/ananthakumaran/flaky_connection.git", "92c57cb5c3e65e1beccda1db0040c9ca7cceafef", []}, 13 | "hackney": {:hex, :hackney, "1.18.1", "f48bf88f521f2a229fc7bae88cf4f85adc9cd9bcf23b5dc8eb6a1788c662c4f6", [:rebar3], [{:certifi, "~>2.9.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~>6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~>1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~>1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.3.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~>1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", 
optional: false]}], "hexpm", "a4ecdaff44297e9b5894ae499e9a070ea1888c84afdd1fd9b7b2bc384950128e"}, 14 | "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, 15 | "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, 16 | "jsx": {:hex, :jsx, "2.8.2", "7acc7d785b5abe8a6e9adbde926a24e481f29956dd8b4df49e3e4e7bcc92a018", [:mix, :rebar3], [], "hexpm", "b4c5d3230b397c8d95579e4a3d72826bb6463160130ccf4182f5be8579b5f44c"}, 17 | "makeup": {:hex, :makeup, "1.0.5", "d5a830bc42c9800ce07dd97fa94669dfb93d3bf5fcf6ea7a0c67b2e0e4a7f26c", [:mix], [{:nimble_parsec, "~> 0.5 or ~> 1.0", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "cfa158c02d3f5c0c665d0af11512fed3fba0144cf1aadee0f2ce17747fba2ca9"}, 18 | "makeup_elixir": {:hex, :makeup_elixir, "0.15.1", "b5888c880d17d1cc3e598f05cdb5b5a91b7b17ac4eaf5f297cb697663a1094dd", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.1", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "db68c173234b07ab2a07f645a5acdc117b9f99d69ebf521821d89690ae6c6ec8"}, 19 | "makeup_erlang": {:hex, :makeup_erlang, "0.1.1", "3fcb7f09eb9d98dc4d208f49cc955a34218fc41ff6b84df7c75b3e6e533cc65f", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "174d0809e98a4ef0b3309256cbf97101c6ec01c4ab0b23e926a9e17df2077cbb"}, 20 | "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, 
21 | "mimerl": {:hex, :mimerl, "1.2.0", "67e2d3f571088d5cfd3e550c383094b47159f3eee8ffa08e64106cdf5e981be3", [:rebar3], [], "hexpm", "f278585650aa581986264638ebf698f8bb19df297f66ad91b18910dfc6e19323"}, 22 | "nimble_parsec": {:hex, :nimble_parsec, "1.1.0", "3a6fca1550363552e54c216debb6a9e95bd8d32348938e13de5eda962c0d7f89", [:mix], [], "hexpm", "08eb32d66b706e913ff748f11694b17981c0b04a33ef470e33e11b3d3ac8f54b"}, 23 | "parse_trans": {:hex, :parse_trans, "3.3.1", "16328ab840cc09919bd10dab29e431da3af9e9e7e7e6f0089dd5a2d2820011d8", [:rebar3], [], "hexpm", "07cd9577885f56362d414e8c4c4e6bdf10d43a8767abb92d24cbe8b24c54888b"}, 24 | "poison": {:hex, :poison, "5.0.0", "d2b54589ab4157bbb82ec2050757779bfed724463a544b6e20d79855a9e43b24", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "11dc6117c501b80c62a7594f941d043982a1bd05a1184280c0d9166eb4d8d3fc"}, 25 | "ranch": {:hex, :ranch, "1.7.0", "9583f47160ca62af7f8d5db11454068eaa32b56eeadf984d4f46e61a076df5f2", [:rebar3], [], "hexpm", "59f7501c3a56125b2fc5684c3048fac9d043c0bf4d173941b12ca927949af189"}, 26 | "redix": {:hex, :redix, "0.10.2", "a9eabf47898aa878650df36194aeb63966d74f5bd69d9caa37babb32dbb93c5d", [:mix], [{:telemetry, "~> 0.4.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "251b329893b8f6bb115fc0e30df7f12ee641b1e4547d760cf0909416a209f9bd"}, 27 | "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.6", "cf344f5692c82d2cd7554f5ec8fd961548d4fd09e7d22f5b62482e5aeaebd4b0", [:make, :mix, :rebar3], [], "hexpm", "bdb0d2471f453c88ff3908e7686f86f9be327d065cc1ec16fa4540197ea04680"}, 28 | "telemetry": {:hex, :telemetry, "0.4.2", "2808c992455e08d6177322f14d3bdb6b625fbcfd233a73505870d8738a2f4599", [:rebar3], [], "hexpm", "2d1419bd9dda6a206d7b5852179511722e2b18812310d304620c7bd92a13fcef"}, 29 | "unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", 
"25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"}, 30 | } 31 | -------------------------------------------------------------------------------- /test/exq/heartbeat/monitor_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Exq.Heartbeat.MonitorTest do 2 | use ExUnit.Case 3 | import ExqTestUtil 4 | alias Exq.Support.Config 5 | alias Exq.Redis.Heartbeat 6 | 7 | @opts [ 8 | redis: :testredis, 9 | heartbeat_enable: true, 10 | heartbeat_interval: 200, 11 | missed_heartbeats_allowed: 3, 12 | queues: ["default"], 13 | namespace: "test", 14 | name: ExqHeartbeat, 15 | stats: ExqHeartbeat.Stats 16 | ] 17 | 18 | setup do 19 | TestRedis.setup() 20 | 21 | on_exit(fn -> 22 | wait() 23 | TestRedis.teardown() 24 | end) 25 | end 26 | 27 | test "re-enqueues orphaned jobs from dead node's backup queue" do 28 | {:ok, _} = Exq.Stats.Server.start_link(@opts) 29 | redis = :testredis 30 | 31 | servers = 32 | for i <- 1..5 do 33 | {:ok, heartbeat} = 34 | Exq.Heartbeat.Server.start_link(Keyword.put(@opts, :node_id, to_string(i))) 35 | 36 | {:ok, monitor} = 37 | Exq.Heartbeat.Monitor.start_link(Keyword.put(@opts, :node_id, to_string(i))) 38 | 39 | %{heartbeat: heartbeat, monitor: monitor} 40 | end 41 | 42 | assert {:ok, 1} = working(redis, "3") 43 | Process.sleep(1000) 44 | assert alive_nodes(redis) == ["1", "2", "3", "4", "5"] 45 | assert queue_length(redis, "3") == {:ok, 1} 46 | server = Enum.at(servers, 2) 47 | :ok = GenServer.stop(server.heartbeat) 48 | Process.sleep(2000) 49 | 50 | assert alive_nodes(redis) == ["1", "2", "4", "5"] 51 | assert queue_length(redis, "3") == {:ok, 0} 52 | end 53 | 54 | test "re-enqueues more than 10 orphaned jobs from dead node's backup queue" do 55 | {:ok, _} = Exq.Stats.Server.start_link(@opts) 56 | redis = :testredis 57 | 58 | servers = 59 | for i <- 1..5 do 60 | {:ok, heartbeat} = 61 | Exq.Heartbeat.Server.start_link(Keyword.put(@opts, :node_id, to_string(i))) 62 | 63 | {:ok, 
monitor} =
            Exq.Heartbeat.Monitor.start_link(Keyword.put(@opts, :node_id, to_string(i)))

          %{heartbeat: heartbeat, monitor: monitor}
        end

    # Push 15 jobs into node 3's backup queue (more than one re-enqueue batch).
    for i <- 1..15 do
      assert {:ok, ^i} = working(redis, "3")
    end

    Process.sleep(1000)
    assert alive_nodes(redis) == ["1", "2", "3", "4", "5"]
    assert queue_length(redis, "3") == {:ok, 15}
    server = Enum.at(servers, 2)
    :ok = GenServer.stop(server.heartbeat)
    Process.sleep(2000)

    # Node 3 stopped heartbeating; its backup queue must be fully drained.
    assert alive_nodes(redis) == ["1", "2", "4", "5"]
    assert queue_length(redis, "3") == {:ok, 0}
  end

  test "can handle connection failure" do
    with_application_env(:exq, :redis_timeout, 500, fn ->
      {:ok, _} = Exq.Stats.Server.start_link(@opts)
      redis = :testredis

      assert alive_nodes(redis) == []
      {:ok, _} = Exq.Heartbeat.Server.start_link(Keyword.put(@opts, :node_id, "1"))
      {:ok, _} = Exq.Heartbeat.Monitor.start_link(Keyword.put(@opts, :node_id, "1"))

      # Block the Redis server for 2s so heartbeat commands time out.
      spawn(fn ->
        Redix.command(:testredis, ["DEBUG", "SLEEP", "2"])
      end)

      Process.sleep(2000)
      Redix.command(:testredis, ["FLUSHALL"])
      Process.sleep(1000)
      # Heartbeat server must recover and re-register after the outage.
      assert alive_nodes(redis) == ["1"]
    end)
  end

  test "shouldn't dequeue from live node" do
    redis = :testredis
    namespace = Config.get(:namespace)
    interval = 100
    missed_heartbeats_allowed = 3
    Heartbeat.register(redis, namespace, "1")
    assert {:ok, 1} = working(redis, "1")
    Process.sleep(1000)

    {:ok, %{"1" => score}} =
      Heartbeat.dead_nodes(
        redis,
        namespace,
        interval,
        missed_heartbeats_allowed
      )

    assert queue_length(redis, "1") == {:ok, 1}
    Heartbeat.re_enqueue_backup(redis, namespace, "1", "default", score)
    assert queue_length(redis, "1") == {:ok, 0}

    Heartbeat.register(redis, namespace, "1")
    assert {:ok, 1} = working(redis, "1")
    Process.sleep(1000)

    {:ok, %{"1" => score}} =
      Heartbeat.dead_nodes(
        redis,
        namespace,
        interval,
        missed_heartbeats_allowed
      )

    # The node came back after we got the dead node list, but before we could re-enqueue
    Heartbeat.register(redis, namespace, "1")
    Heartbeat.re_enqueue_backup(redis, namespace, "1", "default", score)
    assert queue_length(redis, "1") == {:ok, 1}

    Process.sleep(1000)

    {:ok, %{"1" => score}} =
      Heartbeat.dead_nodes(
        redis,
        namespace,
        interval,
        missed_heartbeats_allowed
      )

    # The node got removed by another heartbeat monitor
    Heartbeat.unregister(redis, namespace, "1")
    Heartbeat.re_enqueue_backup(redis, namespace, "1", "default", score)
    assert queue_length(redis, "1") == {:ok, 1}
  end

  # Node ids currently present in the namespaced heartbeats sorted set.
  defp alive_nodes(redis) do
    {:ok, nodes} =
      Redix.command(redis, ["ZRANGEBYSCORE", "#{Config.get(:namespace)}:heartbeats", "0", "+inf"])

    Enum.sort(nodes)
  end

  # Simulates an in-flight job by pushing "{}" onto the node's backup queue.
  defp working(redis, node_id) do
    Redix.command(redis, [
      "LPUSH",
      Exq.Redis.JobQueue.backup_queue_key(Config.get(:namespace), node_id, "default"),
      "{}"
    ])
  end

  # Length of the node's backup queue for the "default" queue.
  defp queue_length(redis, node_id) do
    Redix.command(redis, [
      "LLEN",
      Exq.Redis.JobQueue.backup_queue_key(Config.get(:namespace), node_id, "default")
    ])
  end
end
--------------------------------------------------------------------------------
/test/failure_scenarios_test.exs:
--------------------------------------------------------------------------------
defmodule FailureScenariosTest do
  use ExUnit.Case, async: false
  import ExqTestUtil

  @moduletag :failure_scenarios

  defmodule PerformWorker do
    def perform do
      send(:exqtest, {:worked})
    end
  end

  defmodule SleepWorker do
    def perform do
      send(:exqtest, {:worked})
      Process.register(self(), :sleep_worker)
      :timer.sleep(:infinity)
    end
  end

  setup do
    TestRedis.setup()
    Application.start(:ranch)

    on_exit(fn ->
      wait()
      TestRedis.teardown()
    end)

    :ok
  end

  test "handle Redis connection lost on manager" do
    conn = FlakyConnection.start(String.to_charlist(redis_host()), redis_port())

    {:ok, _} = Exq.start_link(port: conn.port)

    wait_long()
    # Stop Redis and wait for a bit
    FlakyConnection.stop(conn)
    # Not ideal - but seems to be min time for manager to die past supervision
    :timer.sleep(5100)

    # Restart Flakey connection manually, things should be back to normal
    {:ok, agent} = Agent.start_link(fn -> [] end)

    {:ok, _} =
      :ranch.start_listener(
        conn.ref,
        100,
        :ranch_tcp,
        [port: conn.port],
        FlakyConnectionHandler,
        [~c"127.0.0.1", redis_port(), agent]
      )

    :timer.sleep(2000)

    assert_exq_up(Exq)
    Exq.stop(Exq)
  end

  test "handle Redis connection lost on enqueue" do
    conn = FlakyConnection.start(String.to_charlist(redis_host()), redis_port())

    # Start Exq but don't listen to any queues
    {:ok, _} = Exq.start_link(port: conn.port)

    wait_long()
    # Stop Redis
    FlakyConnection.stop(conn)
    wait_long()

    # enqueue with redis stopped — both immediate and scheduled enqueues
    # must surface the connection error rather than crash.
    enq_result = Exq.enqueue(Exq, "default", "FakeWorker", [])
    assert enq_result == {:error, %Redix.ConnectionError{reason: :closed}}

    enq_result = Exq.enqueue_at(Exq, "default", DateTime.utc_now(), ExqTest.PerformWorker, [])
    assert enq_result == {:error, %Redix.ConnectionError{reason: :closed}}

    # Starting Redis again and things should be back to normal
    wait_long()

    # Restart Flakey connection manually
    {:ok, agent} = Agent.start_link(fn -> [] end)

    {:ok, _} =
      :ranch.start_listener(
        conn.ref,
        100,
        :ranch_tcp,
        [port: conn.port],
        FlakyConnectionHandler,
        [~c"127.0.0.1", redis_port(), agent]
      )

    :timer.sleep(2000)

    assert_exq_up(Exq)
    Exq.stop(Exq)
  end

  test "handle supervisor tree shutdown properly" do
    {:ok, sup} = Exq.start_link()

    assert Process.alive?(sup) == true

    # Create worker that sleeps infinitely with registered process
    {:ok, _jid} = Exq.enqueue(Exq, "default", FailureScenariosTest.SleepWorker, [])

    Process.register(self(), :exqtest)

    # wait until worker started
    assert_receive {:worked}, 500

    stop_process(sup)

    # Takes 5500 for worker to stop
    :timer.sleep(5500)

    # Make sure everything is shut down properly
    assert Process.alive?(sup) == false
    assert Process.whereis(Exq.Manager.Server) == nil
    assert Process.whereis(Exq.Stats.Server) == nil
    assert Process.whereis(Exq.Scheduler.Server) == nil
    assert Process.whereis(:sleep_worker) == nil
  end
end
--------------------------------------------------------------------------------
/test/fake_mode_test.exs:
--------------------------------------------------------------------------------
defmodule FakeModeTest do
  use ExUnit.Case, async: true
  alias Exq.Support.Time

  # Raises on purpose: in :fake mode perform/1 must never actually run.
  defmodule BrokenWorker do
    def perform(_) do
      raise RuntimeError, "Unexpected"
    end
  end

  setup do
    Exq.Mock.set_mode(:fake)
  end

  describe "fake mode" do
    test "enqueue" do
      scheduled_at = DateTime.utc_now()
      assert [] = Exq.Mock.jobs()
      assert {:ok, _} = Exq.enqueue(Exq, "low", BrokenWorker, [1])
      assert {:ok, _} = Exq.enqueue_at(Exq, "low", scheduled_at, BrokenWorker, [2])
      assert {:ok, _} = Exq.enqueue_in(Exq, "low", 300, BrokenWorker, [3])

      assert [
               %Exq.Support.Job{
                 args: [1],
                 class: FakeModeTest.BrokenWorker,
                 queue: "low"
               },
               %Exq.Support.Job{
                 args: [2],
                 class: FakeModeTest.BrokenWorker,
                 queue: "low",
                 enqueued_at: ^scheduled_at
               },
               %Exq.Support.Job{
                 args: [3],
                 class: FakeModeTest.BrokenWorker,
                 queue: "low",
                 enqueued_at: scheduled_in
               }
             ] = Exq.Mock.jobs()

      # enqueue_in(300) should land ~300s in the future (wide tolerance).
      scheduled_seconds = Time.unix_seconds(scheduled_in)
      current_seconds = Time.unix_seconds(DateTime.utc_now())

      assert current_seconds + 290 < scheduled_seconds
      assert current_seconds + 310 > scheduled_seconds
    end

    test "enqueue_all" do
      scheduled_at = DateTime.utc_now()
      assert [] = Exq.Mock.jobs()

      assert {:ok, [{:ok, _}, {:ok, _}, {:ok, _}]} =
               Exq.enqueue_all(Exq, [
                 ["low", BrokenWorker, [1], []],
                 ["low", BrokenWorker, [2], [schedule: {:at, scheduled_at}]],
                 ["low", BrokenWorker, [3], [schedule: {:in, 300}]]
               ])

      assert [
               %Exq.Support.Job{
                 args: [1],
                 class: FakeModeTest.BrokenWorker,
                 queue: "low"
               },
               %Exq.Support.Job{
                 args: [2],
                 class: FakeModeTest.BrokenWorker,
                 queue: "low",
                 enqueued_at: ^scheduled_at
               },
               %Exq.Support.Job{
                 args: [3],
                 class: FakeModeTest.BrokenWorker,
                 queue: "low",
                 enqueued_at: scheduled_in
               }
             ] = Exq.Mock.jobs()

      scheduled_seconds = Time.unix_seconds(scheduled_in)
      current_seconds = Time.unix_seconds(DateTime.utc_now())

      assert current_seconds + 290 < scheduled_seconds
      assert current_seconds + 310 > scheduled_seconds
    end

    test "with predetermined job ID" do
      jid = UUID.uuid4()

      assert [] = Exq.Mock.jobs()
      assert {:ok, jid} == Exq.enqueue(Exq, "low", BrokenWorker, [], jid: jid)

      assert [
               %Exq.Support.Job{
                 args: [],
                 class: FakeModeTest.BrokenWorker,
                 queue: "low",
                 jid: ^jid
               }
             ] = Exq.Mock.jobs()
    end
  end
end
--------------------------------------------------------------------------------
/test/flaky_connection_test.exs:
--------------------------------------------------------------------------------
defmodule FlakyConnectionTest do
  use ExUnit.Case
  require Logger
  import ExqTestUtil

  @moduletag :failure_scenarios

  setup do
    TestRedis.setup()

    on_exit(fn ->
      TestRedis.teardown()
    end)

    :ok
  end

  defmodule Worker do
    def perform(_) do
      send(:tester, :done)
    end
  end

  test "redis_timeout allows for higher latency" do
    Application.start(:ranch)
    conn = FlakyConnection.start(redis_host(), redis_port())

    # Needs to be x2 latency + ~10
    Application.put_all_env([exq: [redis_timeout: 2010]], persistent: true)

    Process.register(self(), :tester)
    {:ok, sup} = Exq.start_link(name: ExqPerf, port: conn.port)

    FlakyConnection.set_latency(conn, 1000)

    {:ok, _} = Exq.enqueue(ExqPerf.Enqueuer, "default", FlakyConnectionTest.Worker, ["work"])

    stop_process(sup)
  end

  test "redis_timeout higher than 5000 without genserver_timeout" do
    Application.start(:ranch)
    conn = FlakyConnection.start(redis_host(), redis_port())

    # Needs to be x2 latency + ~10
    Application.put_all_env([exq: [redis_timeout: 11010]], persistent: true)

    Process.register(self(), :tester)

    {:ok, sup} = Exq.start_link(port: conn.port)

    FlakyConnection.set_latency(conn, 5500)

    # Redis would answer within redis_timeout, but the GenServer call still
    # uses the default 5000ms timeout — so the call must exit with :timeout.
    result =
      try do
        {:ok, _} = Exq.enqueue(Exq.Enqueuer, "default", FlakyConnectionTest.Worker, ["work"])
      catch
        :exit, {:timeout, _} -> :failed
      end

    assert result == :failed

    stop_process(sup)
  end

  test "redis_timeout higher than 5000 with genserver_timeout" do
    Application.start(:ranch)
    conn = FlakyConnection.start(redis_host(), redis_port())

    # redis_timeout needs to be x2 latency + ~10
    # genserver_timeout needs to be x2 latency + ~30
    Application.put_all_env(
      [exq: [redis_timeout: 11010, genserver_timeout: 11030]],
      persistent: true
    )

    Process.register(self(), :tester)

    {:ok, sup} = Exq.start_link(port: conn.port)

    FlakyConnection.set_latency(conn, 5500)

    {:ok, _} = Exq.enqueue(Exq.Enqueuer, "default", FlakyConnectionTest.Worker, ["work"])

    stop_process(sup)
  end
end
--------------------------------------------------------------------------------
/test/inline_mode_test.exs:
--------------------------------------------------------------------------------
defmodule InlineModeTest do
  use ExUnit.Case, async: true

  defmodule EchoWorker do
    def perform(value), do: value
  end

  setup do
    Exq.Mock.set_mode(:inline)
  end

  describe "inline mode" do
    test "enqueue should return the correct value" do
      assert {:ok, _} = Exq.enqueue(Exq, "low", EchoWorker, [1])
      assert {:ok, _} = Exq.enqueue(Exq, "low", "InlineModeTest.EchoWorker", [1])
    end

    test "enqueue_at should return the correct value" do
      assert {:ok, _} = Exq.enqueue_at(Exq, "low", DateTime.utc_now(), EchoWorker, [1])
    end

    test "enqueue_in should return the correct value" do
      assert {:ok, _} = Exq.enqueue_in(Exq, "low", 300, EchoWorker, [1])
    end

    test "enqueue_all should return the correct value" do
      assert {:ok, [{:ok, _}, {:ok, _}, {:ok, _}]} =
               Exq.enqueue_all(Exq, [
                 ["low", EchoWorker, [1], [schedule: {:in, 300}]],
                 ["low", EchoWorker, [1], [schedule: {:at, DateTime.utc_now()}]],
                 ["low", EchoWorker, [1], []]
               ])
    end

    test "enqueue should use the provided job ID, if any" do
      jid = UUID.uuid4()
      assert {:ok, jid} == Exq.enqueue(Exq, "low", EchoWorker, [1], jid: jid)
    end
  end
end
--------------------------------------------------------------------------------
/test/job_queue_test.exs:
--------------------------------------------------------------------------------
defmodule JobQueueTest do
  use ExUnit.Case
  alias Exq.Redis.JobQueue
  alias Exq.Support.Job
  alias Exq.Support.Time

  import ExqTestUtil

  @host ~c"host-name"

  setup do
    TestRedis.setup()

    on_exit(fn ->
      TestRedis.teardown()
    end)
  end

  # Dequeues from the given queues and asserts on the outcome:
  # boolean — whether anything was dequeued; integer — how many jobs;
  # map — the single dequeued job must contain those key/values.
  def assert_dequeue_job(queues, expected_result) do
    jobs = JobQueue.dequeue(:testredis, "test", @host, queues)
    result = jobs |> Enum.reject(fn {:ok, {status, _}} -> status == :none end)

    cond do
      is_boolean(expected_result) ->
        assert expected_result == !Enum.empty?(result)

      is_integer(expected_result) ->
        assert expected_result == Enum.count(result)

      is_map(expected_result) ->
        [{:ok, {job_string, _queue}}] = result
        job = Jason.decode!(job_string)
        assert expected_result == Map.take(job, Map.keys(expected_result))
    end
  end

  test "enqueue/dequeue single queue" do
    JobQueue.enqueue(:testredis, "test", "default", MyWorker, [], [])
    [{:ok, {deq, _}}] = JobQueue.dequeue(:testredis, "test", @host, ["default"])
    assert deq != :none
    [{:ok, {deq, _}}] = JobQueue.dequeue(:testredis, "test", @host, ["default"])
    assert deq == :none
  end

  test "enqueue/dequeue multi queue" do
    JobQueue.enqueue(:testredis, "test", "default", MyWorker, [], [])
    JobQueue.enqueue(:testredis, "test", "myqueue", MyWorker, [], [])
    assert_dequeue_job(["default", "myqueue"], 2)
    assert_dequeue_job(["default", "myqueue"], false)
  end

  test "backup queue" do
    JobQueue.enqueue(:testredis, "test", "default", MyWorker, [1], [])
    JobQueue.enqueue(:testredis, "test", "default", MyWorker, [2], [])
    assert_dequeue_job(["default"], %{"args" => [1]})
    assert_dequeue_job(["default"], %{"args" => [2]})
    assert_dequeue_job(["default"], false)
    JobQueue.enqueue(:testredis, "test", "default", MyWorker, [3], [])
    JobQueue.enqueue(:testredis, "test", "default", MyWorker, [4], [])
    JobQueue.re_enqueue_backup(:testredis, "test", @host, "default")
    assert_dequeue_job(["default"], %{"args" => [1]})
    assert_dequeue_job(["default"], %{"args" => [2]})
    assert_dequeue_job(["default"], %{"args" => [3]})
    assert_dequeue_job(["default"], %{"args" => [4]})
    assert_dequeue_job(["default"], false)
  end

  test "backup queue re enqueues all jobs" do
    for i <- 1..15 do
      JobQueue.enqueue(:testredis, "test", "default", MyWorker, [i], [])
      assert_dequeue_job(["default"], %{"args" => [i]})
    end

    for i <- 16..30 do
      JobQueue.enqueue(:testredis, "test", "default", MyWorker, [i], [])
    end

    JobQueue.re_enqueue_backup(:testredis, "test", @host, "default")

    for i <- 1..30 do
      assert_dequeue_job(["default"], %{"args" => [i]})
    end

    assert_dequeue_job(["default"], false)
  end

  test "remove from backup queue" do
    JobQueue.enqueue(:testredis, "test", "default", MyWorker, [], [])
    JobQueue.enqueue(:testredis, "test", "default", MyWorker, [], [])

    [{:ok, {job, _}}] = JobQueue.dequeue(:testredis, "test", @host, ["default"])
    assert_dequeue_job(["default"], true)

    # remove job from queue
    JobQueue.remove_job_from_backup(:testredis, "test", @host, "default", job)

    # should only have 1 job now
    JobQueue.re_enqueue_backup(:testredis, "test", @host, "default")

    assert_dequeue_job(["default"], true)
    assert_dequeue_job(["default"], false)
  end

  test "scheduler_dequeue single queue" do
    JobQueue.enqueue_in(:testredis, "test", "default", 0, MyWorker, [], [])
    JobQueue.enqueue_in(:testredis, "test", "default", 0, MyWorker, [], [])
    assert JobQueue.scheduler_dequeue(:testredis, "test") == 2
    assert_dequeue_job(["default"], true)
    assert_dequeue_job(["default"], true)
    assert_dequeue_job(["default"], false)
  end

  test "scheduler_dequeue multi queue" do
    JobQueue.enqueue_in(:testredis, "test", "default", -1, MyWorker, [], [])
    JobQueue.enqueue_in(:testredis, "test", "myqueue", -1, MyWorker, [], [])
    assert JobQueue.scheduler_dequeue(:testredis, "test") == 2
    assert_dequeue_job(["default", "myqueue"], 2)
    assert_dequeue_job(["default", "myqueue"], false)
  end

  test "scheduler_dequeue enqueue_at" do
    JobQueue.enqueue_at(:testredis, "test", "default", DateTime.utc_now(), MyWorker, [], [])
    {jid, job, job_serialized} = JobQueue.to_job_serialized("retry", MyWorker, [], retry: true)

    JobQueue.do_enqueue_job_at(
      :testredis,
      "test",
      job,
      job_serialized,
      jid,
      DateTime.utc_now(),
      "test:retry"
    )

    assert JobQueue.scheduler_dequeue(:testredis, "test") == 2
    assert_dequeue_job(["default"], true)
    assert_dequeue_job(["default"], false)

    assert_dequeue_job(["retry"], true)
    assert_dequeue_job(["retry"], false)
  end

  test "retry job" do
    with_application_env(:exq, :max_retries, 1, fn ->
      JobQueue.retry_or_fail_job(
        :testredis,
        "test",
        %{
          retry_count: 0,
          retry: true,
          queue: "default",
          class: "MyWorker",
          jid: UUID.uuid4(),
          error_class: nil,
          error_message: "failed",
          retried_at: Time.unix_seconds(),
          failed_at: Time.unix_seconds(),
          enqueued_at: Time.unix_seconds(),
          finished_at: nil,
          processor: nil,
          args: [],
          unique_for: nil,
          unique_until: nil,
          unlocks_at: nil
        },
        %RuntimeError{}
      )

      # retry_count 0 < max_retries 1, so the job lands on the retry set.
      assert JobQueue.queue_size(:testredis, "test", :retry) == 1
    end)
end 173 | 174 | test "scheduler_dequeue max_score" do 175 | add_usecs = fn time, offset -> 176 | base = time |> DateTime.to_unix(:microsecond) 177 | DateTime.from_unix!(base + offset, :microsecond) 178 | end 179 | 180 | JobQueue.enqueue_in(:testredis, "test", "default", 300, MyWorker, [], []) 181 | now = DateTime.utc_now() 182 | time1 = add_usecs.(now, 140_000_000) 183 | JobQueue.enqueue_at(:testredis, "test", "default", time1, MyWorker, [], []) 184 | time2 = add_usecs.(now, 150_000_000) 185 | JobQueue.enqueue_at(:testredis, "test", "default", time2, MyWorker, [], []) 186 | time2a = add_usecs.(now, 151_000_000) 187 | time2b = add_usecs.(now, 159_000_000) 188 | time3 = add_usecs.(now, 160_000_000) 189 | JobQueue.enqueue_at(:testredis, "test", "default", time3, MyWorker, [], []) 190 | time4 = add_usecs.(now, 160_000_001) 191 | JobQueue.enqueue_at(:testredis, "test", "default", time4, MyWorker, [], []) 192 | time5 = add_usecs.(now, 300_000_000) 193 | 194 | api_state = %Exq.Api.Server.State{redis: :testredis, namespace: "test"} 195 | assert JobQueue.queue_size(api_state.redis, api_state.namespace, "default") == 0 196 | assert JobQueue.queue_size(api_state.redis, api_state.namespace, :scheduled) == 5 197 | 198 | assert JobQueue.scheduler_dequeue(:testredis, "test", Time.time_to_score(time2a)) == 2 199 | assert JobQueue.scheduler_dequeue(:testredis, "test", Time.time_to_score(time2b)) == 0 200 | assert JobQueue.scheduler_dequeue(:testredis, "test", Time.time_to_score(time3)) == 1 201 | assert JobQueue.scheduler_dequeue(:testredis, "test", Time.time_to_score(time3)) == 0 202 | assert JobQueue.scheduler_dequeue(:testredis, "test", Time.time_to_score(time4)) == 1 203 | assert JobQueue.scheduler_dequeue(:testredis, "test", Time.time_to_score(time5)) == 1 204 | 205 | assert JobQueue.queue_size(api_state.redis, api_state.namespace, "default") == 5 206 | assert JobQueue.queue_size(api_state.redis, api_state.namespace, :scheduled) == 0 207 | 208 | assert_dequeue_job(["default"], 
true) 209 | assert_dequeue_job(["default"], true) 210 | assert_dequeue_job(["default"], true) 211 | assert_dequeue_job(["default"], true) 212 | assert_dequeue_job(["default"], true) 213 | assert_dequeue_job(["default"], false) 214 | end 215 | 216 | test "scheduler_dequeue dequeues more than 10 jobs " do 217 | now = DateTime.utc_now() 218 | 219 | for _ <- 1..15 do 220 | JobQueue.enqueue_at(:testredis, "test", "default", now, MyWorker, [], []) 221 | end 222 | 223 | assert JobQueue.scheduler_dequeue(:testredis, "test") == 15 224 | 225 | for _ <- 1..15 do 226 | assert_dequeue_job(["default"], true) 227 | end 228 | 229 | assert_dequeue_job(["default"], false) 230 | end 231 | 232 | test "full_key" do 233 | assert JobQueue.full_key("exq", "k1") == "exq:k1" 234 | assert JobQueue.full_key("", "k1") == "k1" 235 | assert JobQueue.full_key(nil, "k1") == "k1" 236 | end 237 | 238 | test "creates and returns a jid" do 239 | {:ok, jid} = JobQueue.enqueue(:testredis, "test", "default", MyWorker, [], []) 240 | assert jid != nil 241 | 242 | [{:ok, {job_str, _}}] = JobQueue.dequeue(:testredis, "test", @host, ["default"]) 243 | expected_max_retries = Exq.Support.Config.get(:max_retries) 244 | assert %{"jid" => ^jid, "retry" => ^expected_max_retries} = Jason.decode!(job_str) 245 | end 246 | 247 | test "to_job_serialized using module atom" do 248 | {_jid, _job, serialized} = JobQueue.to_job_serialized("default", MyWorker, [], max_retries: 0) 249 | job = Job.decode(serialized) 250 | assert job.class == "MyWorker" 251 | assert job.retry == 0 252 | end 253 | 254 | test "to_job_serialized using module string" do 255 | {_jid, _job, serialized} = 256 | JobQueue.to_job_serialized("default", "MyWorker/perform", [], max_retries: 10) 257 | 258 | job = Job.decode(serialized) 259 | assert job.class == "MyWorker/perform" 260 | assert job.retry == 10 261 | end 262 | 263 | test "to_job_serialized using existing job ID" do 264 | jid = UUID.uuid4() 265 | {^jid, _job, serialized} = 
JobQueue.to_job_serialized("default", MyWorker, [], jid: jid) 266 | 267 | job = Job.decode(serialized) 268 | assert job.jid == jid 269 | end 270 | 271 | test "max_retries from runtime environment" do 272 | System.put_env("EXQ_MAX_RETRIES", "3") 273 | on_exit(fn -> System.delete_env("EXQ_MAX_RETRIES"); reset_config() end) 274 | Application.put_all_env([exq: [max_retries: {:system, "EXQ_MAX_RETRIES"}]], persistent: true) 275 | 276 | {:ok, jid} = JobQueue.enqueue(:testredis, "test", "default", MyWorker, [], []) 277 | assert jid != nil 278 | 279 | [{:ok, {job_str, _}}] = JobQueue.dequeue(:testredis, "test", @host, ["default"]) 280 | assert %{"jid" => ^jid, "retry" => 3} = Jason.decode!(job_str) 281 | end 282 | end 283 | -------------------------------------------------------------------------------- /test/job_stat_test.exs: -------------------------------------------------------------------------------- 1 | defmodule JobStatTest do 2 | use ExUnit.Case 3 | 4 | alias Exq.Redis.JobStat 5 | alias Exq.Redis.Connection 6 | alias Exq.Redis.JobQueue 7 | alias Exq.Support.Process 8 | alias Exq.Support.Job 9 | alias Exq.Support.Time 10 | alias Exq.Support.Node 11 | 12 | defmodule EmptyMethodWorker do 13 | def perform do 14 | {:ok, "test"} 15 | end 16 | end 17 | 18 | def dead_jobs_count(redis) do 19 | {:ok, count} = Connection.q(redis, ["ZCOUNT", "test:dead", "-inf", "+inf"]) 20 | count 21 | end 22 | 23 | def enqueue_and_fail_job(redis) do 24 | Connection.incr!(redis, "test:stat:failed") 25 | {:ok, jid} = Exq.enqueue(Exq, "queue", EmptyMethodWorker, []) 26 | {:ok, _job} = JobQueue.find_job(redis, "test", jid, "queue") 27 | JobQueue.fail_job(redis, "test", %Exq.Support.Job{jid: jid}, "forced error") 28 | 29 | {:ok, jid} 30 | end 31 | 32 | def create_process_info(host) do 33 | process_info = %Process{ 34 | pid: inspect(self()), 35 | host: host, 36 | payload: %Job{}, 37 | run_at: Time.unix_seconds() 38 | } 39 | 40 | serialized = Exq.Support.Process.encode(process_info) 41 | {process_info, serialized} 42 | end 43 | 44 | setup do 45 | TestRedis.setup() 46
| on_exit(fn -> TestRedis.teardown() end) 47 | {:ok, _} = Exq.start_link() 48 | 49 | :ok 50 | end 51 | 52 | test "show realtime statistics" do 53 | {:ok, time1} = DateTime.from_unix(1_452_173_400_000, :millisecond) 54 | {:ok, time2} = DateTime.from_unix(1_452_175_515_000, :millisecond) 55 | 56 | JobStat.record_processed(:testredis, "test", nil, time1) 57 | JobStat.record_processed(:testredis, "test", nil, time2) 58 | JobStat.record_processed(:testredis, "test", nil, time1) 59 | JobStat.record_failure(:testredis, "test", nil, nil, time1) 60 | JobStat.record_failure(:testredis, "test", nil, nil, time2) 61 | 62 | {:ok, _} = Exq.start_link(mode: :api, name: ExqApi) 63 | {:ok, failures, successes} = Exq.Api.realtime_stats(ExqApi.Api) 64 | 65 | assert List.keysort(failures, 0) == [ 66 | {"2016-01-07 13:30:00Z", "1"}, 67 | {"2016-01-07 14:05:15Z", "1"} 68 | ] 69 | 70 | assert List.keysort(successes, 0) == [ 71 | {"2016-01-07 13:30:00Z", "2"}, 72 | {"2016-01-07 14:05:15Z", "1"} 73 | ] 74 | end 75 | 76 | test "show realtime statistics with no data" do 77 | {:ok, _} = Exq.start_link(mode: :api, name: ExqApi) 78 | 79 | {:ok, failures, successes} = Exq.Api.realtime_stats(ExqApi.Api) 80 | 81 | assert List.keysort(failures, 0) == [] 82 | assert List.keysort(successes, 0) == [] 83 | end 84 | 85 | test "remove queue" do 86 | Exq.enqueue(Exq, "test_queue", EmptyMethodWorker, []) 87 | assert Connection.smembers!(:testredis, "test:queues") == ["test_queue"] 88 | assert Connection.llen!(:testredis, "test:queue:test_queue") == 1 89 | 90 | JobStat.remove_queue(:testredis, "test", "test_queue") 91 | assert Connection.smembers!(:testredis, "test:queues") == [] 92 | assert Connection.llen!(:testredis, "test:queue:test_queue") == 0 93 | end 94 | 95 | test "remove failed" do 96 | {:ok, jid} = enqueue_and_fail_job(:testredis) 97 | assert dead_jobs_count(:testredis) == 1 98 | 99 | JobStat.remove_failed(:testredis, "test", jid) 100 | assert dead_jobs_count(:testredis) == 0 101 | assert Connection.get!(:testredis, 
"test:stat:failed") == "0" 102 | end 103 | 104 | test "prune dead nodes" do 105 | namespace = "test" 106 | JobStat.node_ping(:testredis, namespace, %Node{identity: "host123", busy: 1}) 107 | JobStat.node_ping(:testredis, namespace, %Node{identity: "host456", busy: 1}) 108 | 109 | {process_info, serialized} = create_process_info("host456") 110 | JobStat.add_process(:testredis, namespace, process_info, serialized) 111 | assert Enum.count(Exq.Redis.JobStat.processes(:testredis, namespace)) == 1 112 | 113 | JobStat.prune_dead_nodes(:testredis, namespace) 114 | assert ["host123", "host456"] == JobStat.node_ids(:testredis, namespace) |> Enum.sort() 115 | Connection.del!(:testredis, "test:host456") 116 | assert ["host123", "host456"] == JobStat.node_ids(:testredis, namespace) |> Enum.sort() 117 | JobStat.prune_dead_nodes(:testredis, namespace) 118 | assert ["host123"] == JobStat.node_ids(:testredis, namespace) 119 | assert Enum.count(Exq.Redis.JobStat.processes(:testredis, namespace)) == 0 120 | end 121 | 122 | test "clear failed" do 123 | Enum.each([1, 2, 3], fn _ -> enqueue_and_fail_job(:testredis) end) 124 | assert dead_jobs_count(:testredis) == 3 125 | 126 | JobStat.clear_failed(:testredis, "test") 127 | assert dead_jobs_count(:testredis) == 0 128 | assert Connection.get!(:testredis, "test:stat:failed") == "0" 129 | end 130 | 131 | test "add and remove process" do 132 | namespace = "test" 133 | JobStat.node_ping(:testredis, "test", %Node{identity: "host123", busy: 1}) 134 | {process_info, serialized} = create_process_info("host123") 135 | JobStat.add_process(:testredis, namespace, process_info, serialized) 136 | assert Enum.count(Exq.Redis.JobStat.processes(:testredis, namespace)) == 1 137 | 138 | JobStat.remove_process(:testredis, namespace, process_info) 139 | assert Enum.count(Exq.Redis.JobStat.processes(:testredis, namespace)) == 0 140 | end 141 | 142 | test "remove processes on boot" do 143 | namespace = "test" 144 | 145 | JobStat.node_ping(:testredis, "test", 
%Node{identity: "host123", busy: 1}) 146 | JobStat.node_ping(:testredis, "test", %Node{identity: "host456", busy: 1}) 147 | 148 | # add processes for multiple hosts 149 | {local_process, serialized1} = create_process_info("host123") 150 | JobStat.add_process(:testredis, namespace, local_process, serialized1) 151 | 152 | {remote_process, serialized2} = create_process_info("host456") 153 | JobStat.add_process(:testredis, namespace, remote_process, serialized2) 154 | 155 | assert Enum.count(Exq.Redis.JobStat.processes(:testredis, namespace)) == 2 156 | 157 | # Should cleanup only the host that is passed in 158 | JobStat.cleanup_processes(:testredis, namespace, "host123") 159 | processes = Exq.Redis.JobStat.processes(:testredis, namespace) 160 | assert Enum.count(processes) == 1 161 | assert Enum.find(processes, fn process -> process.host == "host456" end) != nil 162 | end 163 | end 164 | -------------------------------------------------------------------------------- /test/json_serializer_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Exq.Serializers.JsonSerializer.Test do 2 | use ExUnit.Case 3 | alias Exq.Serializers.JsonSerializer 4 | 5 | test "encode" do 6 | map = %{} 7 | json = "{}" 8 | assert JsonSerializer.encode(map) == {:ok, json} 9 | end 10 | 11 | test "encode!" do 12 | map = %{} 13 | json = "{}" 14 | assert JsonSerializer.encode!(map) == json 15 | end 16 | 17 | test "decode" do 18 | map = %{} 19 | json = "{}" 20 | assert JsonSerializer.decode(json) == {:ok, map} 21 | end 22 | 23 | test "decode!" 
do 24 | map = %{} 25 | json = "{}" 26 | assert JsonSerializer.decode!(json) == map 27 | end 28 | end 29 | -------------------------------------------------------------------------------- /test/metadata_test.exs: -------------------------------------------------------------------------------- 1 | defmodule MetadataTest do 2 | use ExUnit.Case 3 | alias Exq.Worker.Metadata 4 | 5 | @job %{args: [1, 2, 3]} 6 | 7 | setup do 8 | {:ok, _} = Metadata.start_link(%{}) 9 | {:ok, metadata: Metadata.server_name(nil)} 10 | end 11 | 12 | test "associate job to worker pid", %{metadata: metadata} do 13 | pid = 14 | spawn_link(fn -> 15 | receive do 16 | :fetch_and_quit -> 17 | assert Exq.worker_job() == @job 18 | :ok 19 | end 20 | end) 21 | 22 | assert Metadata.associate(metadata, pid, @job) == :ok 23 | assert Metadata.lookup(metadata, pid) == @job 24 | assert Exq.worker_job(Exq, pid) == @job 25 | send(pid, :fetch_and_quit) 26 | Process.sleep(50) 27 | 28 | assert_raise ArgumentError, fn -> 29 | Metadata.lookup(metadata, pid) 30 | end 31 | end 32 | 33 | test "custom name" do 34 | {:ok, _} = Metadata.start_link(%{name: ExqTest}) 35 | 36 | pid = 37 | spawn_link(fn -> 38 | receive do 39 | :fetch_and_quit -> 40 | assert Exq.worker_job(ExqTest) == @job 41 | :ok 42 | end 43 | end) 44 | 45 | assert Metadata.associate(Metadata.server_name(ExqTest), pid, @job) == :ok 46 | assert Exq.worker_job(ExqTest, pid) == @job 47 | send(pid, :fetch_and_quit) 48 | Process.sleep(50) 49 | end 50 | end 51 | -------------------------------------------------------------------------------- /test/middleware_test.exs: -------------------------------------------------------------------------------- 1 | defmodule MiddlewareTest do 2 | use ExUnit.Case 3 | 4 | alias Exq.Middleware.Server, as: Middleware 5 | alias Exq.Worker.Server, as: Worker 6 | import ExqTestUtil 7 | 8 | defmodule NoArgWorker do 9 | def perform do 10 | end 11 | end 12 | 13 | defmodule MissingMethodWorker do 14 | end 15 | 16 | defmodule 
ConstantWorker do 17 | def perform do 18 | 42 19 | end 20 | end 21 | 22 | defmodule RaiseWorker do 23 | def perform do 24 | raise "error" 25 | end 26 | end 27 | 28 | defmodule EchoMiddleware do 29 | @behaviour Exq.Middleware.Behaviour 30 | 31 | import Exq.Middleware.Pipeline 32 | 33 | def before_work(pipeline) do 34 | send(:middlewaretest, :before_work) 35 | pipeline 36 | end 37 | 38 | def after_processed_work(pipeline) do 39 | send(:middlewaretest, {:after_processed_work, pipeline.assigns.result}) 40 | pipeline 41 | end 42 | 43 | def after_failed_work(pipeline) do 44 | send(:middlewaretest, {:after_failed_work, pipeline.assigns.error}) 45 | pipeline 46 | end 47 | end 48 | 49 | defmodule MyMiddleware do 50 | @behaviour Exq.Middleware.Behaviour 51 | 52 | import Exq.Middleware.Pipeline 53 | 54 | def before_work(pipeline) do 55 | send(:middlewaretest, :before_work) 56 | assign(pipeline, :process_info, 1) 57 | end 58 | 59 | def after_processed_work(pipeline) do 60 | send(:middlewaretest, :after_processed_work) 61 | pipeline 62 | end 63 | 64 | def after_failed_work(pipeline) do 65 | send(:middlewaretest, :after_failed_work) 66 | pipeline 67 | end 68 | end 69 | 70 | defmodule HaltedMiddleware do 71 | @behaviour Exq.Middleware.Behaviour 72 | 73 | import Exq.Middleware.Pipeline 74 | 75 | def before_work(pipeline) do 76 | send(:middlewaretest, :before_work_halted) 77 | halt(pipeline) 78 | end 79 | 80 | def after_processed_work(pipeline) do 81 | send(:middlewaretest, :after_processed_work_halted) 82 | halt(pipeline) 83 | end 84 | 85 | def after_failed_work(pipeline) do 86 | halt(pipeline) 87 | end 88 | end 89 | 90 | defmodule TerminatedMiddleware do 91 | @behaviour Exq.Middleware.Behaviour 92 | 93 | import Exq.Middleware.Pipeline 94 | 95 | def before_work(pipeline) do 96 | send(:middlewaretest, :before_work_terminated) 97 | terminate(pipeline) 98 | end 99 | 100 | def after_processed_work(pipeline) do 101 | send(:middlewaretest, :after_processed_work_terminated) 102 | 
terminate(pipeline) 103 | end 104 | 105 | def after_failed_work(pipeline) do 106 | terminate(pipeline) 107 | end 108 | end 109 | 110 | defmodule StubServer do 111 | use GenServer 112 | 113 | def init(args) do 114 | {:ok, args} 115 | end 116 | 117 | def handle_cast(_msg, state) do 118 | {:noreply, state} 119 | end 120 | 121 | def handle_call(_msg, _from, state) do 122 | {:reply, {:ok, state}, state} 123 | end 124 | end 125 | 126 | def start_worker({class, args, middleware}) do 127 | job = 128 | "{ \"queue\": \"default\", \"class\": \"#{class}\", \"args\": #{args}, \"jid\": \"123\" }" 129 | 130 | {:ok, stub_server} = GenServer.start_link(MiddlewareTest.StubServer, []) 131 | 132 | {:ok, metadata} = Exq.Worker.Metadata.start_link(%{}) 133 | 134 | Worker.start_link( 135 | job, 136 | stub_server, 137 | "default", 138 | stub_server, 139 | "exq", 140 | "localhost", 141 | :testredis, 142 | middleware, 143 | metadata 144 | ) 145 | end 146 | 147 | setup do 148 | TestRedis.setup() 149 | 150 | on_exit(fn -> 151 | wait() 152 | TestRedis.teardown() 153 | end) 154 | 155 | Process.register(self(), :middlewaretest) 156 | {:ok, middleware} = GenServer.start_link(Middleware, []) 157 | {:ok, middleware: middleware} 158 | end 159 | 160 | test "calls chain for processed work", %{middleware: middleware} do 161 | {:ok, worker} = start_worker({"MiddlewareTest.NoArgWorker", "[]", middleware}) 162 | Middleware.push(middleware, Exq.Middleware.Job) 163 | Middleware.push(middleware, MyMiddleware) 164 | Worker.work(worker) 165 | state = :sys.get_state(worker) 166 | 167 | assert state.pipeline.assigns.process_info == 1 168 | assert_receive :before_work 169 | assert_receive :after_processed_work 170 | end 171 | 172 | test "assigns result for processed work", %{middleware: middleware} do 173 | {:ok, worker} = start_worker({"MiddlewareTest.ConstantWorker", "[]", middleware}) 174 | Middleware.push(middleware, Exq.Middleware.Job) 175 | Middleware.push(middleware, EchoMiddleware) 176 | 
Worker.work(worker) 177 | 178 | assert_receive :before_work 179 | assert_receive {:after_processed_work, 42} 180 | end 181 | 182 | test "calls chain for failed work", %{middleware: middleware} do 183 | {:ok, worker} = start_worker({"MiddlewareTest.MissingMethodWorker", "[]", middleware}) 184 | Middleware.push(middleware, Exq.Middleware.Job) 185 | Middleware.push(middleware, MyMiddleware) 186 | Worker.work(worker) 187 | state = :sys.get_state(worker) 188 | 189 | assert state.pipeline.assigns.process_info == 1 190 | assert_receive :before_work 191 | assert_receive :after_failed_work 192 | end 193 | 194 | test "assigns error for failed work", %{middleware: middleware} do 195 | {:ok, worker} = start_worker({"MiddlewareTest.RaiseWorker", "[]", middleware}) 196 | Middleware.push(middleware, Exq.Middleware.Job) 197 | Middleware.push(middleware, EchoMiddleware) 198 | Worker.work(worker) 199 | 200 | assert_receive :before_work 201 | assert_receive {:after_failed_work, {%RuntimeError{message: "error"}, _stack}} 202 | end 203 | 204 | test "halts middleware execution", %{middleware: middleware} do 205 | {:ok, worker} = start_worker({"MiddlewareTest.NoArgWorker", "[]", middleware}) 206 | Middleware.push(middleware, Exq.Middleware.Job) 207 | Middleware.push(middleware, HaltedMiddleware) 208 | Middleware.push(middleware, MyMiddleware) 209 | 210 | Worker.work(worker) 211 | state = :sys.get_state(worker) 212 | 213 | refute Map.has_key?(state.pipeline.assigns, :process_info) 214 | assert_receive :before_work_halted 215 | assert_receive :after_processed_work_halted 216 | refute_receive :before_work 217 | refute_receive :after_processed_work 218 | end 219 | 220 | test "terminates middleware execution", %{middleware: middleware} do 221 | {:ok, worker} = start_worker({"MiddlewareTest.NoArgWorker", "[]", middleware}) 222 | Middleware.push(middleware, Exq.Middleware.Job) 223 | Middleware.push(middleware, TerminatedMiddleware) 224 | Middleware.push(middleware, MyMiddleware) 225 | 226 | 
Worker.work(worker) 227 | state = :sys.get_state(worker) 228 | 229 | refute Map.has_key?(state.pipeline.assigns, :process_info) 230 | assert_receive :before_work_terminated 231 | refute_receive :before_work 232 | refute_receive :after_processed_work_terminated 233 | end 234 | 235 | test "restores default middleware after process kill" do 236 | {:ok, _pid} = Exq.start_link() 237 | 238 | chain = [ 239 | Exq.Middleware.Stats, 240 | Exq.Middleware.Job, 241 | Exq.Middleware.Manager, 242 | Exq.Middleware.Unique, 243 | Exq.Middleware.Telemetry 244 | ] 245 | 246 | assert Middleware.all(Middleware) == chain 247 | 248 | pid = Process.whereis(Middleware) 249 | Process.exit(pid, :kill) 250 | wait() 251 | 252 | assert Middleware.all(Middleware) == chain 253 | end 254 | end 255 | -------------------------------------------------------------------------------- /test/mode_test.exs: -------------------------------------------------------------------------------- 1 | defmodule ModeTest do 2 | use ExUnit.Case 3 | 4 | test "supervisor waits for worker drainer to terminate" do 5 | children = Exq.Support.Mode.children(shutdown_timeout: 10_000) 6 | 7 | worker_drainer_child_spec = find_child(children, Exq.WorkerDrainer.Server) 8 | assert %{shutdown: 10_000} = worker_drainer_child_spec 9 | end 10 | 11 | defp find_child(children, child_id) do 12 | Enum.find(children, fn %{id: id} -> id == child_id end) 13 | end 14 | end 15 | -------------------------------------------------------------------------------- /test/performance_test.exs: -------------------------------------------------------------------------------- 1 | defmodule PerformanceTest do 2 | use ExUnit.Case 3 | require Logger 4 | import ExqTestUtil 5 | 6 | setup do 7 | TestRedis.setup() 8 | 9 | on_exit(fn -> 10 | TestRedis.teardown() 11 | end) 12 | 13 | :ok 14 | end 15 | 16 | defmodule Worker do 17 | def perform(arg) do 18 | if arg == "last" do 19 | Logger.info("Last message detected") 20 | send(:tester, :done) 21 | end 22 | end 23 | 
end 24 | 25 | defmodule FlakeyWorker do 26 | def perform(instruction) do 27 | case instruction do 28 | "done" -> send(:tester, :done) 29 | 1 -> raise "error" 30 | 2 -> Process.exit(self(), :normal) 31 | 3 -> Process.exit(self(), :kill) 32 | 4 -> 1 / 0 33 | 5 -> 1 = 0 34 | 6 -> Exq.worker_job(Exq) 35 | end 36 | end 37 | end 38 | 39 | test "test to_job_serialized performance" do 40 | started = :os.timestamp() 41 | max_timeout_ms = 1_000 42 | 43 | for _ <- 1..1000, 44 | do: 45 | Exq.Redis.JobQueue.to_job_serialized( 46 | "default", 47 | PerformanceTest.Worker, 48 | ["keep_on_trucking"], 49 | max_retries: 10 50 | ) 51 | 52 | elapsed_ms = :timer.now_diff(:os.timestamp(), started) / 1_000 53 | Logger.debug("to_job_serialized performance test took #{elapsed_ms / 1_000} secs") 54 | assert elapsed_ms < max_timeout_ms 55 | end 56 | 57 | test "performance is in acceptable range" do 58 | Process.register(self(), :tester) 59 | started = :os.timestamp() 60 | max_timeout_ms = 5 * 1_000 61 | 62 | {:ok, sup} = Exq.start_link() 63 | 64 | for _ <- 1..1000, 65 | do: Exq.enqueue(Exq, "default", PerformanceTest.Worker, ["keep_on_trucking"]) 66 | 67 | Exq.enqueue(Exq, "default", PerformanceTest.Worker, ["last"]) 68 | 69 | # Wait for last message 70 | receive do 71 | :done -> Logger.info("Received done") 72 | after 73 | # This won't count enqueue 74 | max_timeout_ms -> assert false, "Timeout of #{max_timeout_ms} reached for performance test" 75 | end 76 | 77 | elapsed_ms = :timer.now_diff(:os.timestamp(), started) / 1_000 78 | Logger.debug("Perf test took #{elapsed_ms / 1_000} secs") 79 | count = Exq.Redis.Connection.llen!(:testredis, "test:queue:default") 80 | 81 | assert count == 0 82 | assert elapsed_ms < max_timeout_ms 83 | # let stats finish 84 | wait_long() 85 | stop_process(sup) 86 | end 87 | 88 | test "performance for flakey workers" do 89 | Process.register(self(), :tester) 90 | max_timeout_ms = 2 * 1_000 91 | 92 | {:ok, sup} = Exq.start_link(concurrency: 20) 93 | 94 | for _ <- 
1..200, 95 | do: 96 | Exq.enqueue(Exq, "default", PerformanceTest.FlakeyWorker, [ 97 | Enum.random([1, 2, 3, 4, 5, 6]) 98 | ]) 99 | 100 | Exq.enqueue(Exq, "default", PerformanceTest.FlakeyWorker, [:done]) 101 | 102 | # Wait for last message 103 | receive do 104 | :done -> Logger.info("Received done") 105 | after 106 | # This won't count enqueue 107 | max_timeout_ms -> assert false, "Timeout of #{max_timeout_ms} reached for performance test" 108 | end 109 | 110 | count = Exq.Redis.Connection.llen!(:testredis, "test:queue:default") 111 | 112 | assert count == 0 113 | # let stats finish 114 | wait_long() 115 | stop_process(sup) 116 | end 117 | end 118 | -------------------------------------------------------------------------------- /test/readonly_reconnect_test.exs: -------------------------------------------------------------------------------- 1 | defmodule ReadonlyReconnectTest do 2 | use ExUnit.Case 3 | import ExqTestUtil 4 | 5 | setup do 6 | Process.flag(:trap_exit, true) 7 | {:ok, redis} = Redix.start_link(host: "127.0.0.1", port: 6556) 8 | Process.register(redis, :testredis) 9 | 10 | on_exit(fn -> 11 | wait() 12 | TestRedis.teardown() 13 | end) 14 | 15 | {:ok, redis: redis} 16 | end 17 | 18 | test "disconnect on read-only errors with single command", %{redis: redis} do 19 | Exq.Redis.Connection.q(:testredis, ["SET", "key", "value"]) 20 | assert_received({:EXIT, pid, :killed}) 21 | assert redis == pid 22 | end 23 | 24 | test "disconnect on read-only errors with command pipeline", %{redis: redis} do 25 | Exq.Redis.Connection.qp(:testredis, [["GET", "key"], ["SET", "key", "value"]]) 26 | assert_received({:EXIT, pid, :killed}) 27 | assert redis == pid 28 | end 29 | 30 | test "disconnect on read-only errors with command pipeline returning values", %{redis: redis} do 31 | Exq.Redis.Connection.qp!(:testredis, [["GET", "key"], ["SET", "key", "value"]]) 32 | assert_received({:EXIT, pid, :killed}) 33 | assert redis == pid 34 | end 35 | 36 | test "pass through other 
errors" do 37 | assert {:error, %Redix.Error{}} = Exq.Redis.Connection.q(:testredis, ["GETS", "key"]) 38 | assert {:ok, [%Redix.Error{}]} = Exq.Redis.Connection.qp(:testredis, [["GETS", "key"]]) 39 | end 40 | end 41 | -------------------------------------------------------------------------------- /test/redis_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Exq.RedisTest do 2 | use ExUnit.Case 3 | 4 | alias Exq.Redis.Connection 5 | 6 | setup_all do 7 | ExqTestUtil.reset_config() 8 | TestRedis.setup() 9 | 10 | on_exit(fn -> 11 | ExqTestUtil.reset_config() 12 | TestRedis.teardown() 13 | end) 14 | end 15 | 16 | setup do 17 | on_exit(fn -> 18 | Connection.flushdb!(:testredis) 19 | end) 20 | 21 | :ok 22 | end 23 | 24 | test "smembers empty" do 25 | m = Connection.smembers!(:testredis, "bogus") 26 | assert m == [] 27 | end 28 | 29 | test "sadd" do 30 | r = Connection.sadd!(:testredis, "theset", "amember") 31 | assert r == 1 32 | assert Connection.smembers!(:testredis, "theset") == ["amember"] 33 | end 34 | 35 | test "sismember" do 36 | _ = Connection.sadd!(:testredis, "theset", "amember") 37 | _ = Connection.sadd!(:testredis, "theset", "anothermember") 38 | assert 1 == Connection.sismember!(:testredis, "theset", "amember") 39 | assert 1 == Connection.sismember!(:testredis, "theset", "anothermember") 40 | assert 0 == Connection.sismember!(:testredis, "theset", "not_a_member") 41 | end 42 | 43 | test "lpop empty" do 44 | assert Connection.lpop(:testredis, "bogus") == {:ok, nil} 45 | end 46 | 47 | test "rpush / lpop" do 48 | Connection.rpush!(:testredis, "akey", "avalue") 49 | assert Connection.lpop(:testredis, "akey") == {:ok, "avalue"} 50 | assert Connection.lpop(:testredis, "akey") == {:ok, nil} 51 | end 52 | 53 | test "zadd / zcard / zrem" do 54 | assert Connection.zcard!(:testredis, "akey") == 0 55 | assert Connection.zadd!(:testredis, "akey", "1.7", "avalue") == 1 56 | assert Connection.zcard!(:testredis, "akey") 
== 1 57 | assert Connection.zrem!(:testredis, "akey", "avalue") == 1 58 | assert Connection.zcard!(:testredis, "akey") == 0 59 | end 60 | 61 | test "zrangebyscore" do 62 | assert Connection.zcard!(:testredis, "akey") == 0 63 | assert Connection.zadd!(:testredis, "akey", "123456.123455", "avalue") == 1 64 | assert Connection.zadd!(:testredis, "akey", "123456.123456", "bvalue") == 1 65 | assert Connection.zadd!(:testredis, "akey", "123456.123457", "cvalue") == 1 66 | 67 | assert Connection.zrangebyscore!(:testredis, "akey", 0, "111111.111111") == [] 68 | assert Connection.zrangebyscore!(:testredis, "akey", 0, "123456.123455") == ["avalue"] 69 | 70 | assert Connection.zrangebyscore!(:testredis, "akey", 0, "123456.123456") == [ 71 | "avalue", 72 | "bvalue" 73 | ] 74 | 75 | assert Connection.zrangebyscore!(:testredis, "akey", 0, "123456.123457") == [ 76 | "avalue", 77 | "bvalue", 78 | "cvalue" 79 | ] 80 | 81 | assert Connection.zrangebyscore!(:testredis, "akey", 0, "999999.999999") == [ 82 | "avalue", 83 | "bvalue", 84 | "cvalue" 85 | ] 86 | 87 | assert Connection.zrem!(:testredis, "akey", "bvalue") == 1 88 | 89 | assert Connection.zrangebyscore!(:testredis, "akey", 0, "123456.123457") == [ 90 | "avalue", 91 | "cvalue" 92 | ] 93 | 94 | assert Connection.zrem!(:testredis, "akey", "avalue") == 1 95 | assert Connection.zrangebyscore!(:testredis, "akey", 0, "123456.123456") == [] 96 | assert Connection.zrangebyscore!(:testredis, "akey", 0, "123456.123457") == ["cvalue"] 97 | assert Connection.zrem!(:testredis, "akey", "avalue") == 0 98 | assert Connection.zrem!(:testredis, "akey", "cvalue") == 1 99 | assert Connection.zrangebyscore!(:testredis, "akey", 0, "999999.999999") == [] 100 | end 101 | 102 | test "zcount" do 103 | assert Connection.zcount!(:testredis, "akey") == Connection.zcard!(:testredis, "akey") 104 | assert Connection.zadd!(:testredis, "akey", "1", "avalue") == 1 105 | assert Connection.zadd!(:testredis, "akey", "1", "bvalue") == 1 106 | assert 
Connection.zadd!(:testredis, "akey", "2", "cvalue") == 1 107 | 108 | assert Connection.zcount!(:testredis, "akey", "0", "1") == 2 109 | assert Connection.zcount!(:testredis, "akey", "1", "1") == 2 110 | assert Connection.zcount!(:testredis, "akey", "-inf", "+inf") == 3 111 | assert Connection.zcount!(:testredis, "akey", "1", "(2") == 2 112 | assert Connection.zcount!(:testredis, "akey", "(1", "2") == 1 113 | end 114 | 115 | test "flushdb" do 116 | Connection.sadd!(:testredis, "theset", "amember") 117 | Connection.flushdb!(:testredis) 118 | assert Connection.smembers!(:testredis, "theset") == [] 119 | end 120 | end 121 | -------------------------------------------------------------------------------- /test/test-redis-replica.conf: -------------------------------------------------------------------------------- 1 | port 6556 2 | daemonize yes 3 | logfile stdout 4 | pidfile /tmp/resquex-redis-replica.pid 5 | slaveof 127.0.0.1 6555 6 | -------------------------------------------------------------------------------- /test/test-redis.conf: -------------------------------------------------------------------------------- 1 | port 6555 2 | daemonize yes 3 | logfile stdout 4 | pidfile /tmp/resquex-redis.pid 5 | -------------------------------------------------------------------------------- /test/test-sentinel.conf: -------------------------------------------------------------------------------- 1 | port 6666 2 | daemonize yes 3 | logfile stdout 4 | pidfile /tmp/resquex-redis-sentinel.pid 5 | 6 | sentinel monitor exq 127.0.0.1 6555 1 7 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | defmodule TestStats do 2 | alias Exq.Redis.Connection 3 | alias Exq.Redis.JobQueue 4 | alias Exq.Support.Coercion 5 | alias Exq.Support.Config 6 | 7 | def processed_count(redis, namespace) do 8 | count = Connection.get!(redis, JobQueue.full_key(namespace, 
"stat:processed")) 9 | {:ok, count} 10 | end 11 | 12 | def failed_count(redis, namespace) do 13 | count = Connection.get!(redis, JobQueue.full_key(namespace, "stat:failed")) 14 | {:ok, count} 15 | end 16 | end 17 | 18 | defmodule ExqTestUtil do 19 | @timeout 20 20 | @long_timeout 100 21 | 22 | alias Exq.Support.Coercion 23 | alias Exq.Support 24 | alias Config.Reader 25 | 26 | def redis_host do 27 | Support.Config.get(:host) 28 | end 29 | 30 | def redis_port do 31 | :port 32 | |> Support.Config.get() 33 | |> Coercion.to_integer() 34 | end 35 | 36 | import ExUnit.Assertions 37 | 38 | defmodule SendWorker do 39 | def perform(pid) do 40 | send(String.to_atom(pid), {:worked}) 41 | end 42 | end 43 | 44 | def reset_env(previous_env) do 45 | # Restore all previous values 46 | System.put_env(previous_env) 47 | 48 | # Remove any newly added keys 49 | for {key, _} <- System.get_env() do 50 | unless Map.has_key?(previous_env, key) do 51 | System.delete_env(key) 52 | end 53 | end 54 | end 55 | 56 | def assert_exq_up(exq) do 57 | my_pid = String.to_atom(UUID.uuid4()) 58 | Process.register(self(), my_pid) 59 | {:ok, _} = Exq.enqueue(exq, "default", "ExqTestUtil.SendWorker", [my_pid]) 60 | ExUnit.Assertions.assert_receive({:worked}) 61 | Process.unregister(my_pid) 62 | end 63 | 64 | def stop_process(pid) do 65 | try do 66 | Process.flag(:trap_exit, true) 67 | Process.exit(pid, :shutdown) 68 | 69 | receive do 70 | {:EXIT, _pid, _error} -> :ok 71 | end 72 | rescue 73 | e in RuntimeError -> e 74 | end 75 | 76 | Process.flag(:trap_exit, false) 77 | end 78 | 79 | def wait do 80 | :timer.sleep(@timeout) 81 | end 82 | 83 | def wait_long do 84 | :timer.sleep(@long_timeout) 85 | end 86 | 87 | def reset_config do 88 | config = Reader.read!(Path.join([Path.dirname(__DIR__), "config", "config.exs"])) 89 | Application.put_all_env(config, persistent: true) 90 | end 91 | 92 | def with_application_env(app, key, new, context) do 93 | old = Application.get_env(app, key, :undefined) 94 | 
Application.put_env(app, key, new) 95 | 96 | try do 97 | context.() 98 | after 99 | if old == :undefined do 100 | Application.delete_env(app, key) 101 | else 102 | Application.put_env(app, key, old) 103 | end 104 | end 105 | end 106 | end 107 | 108 | defmodule TestRedis do 109 | import ExqTestUtil 110 | alias Exq.Redis.Connection 111 | alias Exq.Support.Config 112 | 113 | # TODO: Automate config 114 | def start do 115 | unless Config.get(:test_with_local_redis) == false do 116 | [] = :os.cmd(~c"redis-server test/test-redis.conf") 117 | [] = :os.cmd(~c"redis-server test/test-redis-replica.conf") 118 | [] = :os.cmd(~c"redis-server test/test-sentinel.conf --sentinel") 119 | :timer.sleep(500) 120 | end 121 | end 122 | 123 | def stop do 124 | unless Config.get(:test_with_local_redis) == false do 125 | [] = :os.cmd(~c"redis-cli -p 6555 shutdown") 126 | [] = :os.cmd(~c"redis-cli -p 6556 shutdown") 127 | [] = :os.cmd(~c"redis-cli -p 6666 shutdown") 128 | end 129 | end 130 | 131 | def setup do 132 | {:ok, redis} = Redix.start_link(host: redis_host(), port: redis_port()) 133 | Process.register(redis, :testredis) 134 | flush_all() 135 | :ok 136 | end 137 | 138 | def flush_all do 139 | try do 140 | Connection.flushdb!(:testredis) 141 | catch 142 | :exit, {:timeout, _info} -> nil 143 | end 144 | end 145 | 146 | def teardown do 147 | if !Process.whereis(:testredis) do 148 | # For some reason at the end of test the link is down, before we actually stop and unregister? 
149 | {:ok, redis} = Redix.start_link(host: redis_host(), port: redis_port()) 150 | Process.register(redis, :testredis) 151 | end 152 | 153 | try do 154 | Process.unregister(:testredis) 155 | rescue 156 | ArgumentError -> true 157 | end 158 | 159 | :ok 160 | end 161 | end 162 | 163 | # Don't run parallel tests to prevent redis issues 164 | # Exclude longer running failure condition tests by default 165 | ExUnit.configure(seed: 0, max_cases: 1, exclude: [failure_scenarios: true, pending: true]) 166 | 167 | # Start logger 168 | for app <- [:logger, :redix, :elixir_uuid] do 169 | Application.ensure_all_started(app) 170 | end 171 | 172 | TestRedis.start() 173 | 174 | System.at_exit(fn _status -> 175 | TestRedis.stop() 176 | end) 177 | 178 | Exq.Mock.start_link(mode: :redis) 179 | 180 | ExUnit.start(capture_log: true) 181 | -------------------------------------------------------------------------------- /test/worker_test.exs: -------------------------------------------------------------------------------- 1 | defmodule WorkerTest do 2 | use ExUnit.Case 3 | 4 | defmodule NoArgWorker do 5 | def perform do 6 | end 7 | end 8 | 9 | defmodule ThreeArgWorker do 10 | def perform(_, _, _) do 11 | end 12 | end 13 | 14 | defmodule CustomMethodWorker do 15 | def custom_perform do 16 | end 17 | end 18 | 19 | defmodule MetadataWorker do 20 | def perform() do 21 | %{class: "WorkerTest.MetadataWorker"} = Exq.worker_job() 22 | end 23 | end 24 | 25 | defmodule MissingMethodWorker do 26 | end 27 | 28 | defmodule RaiseWorker do 29 | def perform do 30 | raise "error" 31 | end 32 | end 33 | 34 | defmodule SuicideWorker do 35 | def perform do 36 | Process.exit(self(), :kill) 37 | end 38 | end 39 | 40 | defmodule TerminateWorker do 41 | def perform do 42 | Process.exit(self(), :normal) 43 | end 44 | end 45 | 46 | defmodule BadArithmaticWorker do 47 | def perform do 48 | 1 / 0 49 | end 50 | end 51 | 52 | defmodule BadMatchWorker do 53 | def perform do 54 | 1 = 0 55 | end 56 | end 57 | 58 | 
defmodule FunctionClauseWorker do 59 | def perform do 60 | hello("abc") 61 | end 62 | 63 | def hello(from) when is_pid(from) do 64 | IO.puts("HELLO") 65 | end 66 | end 67 | 68 | defmodule MockStatsServer do 69 | use GenServer 70 | 71 | def init(args) do 72 | {:ok, args} 73 | end 74 | 75 | def handle_cast({:add_process, _, _, _}, state) do 76 | send(:workertest, :add_process) 77 | {:noreply, state} 78 | end 79 | 80 | def handle_cast({:record_processed, _, _}, state) do 81 | send(:workertest, :record_processed) 82 | {:noreply, state} 83 | end 84 | 85 | def handle_cast({:record_failure, _, _, _}, state) do 86 | send(:workertest, :record_failure) 87 | {:noreply, state} 88 | end 89 | 90 | def handle_cast({:process_terminated, _, _}, state) do 91 | send(:workertest, :process_terminated) 92 | {:noreply, state} 93 | end 94 | end 95 | 96 | defmodule MockServer do 97 | @behaviour :gen_statem 98 | 99 | def start_link() do 100 | :gen_statem.start_link(__MODULE__, [], []) 101 | end 102 | 103 | @impl true 104 | def callback_mode(), do: :state_functions 105 | 106 | @impl true 107 | def init([]) do 108 | {:ok, :connected, []} 109 | end 110 | 111 | defp reply({pid, request_id} = _from, reply) do 112 | send(pid, {request_id, reply}) 113 | end 114 | 115 | def connected( 116 | :cast, 117 | {:pipeline, [["ZADD" | _], ["ZREMRANGEBYSCORE" | _], ["ZREMRANGEBYRANK" | _]], from, 118 | _timeout}, 119 | data 120 | ) do 121 | send(:workertest, :zadd_redis) 122 | reply(from, {:ok, [1, 0, 0]}) 123 | {:keep_state, data} 124 | end 125 | 126 | # Same reply as Redix connection 127 | def connected(:cast, {:pipeline, [["LREM" | _]], from, _timeout}, data) do 128 | send(:workertest, :lrem_redis) 129 | reply(from, {:ok, [1]}) 130 | {:keep_state, data} 131 | end 132 | 133 | def connected(:cast, {:job_terminated, _queue, _success}, data) do 134 | send(:workertest, :job_terminated) 135 | {:keep_state, data} 136 | end 137 | end 138 | 139 | def assert_terminate(worker, true) do 140 | 
Exq.Worker.Server.work(worker) 141 | assert_receive :add_process 142 | assert_receive :process_terminated 143 | assert_receive :job_terminated 144 | assert_receive :record_processed 145 | assert_receive :lrem_redis 146 | end 147 | 148 | def assert_terminate(worker, false) do 149 | Exq.Worker.Server.work(worker) 150 | assert_receive :add_process 151 | assert_receive :process_terminated 152 | assert_receive :job_terminated 153 | assert_receive :record_failure 154 | assert_receive :zadd_redis 155 | assert_receive :lrem_redis 156 | end 157 | 158 | def start_worker({class, args}) do 159 | Process.register(self(), :workertest) 160 | job = "{ \"queue\": \"default\", \"class\": \"#{class}\", \"args\": #{args} }" 161 | 162 | {:ok, stub_server} = 163 | start_supervised(%{ 164 | id: WorkerTest.MockServer, 165 | start: {WorkerTest.MockServer, :start_link, []} 166 | }) 167 | 168 | {:ok, mock_stats_server} = 169 | start_supervised(%{ 170 | id: WorkerTest.MockStatsServer, 171 | start: {GenServer, :start_link, [WorkerTest.MockStatsServer, %{}]} 172 | }) 173 | 174 | {:ok, middleware} = 175 | start_supervised(%{ 176 | id: Exq.Middleware.Server, 177 | start: {GenServer, :start_link, [Exq.Middleware.Server, []]} 178 | }) 179 | 180 | {:ok, metadata} = 181 | start_supervised(%{ 182 | id: Exq.Worker.Metadata, 183 | start: {Exq.Worker.Metadata, :start_link, [%{}]} 184 | }) 185 | 186 | Exq.Middleware.Server.push(middleware, Exq.Middleware.Stats) 187 | Exq.Middleware.Server.push(middleware, Exq.Middleware.Job) 188 | Exq.Middleware.Server.push(middleware, Exq.Middleware.Manager) 189 | Exq.Middleware.Server.push(middleware, Exq.Middleware.Unique) 190 | Exq.Middleware.Server.push(middleware, Exq.Middleware.Logger) 191 | 192 | start_supervised(%{ 193 | id: Exq.Worker.Server, 194 | start: 195 | {Exq.Worker.Server, :start_link, 196 | [ 197 | job, 198 | stub_server, 199 | "default", 200 | mock_stats_server, 201 | "exq", 202 | "localhost", 203 | stub_server, 204 | middleware, 205 | metadata 206 | 
]} 207 | }) 208 | end 209 | 210 | test "execute valid job with perform" do 211 | {:ok, worker} = start_worker({"WorkerTest.NoArgWorker", "[]"}) 212 | assert_terminate(worker, true) 213 | end 214 | 215 | test "execute valid rubyish job with perform" do 216 | {:ok, worker} = start_worker({"WorkerTest::NoArgWorker", "[]"}) 217 | assert_terminate(worker, true) 218 | end 219 | 220 | test "execute valid job with perform args" do 221 | {:ok, worker} = start_worker({"WorkerTest.ThreeArgWorker", "[1, 2, 3]"}) 222 | assert_terminate(worker, true) 223 | end 224 | 225 | test "provide access to job metadata" do 226 | {:ok, worker} = start_worker({"WorkerTest.MetadataWorker", "[]"}) 227 | assert_terminate(worker, true) 228 | end 229 | 230 | test "execute worker raising error" do 231 | {:ok, worker} = start_worker({"WorkerTest.RaiseWorker", "[]"}) 232 | assert_terminate(worker, false) 233 | end 234 | 235 | test "execute valid job with custom function" do 236 | {:ok, worker} = start_worker({"WorkerTest.CustomMethodWorker/custom_perform", "[]"}) 237 | assert_terminate(worker, false) 238 | end 239 | 240 | # Go through Exit reasons: http://erlang.org/doc/reference_manual/errors.html#exit_reasons 241 | 242 | test "execute invalid module perform" do 243 | {:ok, worker} = start_worker({"NonExistent", "[]"}) 244 | assert_terminate(worker, false) 245 | end 246 | 247 | test "worker killed still sends stats" do 248 | {:ok, worker} = start_worker({"WorkerTest.SuicideWorker", "[]"}) 249 | assert_terminate(worker, false) 250 | end 251 | 252 | test "worker normally terminated still sends stats" do 253 | {:ok, worker} = start_worker({"WorkerTest.TerminateWorker", "[]"}) 254 | assert_terminate(worker, false) 255 | end 256 | 257 | test "worker with arithmetic error (badarith) still sends stats" do 258 | {:ok, worker} = start_worker({"WorkerTest.BadArithmaticWorker", "[]"}) 259 | assert_terminate(worker, false) 260 | end 261 | 262 | test "worker with bad match (badmatch) still sends stats" do 263 | 
{:ok, worker} = start_worker({"WorkerTest.BadMatchWorker", "[]"}) 264 | assert_terminate(worker, false) 265 | end 266 | 267 | test "worker with function clause error still sends stats" do 268 | {:ok, worker} = start_worker({"WorkerTest.FunctionClauseWorker", "[]"}) 269 | assert_terminate(worker, false) 270 | end 271 | 272 | test "execute invalid module function" do 273 | {:ok, worker} = start_worker({"WorkerTest.MissingMethodWorker/nonexist", "[]"}) 274 | assert_terminate(worker, false) 275 | end 276 | 277 | test "adds process info struct to worker state" do 278 | {:ok, worker} = start_worker({"WorkerTest.NoArgWorker", "[]"}) 279 | assert is_nil(:sys.get_state(worker).pipeline) 280 | 281 | Exq.Worker.Server.work(worker) 282 | assert is_map(:sys.get_state(worker).pipeline.assigns.process_info) 283 | end 284 | 285 | test "adds job struct to worker state" do 286 | {:ok, worker} = start_worker({"WorkerTest.NoArgWorker", "[]"}) 287 | assert is_nil(:sys.get_state(worker).pipeline) 288 | 289 | Exq.Worker.Server.work(worker) 290 | assert is_map(:sys.get_state(worker).pipeline.assigns.job) 291 | end 292 | 293 | test "adds worker module to worker state" do 294 | {:ok, worker} = start_worker({"WorkerTest.NoArgWorker", "[]"}) 295 | assert is_nil(:sys.get_state(worker).pipeline) 296 | 297 | Exq.Worker.Server.work(worker) 298 | assert :sys.get_state(worker).pipeline.assigns.worker_module == Elixir.WorkerTest.NoArgWorker 299 | end 300 | end 301 | --------------------------------------------------------------------------------