├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── dependabot.yaml └── workflows │ ├── benchstat.yml │ └── build.yml ├── .gitignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── aggregator.go ├── aggregator_test.go ├── asynq.go ├── asynq_test.go ├── benchmark_test.go ├── client.go ├── client_test.go ├── context.go ├── doc.go ├── docs └── assets │ ├── asynq_history.gif │ ├── asynq_ps.gif │ ├── asynq_stats.gif │ ├── asynqmon-queues-view.png │ ├── asynqmon-task-view.png │ ├── cluster.png │ ├── dash.gif │ ├── demo.gif │ ├── overview.png │ └── task-queue.png ├── example_test.go ├── forwarder.go ├── forwarder_test.go ├── go.mod ├── go.sum ├── healthcheck.go ├── healthcheck_test.go ├── heartbeat.go ├── heartbeat_test.go ├── inspector.go ├── inspector_test.go ├── internal ├── base │ ├── base.go │ └── base_test.go ├── context │ ├── context.go │ └── context_test.go ├── errors │ ├── errors.go │ └── errors_test.go ├── log │ ├── log.go │ └── log_test.go ├── proto │ ├── asynq.pb.go │ └── asynq.proto ├── rdb │ ├── benchmark_test.go │ ├── inspect.go │ ├── inspect_test.go │ ├── rdb.go │ └── rdb_test.go ├── testbroker │ └── testbroker.go ├── testutil │ ├── builder.go │ ├── builder_test.go │ └── testutil.go └── timeutil │ ├── timeutil.go │ └── timeutil_test.go ├── janitor.go ├── janitor_test.go ├── periodic_task_manager.go ├── periodic_task_manager_test.go ├── processor.go ├── processor_test.go ├── recoverer.go ├── recoverer_test.go ├── scheduler.go ├── scheduler_test.go ├── servemux.go ├── servemux_test.go ├── server.go ├── server_test.go ├── signals_unix.go ├── signals_windows.go ├── subscriber.go ├── subscriber_test.go ├── syncer.go ├── syncer_test.go ├── tools ├── asynq │ ├── README.md │ ├── cmd │ │ ├── cron.go │ │ ├── dash.go │ │ ├── dash │ │ │ ├── dash.go │ │ │ ├── draw.go │ │ │ ├── draw_test.go │ │ │ ├── fetch.go │ │ │ ├── key_event.go │ │ │ ├── key_event_test.go │ │ │ ├── screen_drawer.go │ │ │ └── table.go │ │ ├── group.go │ │ ├── queue.go │ │ ├── root.go │ │ ├── server.go │ │ ├── stats.go │ │ └── task.go │ └── main.go ├── go.mod ├── go.sum └── metrics_exporter │ └── main.go └── x ├── go.mod ├── go.sum ├── metrics └── metrics.go └── rate ├── example_test.go ├── semaphore.go └── semaphore_test.go /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [hibiken] 4 | open_collective: ken-hibino 5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG] Description of the bug" 5 | labels: bug 6 | assignees: 7 | - hibiken 8 | - kamikazechaser 9 | 10 | --- 11 | 12 | **Describe the bug** 13 | A clear and concise description of what the bug is. 14 | 15 | **Environment (please complete the following information):** 16 | - OS: [e.g. MacOS, Linux] 17 | - `asynq` package version [e.g. v0.25.0] 18 | - Redis/Valkey version 19 | 20 | **To Reproduce** 21 | Steps to reproduce the behavior (Code snippets if applicable): 22 | 1. Setup background processing ... 23 | 2. Enqueue tasks ... 24 | 3. See Error ... 25 | 26 | **Expected behavior** 27 | A clear and concise description of what you expected to happen. 
28 | 29 | **Screenshots** 30 | If applicable, add screenshots to help explain your problem. 31 | 32 | **Additional context** 33 | Add any other context about the problem here. 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "[FEATURE REQUEST] Description of the feature request" 5 | labels: enhancement 6 | assignees: 7 | - hibiken 8 | - kamikazechaser 9 | 10 | --- 11 | 12 | **Is your feature request related to a problem? Please describe.** 13 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 14 | 15 | **Describe the solution you'd like** 16 | A clear and concise description of what you want to happen. 17 | 18 | **Describe alternatives you've considered** 19 | A clear and concise description of any alternative solutions or features you've considered. 20 | 21 | **Additional context** 22 | Add any other context or screenshots about the feature request here. 23 | -------------------------------------------------------------------------------- /.github/dependabot.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "gomod" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | labels: 8 | - "pr-deps" 9 | - package-ecosystem: "gomod" 10 | directory: "/tools" 11 | schedule: 12 | interval: "weekly" 13 | labels: 14 | - "pr-deps" 15 | - package-ecosystem: "gomod" 16 | directory: "/x" 17 | schedule: 18 | interval: "weekly" 19 | labels: 20 | - "pr-deps" 21 | - package-ecosystem: "github-actions" 22 | directory: "/" 23 | schedule: 24 | interval: "weekly" 25 | -------------------------------------------------------------------------------- /.github/workflows/benchstat.yml: -------------------------------------------------------------------------------- 1 | # This workflow runs benchmarks against the current branch, 2 | # compares them to benchmarks against master, 3 | # and uploads the results as an artifact. 4 | 5 | name: benchstat 6 | 7 | on: [pull_request] 8 | 9 | jobs: 10 | incoming: 11 | runs-on: ubuntu-latest 12 | services: 13 | redis: 14 | image: redis:7 15 | ports: 16 | - 6379:6379 17 | steps: 18 | - name: Checkout 19 | uses: actions/checkout@v4 20 | - name: Set up Go 21 | uses: actions/setup-go@v5 22 | with: 23 | go-version: 1.23.x 24 | - name: Benchmark 25 | run: go test -run=^$ -bench=. -count=5 -timeout=60m ./... | tee -a new.txt 26 | - name: Upload Benchmark 27 | uses: actions/upload-artifact@v4 28 | with: 29 | name: bench-incoming 30 | path: new.txt 31 | 32 | current: 33 | runs-on: ubuntu-latest 34 | services: 35 | redis: 36 | image: redis:7 37 | ports: 38 | - 6379:6379 39 | steps: 40 | - name: Checkout 41 | uses: actions/checkout@v4 42 | with: 43 | ref: master 44 | - name: Set up Go 45 | uses: actions/setup-go@v5 46 | with: 47 | go-version: 1.23.x 48 | - name: Benchmark 49 | run: go test -run=^$ -bench=. -count=5 -timeout=60m ./... 
| tee -a old.txt 50 | - name: Upload Benchmark 51 | uses: actions/upload-artifact@v4 52 | with: 53 | name: bench-current 54 | path: old.txt 55 | 56 | benchstat: 57 | needs: [incoming, current] 58 | runs-on: ubuntu-latest 59 | steps: 60 | - name: Checkout 61 | uses: actions/checkout@v4 62 | - name: Set up Go 63 | uses: actions/setup-go@v5 64 | with: 65 | go-version: 1.23.x 66 | - name: Install benchstat 67 | run: go install golang.org/x/perf/cmd/benchstat@latest 68 | - name: Download Incoming 69 | uses: actions/download-artifact@v4 70 | with: 71 | name: bench-incoming 72 | - name: Download Current 73 | uses: actions/download-artifact@v4 74 | with: 75 | name: bench-current 76 | - name: Benchstat Results 77 | run: benchstat old.txt new.txt | tee -a benchstat.txt 78 | - name: Upload benchstat results 79 | uses: actions/upload-artifact@v4 80 | with: 81 | name: benchstat 82 | path: benchstat.txt 83 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | strategy: 8 | matrix: 9 | os: [ubuntu-latest] 10 | go-version: [1.22.x, 1.23.x] 11 | runs-on: ${{ matrix.os }} 12 | services: 13 | redis: 14 | image: redis:7 15 | ports: 16 | - 6379:6379 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - name: Set up Go 21 | uses: actions/setup-go@v5 22 | with: 23 | go-version: ${{ matrix.go-version }} 24 | cache: false 25 | 26 | - name: Build core module 27 | run: go build -v ./... 28 | 29 | - name: Build x module 30 | run: cd x && go build -v ./... && cd .. 31 | 32 | - name: Test core module 33 | run: go test -race -v -coverprofile=coverage.txt -covermode=atomic ./... 34 | 35 | - name: Test x module 36 | run: cd x && go test -race -v ./... && cd .. 37 | 38 | - name: Benchmark Test 39 | run: go test -run=^$ -bench=. -loglevel=debug ./... 40 | 41 | - name: Upload coverage to Codecov 42 | uses: codecov/codecov-action@v5 43 | 44 | build-tool: 45 | strategy: 46 | matrix: 47 | os: [ubuntu-latest] 48 | go-version: [1.22.x, 1.23.x] 49 | runs-on: ${{ matrix.os }} 50 | services: 51 | redis: 52 | image: redis:7 53 | ports: 54 | - 6379:6379 55 | steps: 56 | - uses: actions/checkout@v4 57 | 58 | - name: Set up Go 59 | uses: actions/setup-go@v5 60 | with: 61 | go-version: ${{ matrix.go-version }} 62 | cache: false 63 | 64 | - name: Build tools module 65 | run: cd tools && go build -v ./... && cd .. 66 | 67 | - name: Test tools module 68 | run: cd tools && go test -race -v ./... && cd .. 
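      # Note: tools/ is a separate Go module with its own go.mod (see tools/go.mod
      # in the tree above), so it is built and tested from inside that directory;
      # the x/ module is handled the same way in the build job.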
69 | 70 | golangci: 71 | name: lint 72 | runs-on: ubuntu-latest 73 | steps: 74 | - uses: actions/checkout@v4 75 | 76 | - uses: actions/setup-go@v5 77 | with: 78 | go-version: stable 79 | 80 | - name: golangci-lint 81 | uses: golangci/golangci-lint-action@v6 82 | with: 83 | version: v1.61 84 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | vendor 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | 9 | # Test binary, built with `go test -c` 10 | *.test 11 | 12 | # Output of the go coverage tool, specifically when used with LiteIDE 13 | *.out 14 | 15 | # Ignore examples for now 16 | /examples 17 | 18 | # Ignore tool binaries 19 | /tools/asynq/asynq 20 | /tools/metrics_exporter/metrics_exporter 21 | 22 | # Ignore asynq config file 23 | .asynq.* 24 | 25 | # Ignore editor config files 26 | .vscode 27 | .idea 28 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 
50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | ken.hibino7@gmail.com. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 
129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Thanks for your interest in contributing to Asynq! 4 | We are open to, and grateful for, any contributions made by the community. 5 | 6 | ## Reporting Bugs 7 | 8 | Have a look at our [issue tracker](https://github.com/hibiken/asynq/issues). If you can't find an issue (open or closed) 9 | describing your problem (or a very similar one) there, please open a new issue with 10 | the following details: 11 | 12 | - Which versions of Go and Redis are you using? 13 | - What are you trying to accomplish? 14 | - What is the full error you are seeing? 15 | - How can we reproduce this? 16 | - Please quote as much of your code as needed to reproduce (ideally with a link to a 17 | public repository or Gist) 18 | 19 | ## Getting Help 20 | 21 | We have a [Gitter 22 | channel](https://gitter.im/go-asynq/community) where you can ask questions and 23 | get help. Feel free to ask there before opening a GitHub issue. 24 | 25 | ## Submitting Feature Requests 26 | 27 | If you can't find an issue (open or closed) describing your idea on our [issue 28 | tracker](https://github.com/hibiken/asynq/issues), please open one. Answering the following 29 | questions in your description helps us a great deal: 30 | 31 | - What do you want to do, and how do you expect Asynq to support you with that? 32 | - How might this be added to Asynq? 33 | - What are possible alternatives? 34 | - Are there any disadvantages? 35 | 36 | Thank you! We'll try to respond as quickly as possible. 37 | 38 | ## Contributing Code 39 | 40 | 1. Fork this repo 41 | 2. Download your fork `git clone git@github.com:your-username/asynq.git && cd asynq` 42 | 3. Create your branch `git checkout -b your-branch-name` 43 | 4. Make and commit your changes 44 | 5. Push the branch `git push origin your-branch-name` 45 | 6. Create a new pull request 46 | 47 | Please try to keep your pull request focused in scope and avoid including unrelated commits. 48 | Please run the tests against a Redis cluster locally with the `--redis_cluster` flag to ensure that the code works with Redis Cluster. TODO: Run tests using Redis cluster on CI. 49 | 50 | After you have submitted your pull request, we'll try to get back to you as soon as possible. We may suggest some changes or improvements. 51 | 52 | Thank you for contributing! 53 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Kentaro Hibino 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) 2 | 3 | proto: internal/proto/asynq.proto 4 | protoc -I=$(ROOT_DIR)/internal/proto \ 5 | --go_out=$(ROOT_DIR)/internal/proto \ 6 | --go_opt=module=github.com/hibiken/asynq/internal/proto \ 7 | $(ROOT_DIR)/internal/proto/asynq.proto 8 | 9 | .PHONY: lint 10 | lint: 11 | golangci-lint run 12 | -------------------------------------------------------------------------------- /aggregator.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "context" 9 | "sync" 10 | "time" 11 | 12 | "github.com/hibiken/asynq/internal/base" 13 | "github.com/hibiken/asynq/internal/log" 14 | ) 15 | 16 | // An aggregator is responsible for checking each group and, when any of the 17 | // grouping conditions is met, aggregating the group's tasks into one task. 18 | type aggregator struct { 19 | logger *log.Logger 20 | broker base.Broker 21 | client *Client 22 | 23 | // channel to communicate back to the long running "aggregator" goroutine. 24 | done chan struct{} 25 | 26 | // list of queue names to check and aggregate. 27 | queues []string 28 | 29 | // Group configurations 30 | gracePeriod time.Duration 31 | maxDelay time.Duration 32 | maxSize int 33 | 34 | // User provided group aggregator. 35 | ga GroupAggregator 36 | 37 | // interval used to check for aggregation 38 | interval time.Duration 39 | 40 | // sema is a counting semaphore to ensure the number of active aggregating functions 41 | // does not exceed the limit. 42 | sema chan struct{} 43 | } 44 | 45 | type aggregatorParams struct { 46 | logger *log.Logger 47 | broker base.Broker 48 | queues []string 49 | gracePeriod time.Duration 50 | maxDelay time.Duration 51 | maxSize int 52 | groupAggregator GroupAggregator 53 | } 54 | 55 | const ( 56 | // Maximum number of aggregation checks in flight concurrently. 57 | maxConcurrentAggregationChecks = 3 58 | 59 | // Default interval used for aggregation checks. If the provided gracePeriod is less than 60 | // the default, use the gracePeriod. 61 | defaultAggregationCheckInterval = 7 * time.Second 62 | ) 63 | 64 | func newAggregator(params aggregatorParams) *aggregator { 65 | interval := defaultAggregationCheckInterval 66 | if params.gracePeriod < interval { 67 | interval = params.gracePeriod 68 | } 69 | return &aggregator{ 70 | logger: params.logger, 71 | broker: params.broker, 72 | client: &Client{broker: params.broker}, 73 | done: make(chan struct{}), 74 | queues: params.queues, 75 | gracePeriod: params.gracePeriod, 76 | maxDelay: params.maxDelay, 77 | maxSize: params.maxSize, 78 | ga: params.groupAggregator, 79 | sema: make(chan struct{}, maxConcurrentAggregationChecks), 80 | interval: interval, 81 | } 82 | } 83 | 84 | func (a *aggregator) shutdown() { 85 | if a.ga == nil { 86 | return 87 | } 88 | a.logger.Debug("Aggregator shutting down...") 89 | // Signal the aggregator goroutine to stop. 
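// Note: a.done is unbuffered (see newAggregator above), so the send below
// blocks until the goroutine started in start() receives it and begins
// draining the semaphore; shutdown therefore does not return before the
// stop signal has been taken.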
90 | a.done <- struct{}{} 91 | } 92 | 93 | func (a *aggregator) start(wg *sync.WaitGroup) { 94 | if a.ga == nil { 95 | return 96 | } 97 | wg.Add(1) 98 | go func() { 99 | defer wg.Done() 100 | ticker := time.NewTicker(a.interval) 101 | for { 102 | select { 103 | case <-a.done: 104 | a.logger.Debug("Waiting for all aggregation checks to finish...") 105 | // Block until all aggregation checks have released their tokens. 106 | for i := 0; i < cap(a.sema); i++ { 107 | a.sema <- struct{}{} 108 | } 109 | a.logger.Debug("Aggregator done") 110 | ticker.Stop() 111 | return 112 | case t := <-ticker.C: 113 | a.exec(t) 114 | } 115 | } 116 | }() 117 | } 118 | 119 | func (a *aggregator) exec(t time.Time) { 120 | select { 121 | case a.sema <- struct{}{}: // acquire token 122 | go a.aggregate(t) 123 | default: 124 | // If acquiring a token would block, we are already running the maximum number of 125 | // aggregation checks. Skip this round and log a warning. 126 | a.logger.Warnf("Max number of aggregation checks in flight. Skipping") 127 | } 128 | } 129 | 130 | func (a *aggregator) aggregate(t time.Time) { 131 | defer func() { <-a.sema /* release token */ }() 132 | for _, qname := range a.queues { 133 | groups, err := a.broker.ListGroups(qname) 134 | if err != nil { 135 | a.logger.Errorf("Failed to list groups in queue %q: %v", qname, err) 136 | continue 137 | } 138 | for _, gname := range groups { 139 | aggregationSetID, err := a.broker.AggregationCheck( 140 | qname, gname, t, a.gracePeriod, a.maxDelay, a.maxSize) 141 | if err != nil { 142 | a.logger.Errorf("Failed to run aggregation check (queue=%q, group=%q): %v", qname, gname, err) 143 | continue 144 | } 145 | if aggregationSetID == "" { 146 | a.logger.Debugf("No aggregation needed at this time: queue=%q group=%q", qname, gname) 147 | continue 148 | } 149 | 150 | // Aggregate and enqueue. 151 | msgs, deadline, err := a.broker.ReadAggregationSet(qname, gname, aggregationSetID) 152 | if err != nil { 153 | a.logger.Errorf("Failed to read aggregation set (queue=%q, group=%q, setID=%q): %v", 154 | qname, gname, aggregationSetID, err) 155 | continue 156 | } 157 | tasks := make([]*Task, len(msgs)) 158 | for i, m := range msgs { 159 | tasks[i] = NewTask(m.Type, m.Payload) 160 | } 161 | aggregatedTask := a.ga.Aggregate(gname, tasks) 162 | ctx, cancel := context.WithDeadline(context.Background(), deadline) 163 | if _, err := a.client.EnqueueContext(ctx, aggregatedTask, Queue(qname)); err != nil { 164 | a.logger.Errorf("Failed to enqueue aggregated task (queue=%q, group=%q, setID=%q): %v", 165 | qname, gname, aggregationSetID, err) 166 | cancel() 167 | continue 168 | } 169 | if err := a.broker.DeleteAggregationSet(ctx, qname, gname, aggregationSetID); err != nil { 170 | a.logger.Warnf("Failed to delete aggregation set (queue=%q, group=%q, setID=%q): %v", 171 | qname, gname, aggregationSetID, err) 172 | } 173 | cancel() 174 | } 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /aggregator_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 
4 | 5 | package asynq 6 | 7 | import ( 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/google/go-cmp/cmp" 13 | "github.com/hibiken/asynq/internal/base" 14 | "github.com/hibiken/asynq/internal/rdb" 15 | h "github.com/hibiken/asynq/internal/testutil" 16 | ) 17 | 18 | func TestAggregator(t *testing.T) { 19 | r := setup(t) 20 | defer r.Close() 21 | rdbClient := rdb.NewRDB(r) 22 | client := Client{broker: rdbClient} 23 | 24 | tests := []struct { 25 | desc string 26 | gracePeriod time.Duration 27 | maxDelay time.Duration 28 | maxSize int 29 | aggregateFunc func(gname string, tasks []*Task) *Task 30 | tasks []*Task // tasks to enqueue 31 | enqueueFrequency time.Duration // time between one enqueue event to another 32 | waitTime time.Duration // time to wait 33 | wantGroups map[string]map[string][]base.Z 34 | wantPending map[string][]*base.TaskMessage 35 | }{ 36 | { 37 | desc: "group older than the grace period should be aggregated", 38 | gracePeriod: 1 * time.Second, 39 | maxDelay: 0, // no maxdelay limit 40 | maxSize: 0, // no maxsize limit 41 | aggregateFunc: func(gname string, tasks []*Task) *Task { 42 | return NewTask(gname, nil, MaxRetry(len(tasks))) // use max retry to see how many tasks were aggregated 43 | }, 44 | tasks: []*Task{ 45 | NewTask("task1", nil, Group("mygroup")), 46 | NewTask("task2", nil, Group("mygroup")), 47 | NewTask("task3", nil, Group("mygroup")), 48 | }, 49 | enqueueFrequency: 300 * time.Millisecond, 50 | waitTime: 3 * time.Second, 51 | wantGroups: map[string]map[string][]base.Z{ 52 | "default": { 53 | "mygroup": {}, 54 | }, 55 | }, 56 | wantPending: map[string][]*base.TaskMessage{ 57 | "default": { 58 | h.NewTaskMessageBuilder().SetType("mygroup").SetRetry(3).Build(), 59 | }, 60 | }, 61 | }, 62 | { 63 | desc: "group older than the max-delay should be aggregated", 64 | gracePeriod: 2 * time.Second, 65 | maxDelay: 4 * time.Second, 66 | maxSize: 0, // no maxsize limit 67 | aggregateFunc: func(gname string, tasks []*Task) *Task { 68 | return NewTask(gname, nil, MaxRetry(len(tasks))) // use max retry to see how many tasks were aggregated 69 | }, 70 | tasks: []*Task{ 71 | NewTask("task1", nil, Group("mygroup")), // time 0 72 | NewTask("task2", nil, Group("mygroup")), // time 1s 73 | NewTask("task3", nil, Group("mygroup")), // time 2s 74 | NewTask("task4", nil, Group("mygroup")), // time 3s 75 | }, 76 | enqueueFrequency: 1 * time.Second, 77 | waitTime: 4 * time.Second, 78 | wantGroups: map[string]map[string][]base.Z{ 79 | "default": { 80 | "mygroup": {}, 81 | }, 82 | }, 83 | wantPending: map[string][]*base.TaskMessage{ 84 | "default": { 85 | h.NewTaskMessageBuilder().SetType("mygroup").SetRetry(4).Build(), 86 | }, 87 | }, 88 | }, 89 | { 90 | desc: "group reached the max-size should be aggregated", 91 | gracePeriod: 1 * time.Minute, 92 | maxDelay: 0, // no maxdelay limit 93 | maxSize: 5, 94 | aggregateFunc: func(gname string, tasks []*Task) *Task { 95 | return NewTask(gname, nil, MaxRetry(len(tasks))) // use max retry to see how many tasks were aggregated 96 | }, 97 | tasks: []*Task{ 98 | NewTask("task1", nil, Group("mygroup")), 99 | NewTask("task2", nil, Group("mygroup")), 100 | NewTask("task3", nil, Group("mygroup")), 101 | NewTask("task4", nil, Group("mygroup")), 102 | NewTask("task5", nil, Group("mygroup")), 103 | }, 104 | enqueueFrequency: 300 * time.Millisecond, 105 | waitTime: defaultAggregationCheckInterval * 2, 106 | wantGroups: map[string]map[string][]base.Z{ 107 | "default": { 108 | "mygroup": {}, 109 | }, 110 | }, 111 | wantPending: 
map[string][]*base.TaskMessage{ 112 | "default": { 113 | h.NewTaskMessageBuilder().SetType("mygroup").SetRetry(5).Build(), 114 | }, 115 | }, 116 | }, 117 | } 118 | 119 | for _, tc := range tests { 120 | h.FlushDB(t, r) 121 | 122 | aggregator := newAggregator(aggregatorParams{ 123 | logger: testLogger, 124 | broker: rdbClient, 125 | queues: []string{"default"}, 126 | gracePeriod: tc.gracePeriod, 127 | maxDelay: tc.maxDelay, 128 | maxSize: tc.maxSize, 129 | groupAggregator: GroupAggregatorFunc(tc.aggregateFunc), 130 | }) 131 | 132 | var wg sync.WaitGroup 133 | aggregator.start(&wg) 134 | 135 | for _, task := range tc.tasks { 136 | if _, err := client.Enqueue(task); err != nil { 137 | t.Errorf("%s: Client Enqueue failed: %v", tc.desc, err) 138 | aggregator.shutdown() 139 | wg.Wait() 140 | continue 141 | } 142 | time.Sleep(tc.enqueueFrequency) 143 | } 144 | 145 | time.Sleep(tc.waitTime) 146 | 147 | for qname, groups := range tc.wantGroups { 148 | for gname, want := range groups { 149 | gotGroup := h.GetGroupEntries(t, r, qname, gname) 150 | if diff := cmp.Diff(want, gotGroup, h.SortZSetEntryOpt); diff != "" { 151 | t.Errorf("%s: mismatch found in %q; (-want,+got)\n%s", tc.desc, base.GroupKey(qname, gname), diff) 152 | } 153 | } 154 | } 155 | 156 | for qname, want := range tc.wantPending { 157 | gotPending := h.GetPendingMessages(t, r, qname) 158 | if diff := cmp.Diff(want, gotPending, h.SortMsgOpt, h.IgnoreIDOpt); diff != "" { 159 | t.Errorf("%s: mismatch found in %q; (-want,+got)\n%s", tc.desc, base.PendingKey(qname), diff) 160 | } 161 | } 162 | aggregator.shutdown() 163 | wg.Wait() 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /asynq_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "crypto/tls" 9 | "flag" 10 | "sort" 11 | "strings" 12 | "testing" 13 | 14 | "github.com/redis/go-redis/v9" 15 | "github.com/google/go-cmp/cmp" 16 | "github.com/google/go-cmp/cmp/cmpopts" 17 | "github.com/hibiken/asynq/internal/log" 18 | h "github.com/hibiken/asynq/internal/testutil" 19 | ) 20 | 21 | //============================================================================ 22 | // This file defines helper functions and variables used in other test files. 23 | //============================================================================ 24 | 25 | // variables used for package testing. 
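// For example, the suite can be pointed at a specific Redis instance or at a
// cluster via the flags registered in init below (the addresses and db number
// here are placeholders):
//
//	go test -redis_addr=localhost:6379 -redis_db=14 -loglevel=debug
//	go test -redis_cluster -redis_cluster_addrs=localhost:7000,localhost:7001,localhost:7002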
26 | var ( 27 | redisAddr string 28 | redisDB int 29 | 30 | useRedisCluster bool 31 | redisClusterAddrs string // comma-separated list of host:port 32 | 33 | testLogLevel = FatalLevel 34 | ) 35 | 36 | var testLogger *log.Logger 37 | 38 | func init() { 39 | flag.StringVar(&redisAddr, "redis_addr", "localhost:6379", "redis address to use in testing") 40 | flag.IntVar(&redisDB, "redis_db", 14, "redis db number to use in testing") 41 | flag.BoolVar(&useRedisCluster, "redis_cluster", false, "use redis cluster as a broker in testing") 42 | flag.StringVar(&redisClusterAddrs, "redis_cluster_addrs", "localhost:7000,localhost:7001,localhost:7002", "comma separated list of redis server addresses") 43 | flag.Var(&testLogLevel, "loglevel", "log level to use in testing") 44 | 45 | testLogger = log.NewLogger(nil) 46 | testLogger.SetLevel(toInternalLogLevel(testLogLevel)) 47 | } 48 | 49 | func setup(tb testing.TB) (r redis.UniversalClient) { 50 | tb.Helper() 51 | if useRedisCluster { 52 | addrs := strings.Split(redisClusterAddrs, ",") 53 | if len(addrs) == 0 { 54 | tb.Fatal("No redis cluster addresses provided. Please set addresses using --redis_cluster_addrs flag.") 55 | } 56 | r = redis.NewClusterClient(&redis.ClusterOptions{ 57 | Addrs: addrs, 58 | }) 59 | } else { 60 | r = redis.NewClient(&redis.Options{ 61 | Addr: redisAddr, 62 | DB: redisDB, 63 | }) 64 | } 65 | // Start each test with a clean slate. 66 | h.FlushDB(tb, r) 67 | return r 68 | } 69 | 70 | func getRedisConnOpt(tb testing.TB) RedisConnOpt { 71 | tb.Helper() 72 | if useRedisCluster { 73 | addrs := strings.Split(redisClusterAddrs, ",") 74 | if len(addrs) == 0 { 75 | tb.Fatal("No redis cluster addresses provided. Please set addresses using --redis_cluster_addrs flag.") 76 | } 77 | return RedisClusterClientOpt{ 78 | Addrs: addrs, 79 | } 80 | } 81 | return RedisClientOpt{ 82 | Addr: redisAddr, 83 | DB: redisDB, 84 | } 85 | } 86 | 87 | var sortTaskOpt = cmp.Transformer("SortMsg", func(in []*Task) []*Task { 88 | out := append([]*Task(nil), in...) 
// Copy input to avoid mutating it 89 | sort.Slice(out, func(i, j int) bool { 90 | return out[i].Type() < out[j].Type() 91 | }) 92 | return out 93 | }) 94 | 95 | func TestParseRedisURI(t *testing.T) { 96 | tests := []struct { 97 | uri string 98 | want RedisConnOpt 99 | }{ 100 | { 101 | "redis://localhost:6379", 102 | RedisClientOpt{Addr: "localhost:6379"}, 103 | }, 104 | { 105 | "rediss://localhost:6379", 106 | RedisClientOpt{Addr: "localhost:6379", TLSConfig: &tls.Config{ServerName: "localhost"}}, 107 | }, 108 | { 109 | "redis://localhost:6379/3", 110 | RedisClientOpt{Addr: "localhost:6379", DB: 3}, 111 | }, 112 | { 113 | "redis://:mypassword@localhost:6379", 114 | RedisClientOpt{Addr: "localhost:6379", Password: "mypassword"}, 115 | }, 116 | { 117 | "redis://:mypassword@127.0.0.1:6379/11", 118 | RedisClientOpt{Addr: "127.0.0.1:6379", Password: "mypassword", DB: 11}, 119 | }, 120 | { 121 | "redis-socket:///var/run/redis/redis.sock", 122 | RedisClientOpt{Network: "unix", Addr: "/var/run/redis/redis.sock"}, 123 | }, 124 | { 125 | "redis-socket://:mypassword@/var/run/redis/redis.sock", 126 | RedisClientOpt{Network: "unix", Addr: "/var/run/redis/redis.sock", Password: "mypassword"}, 127 | }, 128 | { 129 | "redis-socket:///var/run/redis/redis.sock?db=7", 130 | RedisClientOpt{Network: "unix", Addr: "/var/run/redis/redis.sock", DB: 7}, 131 | }, 132 | { 133 | "redis-socket://:mypassword@/var/run/redis/redis.sock?db=12", 134 | RedisClientOpt{Network: "unix", Addr: "/var/run/redis/redis.sock", Password: "mypassword", DB: 12}, 135 | }, 136 | { 137 | "redis-sentinel://localhost:5000,localhost:5001,localhost:5002?master=mymaster", 138 | RedisFailoverClientOpt{ 139 | MasterName: "mymaster", 140 | SentinelAddrs: []string{"localhost:5000", "localhost:5001", "localhost:5002"}, 141 | }, 142 | }, 143 | { 144 | "redis-sentinel://:mypassword@localhost:5000,localhost:5001,localhost:5002?master=mymaster", 145 | RedisFailoverClientOpt{ 146 | MasterName: "mymaster", 147 | SentinelAddrs: []string{"localhost:5000", "localhost:5001", "localhost:5002"}, 148 | SentinelPassword: "mypassword", 149 | }, 150 | }, 151 | } 152 | 153 | for _, tc := range tests { 154 | got, err := ParseRedisURI(tc.uri) 155 | if err != nil { 156 | t.Errorf("ParseRedisURI(%q) returned an error: %v", tc.uri, err) 157 | continue 158 | } 159 | 160 | if diff := cmp.Diff(tc.want, got, cmpopts.IgnoreUnexported(tls.Config{})); diff != "" { 161 | t.Errorf("ParseRedisURI(%q) = %+v, want %+v\n(-want,+got)\n%s", tc.uri, got, tc.want, diff) 162 | } 163 | } 164 | } 165 | 166 | func TestParseRedisURIErrors(t *testing.T) { 167 | tests := []struct { 168 | desc string 169 | uri string 170 | }{ 171 | { 172 | "unsupported scheme", 173 | "rdb://localhost:6379", 174 | }, 175 | { 176 | "missing scheme", 177 | "localhost:6379", 178 | }, 179 | { 180 | "multiple db numbers", 181 | "redis://localhost:6379/1,2,3", 182 | }, 183 | { 184 | "missing path for socket connection", 185 | "redis-socket://?db=one", 186 | }, 187 | { 188 | "non integer for db numbers for socket", 189 | "redis-socket:///some/path/to/redis?db=one", 190 | }, 191 | } 192 | 193 | for _, tc := range tests { 194 | _, err := ParseRedisURI(tc.uri) 195 | if err == nil { 196 | t.Errorf("%s: ParseRedisURI(%q) succeeded for malformed input, want error", 197 | tc.desc, tc.uri) 198 | } 199 | } 200 | } 201 | -------------------------------------------------------------------------------- /benchmark_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 
Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "context" 9 | "encoding/json" 10 | "fmt" 11 | "sync" 12 | "testing" 13 | "time" 14 | 15 | h "github.com/hibiken/asynq/internal/testutil" 16 | ) 17 | 18 | // Creates a new task of type "task<n>" with payload {"data": n}. 19 | func makeTask(n int) *Task { 20 | b, err := json.Marshal(map[string]int{"data": n}) 21 | if err != nil { 22 | panic(err) 23 | } 24 | return NewTask(fmt.Sprintf("task%d", n), b) 25 | } 26 | 27 | // Simple E2E benchmark with no scheduled tasks and no retries. 28 | func BenchmarkEndToEndSimple(b *testing.B) { 29 | const count = 100000 30 | for n := 0; n < b.N; n++ { 31 | b.StopTimer() // begin setup 32 | setup(b) 33 | redis := getRedisConnOpt(b) 34 | client := NewClient(redis) 35 | srv := NewServer(redis, Config{ 36 | Concurrency: 10, 37 | RetryDelayFunc: func(n int, err error, t *Task) time.Duration { 38 | return time.Second 39 | }, 40 | LogLevel: testLogLevel, 41 | }) 42 | // Create a bunch of tasks 43 | for i := 0; i < count; i++ { 44 | if _, err := client.Enqueue(makeTask(i)); err != nil { 45 | b.Fatalf("could not enqueue a task: %v", err) 46 | } 47 | } 48 | client.Close() 49 | 50 | var wg sync.WaitGroup 51 | wg.Add(count) 52 | handler := func(ctx context.Context, t *Task) error { 53 | wg.Done() 54 | return nil 55 | } 56 | b.StartTimer() // end setup 57 | 58 | _ = srv.Start(HandlerFunc(handler)) 59 | wg.Wait() 60 | 61 | b.StopTimer() // begin teardown 62 | srv.Stop() 63 | b.StartTimer() // end teardown 64 | } 65 | } 66 | 67 | // E2E benchmark with scheduled tasks and retries. 68 | func BenchmarkEndToEnd(b *testing.B) { 69 | const count = 100000 70 | for n := 0; n < b.N; n++ { 71 | b.StopTimer() // begin setup 72 | setup(b) 73 | redis := getRedisConnOpt(b) 74 | client := NewClient(redis) 75 | srv := NewServer(redis, Config{ 76 | Concurrency: 10, 77 | RetryDelayFunc: func(n int, err error, t *Task) time.Duration { 78 | return time.Second 79 | }, 80 | LogLevel: testLogLevel, 81 | }) 82 | // Create a bunch of tasks 83 | for i := 0; i < count; i++ { 84 | if _, err := client.Enqueue(makeTask(i)); err != nil { 85 | b.Fatalf("could not enqueue a task: %v", err) 86 | } 87 | } 88 | for i := 0; i < count; i++ { 89 | if _, err := client.Enqueue(makeTask(i), ProcessIn(1*time.Second)); err != nil { 90 | b.Fatalf("could not enqueue a task: %v", err) 91 | } 92 | } 93 | client.Close() 94 | 95 | var wg sync.WaitGroup 96 | wg.Add(count * 2) 97 | handler := func(ctx context.Context, t *Task) error { 98 | var p map[string]int 99 | if err := json.Unmarshal(t.Payload(), &p); err != nil { 100 | b.Logf("internal error: %v", err) 101 | } 102 | n, ok := p["data"] 103 | if !ok { 104 | n = 1 105 | b.Logf("internal error: could not get data from payload") 106 | } 107 | retried, ok := GetRetryCount(ctx) 108 | if !ok { 109 | b.Logf("internal error: could not get retry count from context") 110 | } 111 | // Fail 1% of tasks for the first attempt. 112 | if retried == 0 && n%100 == 0 { 113 | return fmt.Errorf(":(") 114 | } 115 | wg.Done() 116 | return nil 117 | } 118 | b.StartTimer() // end setup 119 | 120 | _ = srv.Start(HandlerFunc(handler)) 121 | wg.Wait() 122 | 123 | b.StopTimer() // begin teardown 124 | srv.Stop() 125 | b.StartTimer() // end teardown 126 | } 127 | } 128 | 129 | // Simple E2E benchmark with no scheduled tasks and no retries, using multiple queues. 
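// The queue weights configured below (high: 6, default: 3, low: 1) tell the
// server to dequeue from "high" roughly 60% of the time, "default" roughly
// 30%, and "low" roughly 10% while all three queues are non-empty.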
130 | func BenchmarkEndToEndMultipleQueues(b *testing.B) { 131 | // number of tasks to create for each queue 132 | const ( 133 | highCount = 20000 134 | defaultCount = 20000 135 | lowCount = 20000 136 | ) 137 | for n := 0; n < b.N; n++ { 138 | b.StopTimer() // begin setup 139 | setup(b) 140 | redis := getRedisConnOpt(b) 141 | client := NewClient(redis) 142 | srv := NewServer(redis, Config{ 143 | Concurrency: 10, 144 | Queues: map[string]int{ 145 | "high": 6, 146 | "default": 3, 147 | "low": 1, 148 | }, 149 | LogLevel: testLogLevel, 150 | }) 151 | // Create a bunch of tasks 152 | for i := 0; i < highCount; i++ { 153 | if _, err := client.Enqueue(makeTask(i), Queue("high")); err != nil { 154 | b.Fatalf("could not enqueue a task: %v", err) 155 | } 156 | } 157 | for i := 0; i < defaultCount; i++ { 158 | if _, err := client.Enqueue(makeTask(i)); err != nil { 159 | b.Fatalf("could not enqueue a task: %v", err) 160 | } 161 | } 162 | for i := 0; i < lowCount; i++ { 163 | if _, err := client.Enqueue(makeTask(i), Queue("low")); err != nil { 164 | b.Fatalf("could not enqueue a task: %v", err) 165 | } 166 | } 167 | client.Close() 168 | 169 | var wg sync.WaitGroup 170 | wg.Add(highCount + defaultCount + lowCount) 171 | handler := func(ctx context.Context, t *Task) error { 172 | wg.Done() 173 | return nil 174 | } 175 | b.StartTimer() // end setup 176 | 177 | _ = srv.Start(HandlerFunc(handler)) 178 | wg.Wait() 179 | 180 | b.StopTimer() // begin teardown 181 | srv.Stop() 182 | b.StartTimer() // end teardown 183 | } 184 | } 185 | 186 | // E2E benchmark to check client enqueue operation performs correctly, 187 | // while server is busy processing tasks. 188 | func BenchmarkClientWhileServerRunning(b *testing.B) { 189 | const count = 10000 190 | for n := 0; n < b.N; n++ { 191 | b.StopTimer() // begin setup 192 | setup(b) 193 | redis := getRedisConnOpt(b) 194 | client := NewClient(redis) 195 | srv := NewServer(redis, Config{ 196 | Concurrency: 10, 197 | RetryDelayFunc: func(n int, err error, t *Task) time.Duration { 198 | return time.Second 199 | }, 200 | LogLevel: testLogLevel, 201 | }) 202 | // Enqueue 10,000 tasks. 203 | for i := 0; i < count; i++ { 204 | if _, err := client.Enqueue(makeTask(i)); err != nil { 205 | b.Fatalf("could not enqueue a task: %v", err) 206 | } 207 | } 208 | // Schedule 10,000 tasks. 209 | for i := 0; i < count; i++ { 210 | if _, err := client.Enqueue(makeTask(i), ProcessIn(1*time.Second)); err != nil { 211 | b.Fatalf("could not enqueue a task: %v", err) 212 | } 213 | } 214 | 215 | handler := func(ctx context.Context, t *Task) error { 216 | return nil 217 | } 218 | _ = srv.Start(HandlerFunc(handler)) 219 | 220 | b.StartTimer() // end setup 221 | 222 | b.Log("Starting enqueueing") 223 | enqueued := 0 224 | for enqueued < 100000 { 225 | t := NewTask(fmt.Sprintf("enqueued%d", enqueued), h.JSON(map[string]interface{}{"data": enqueued})) 226 | if _, err := client.Enqueue(t); err != nil { 227 | b.Logf("could not enqueue task %d: %v", enqueued, err) 228 | continue 229 | } 230 | enqueued++ 231 | } 232 | b.Logf("Finished enqueueing %d tasks", enqueued) 233 | 234 | b.StopTimer() // begin teardown 235 | srv.Stop() 236 | client.Close() 237 | b.StartTimer() // end teardown 238 | } 239 | } 240 | -------------------------------------------------------------------------------- /context.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 
2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "context" 9 | 10 | asynqcontext "github.com/hibiken/asynq/internal/context" 11 | ) 12 | 13 | // GetTaskID extracts a task ID from a context, if any. 14 | // 15 | // ID of a task is guaranteed to be unique. 16 | // ID of a task doesn't change if the task is being retried. 17 | func GetTaskID(ctx context.Context) (id string, ok bool) { 18 | return asynqcontext.GetTaskID(ctx) 19 | } 20 | 21 | // GetRetryCount extracts retry count from a context, if any. 22 | // 23 | // Return value n indicates the number of times the associated task has been 24 | // retried so far. 25 | func GetRetryCount(ctx context.Context) (n int, ok bool) { 26 | return asynqcontext.GetRetryCount(ctx) 27 | } 28 | 29 | // GetMaxRetry extracts maximum retry from a context, if any. 30 | // 31 | // Return value n indicates the maximum number of times the associated task 32 | // can be retried if ProcessTask returns a non-nil error. 33 | func GetMaxRetry(ctx context.Context) (n int, ok bool) { 34 | return asynqcontext.GetMaxRetry(ctx) 35 | } 36 | 37 | // GetQueueName extracts queue name from a context, if any. 38 | // 39 | // Return value queue indicates which queue the task was pulled from. 40 | func GetQueueName(ctx context.Context) (queue string, ok bool) { 41 | return asynqcontext.GetQueueName(ctx) 42 | } 43 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | /* 6 | Package asynq provides a framework for a Redis-based distributed task queue. 7 | 8 | Asynq uses Redis as a message broker. To connect to Redis, 9 | specify the connection using one of RedisConnOpt types. 10 | 11 | redisConnOpt := asynq.RedisClientOpt{ 12 | Addr: "127.0.0.1:6379", 13 | Password: "xxxxx", 14 | DB: 2, 15 | } 16 | 17 | The Client is used to enqueue a task. 18 | 19 | 20 | client := asynq.NewClient(redisConnOpt) 21 | 22 | // Task is created with two parameters: its type and payload. 23 | // Payload data is simply an array of bytes. It can be encoded in JSON, Protocol Buffer, Gob, etc. 24 | b, err := json.Marshal(ExamplePayload{UserID: 42}) 25 | if err != nil { 26 | log.Fatal(err) 27 | } 28 | 29 | task := asynq.NewTask("example", b) 30 | 31 | // Enqueue the task to be processed immediately. 32 | info, err := client.Enqueue(task) 33 | 34 | // Schedule the task to be processed after one minute. 35 | info, err = client.Enqueue(task, asynq.ProcessIn(1*time.Minute)) 36 | 37 | The Server is used to run the task processing workers with a given 38 | handler. 39 | srv := asynq.NewServer(redisConnOpt, asynq.Config{ 40 | Concurrency: 10, 41 | }) 42 | 43 | if err := srv.Run(handler); err != nil { 44 | log.Fatal(err) 45 | } 46 | 47 | Handler is an interface type with a method that 48 | takes a task and returns an error. Handler should return nil if 49 | the processing is successful, otherwise return a non-nil error. 50 | If the handler panics or returns a non-nil error, the task will be retried in the future. 51 | 52 | Example of a type that implements the Handler interface. 53 | type TaskHandler struct { 54 | // ... 
55 | } 56 | 57 | func (h *TaskHandler) ProcessTask(ctx context.Context, task *asynq.Task) error { 58 | switch task.Type() { 59 | case "example": 60 | var data ExamplePayload 61 | if err := json.Unmarshal(task.Payload(), &data); err != nil { 62 | return err 63 | } 64 | // perform task with the data 65 | 66 | default: 67 | return fmt.Errorf("unexpected task type %q", task.Type()) 68 | } 69 | return nil 70 | } 71 | */ 72 | package asynq 73 | -------------------------------------------------------------------------------- /docs/assets/asynq_history.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hibiken/asynq/c327bc40a28e4db45195cfe082d88faa808ce87d/docs/assets/asynq_history.gif -------------------------------------------------------------------------------- /docs/assets/asynq_ps.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hibiken/asynq/c327bc40a28e4db45195cfe082d88faa808ce87d/docs/assets/asynq_ps.gif -------------------------------------------------------------------------------- /docs/assets/asynq_stats.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hibiken/asynq/c327bc40a28e4db45195cfe082d88faa808ce87d/docs/assets/asynq_stats.gif -------------------------------------------------------------------------------- /docs/assets/asynqmon-queues-view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hibiken/asynq/c327bc40a28e4db45195cfe082d88faa808ce87d/docs/assets/asynqmon-queues-view.png -------------------------------------------------------------------------------- /docs/assets/asynqmon-task-view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hibiken/asynq/c327bc40a28e4db45195cfe082d88faa808ce87d/docs/assets/asynqmon-task-view.png -------------------------------------------------------------------------------- /docs/assets/cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hibiken/asynq/c327bc40a28e4db45195cfe082d88faa808ce87d/docs/assets/cluster.png -------------------------------------------------------------------------------- /docs/assets/dash.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hibiken/asynq/c327bc40a28e4db45195cfe082d88faa808ce87d/docs/assets/dash.gif -------------------------------------------------------------------------------- /docs/assets/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hibiken/asynq/c327bc40a28e4db45195cfe082d88faa808ce87d/docs/assets/demo.gif -------------------------------------------------------------------------------- /docs/assets/overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hibiken/asynq/c327bc40a28e4db45195cfe082d88faa808ce87d/docs/assets/overview.png -------------------------------------------------------------------------------- /docs/assets/task-queue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hibiken/asynq/c327bc40a28e4db45195cfe082d88faa808ce87d/docs/assets/task-queue.png 
-------------------------------------------------------------------------------- /example_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq_test 6 | 7 | import ( 8 | "context" 9 | "fmt" 10 | "log" 11 | "os" 12 | "os/signal" 13 | "time" 14 | 15 | "github.com/hibiken/asynq" 16 | "golang.org/x/sys/unix" 17 | ) 18 | 19 | func ExampleServer_Run() { 20 | srv := asynq.NewServer( 21 | asynq.RedisClientOpt{Addr: ":6379"}, 22 | asynq.Config{Concurrency: 20}, 23 | ) 24 | 25 | h := asynq.NewServeMux() 26 | // ... Register handlers 27 | 28 | // Run blocks and waits for os signal to terminate the program. 29 | if err := srv.Run(h); err != nil { 30 | log.Fatal(err) 31 | } 32 | } 33 | 34 | func ExampleServer_Shutdown() { 35 | srv := asynq.NewServer( 36 | asynq.RedisClientOpt{Addr: ":6379"}, 37 | asynq.Config{Concurrency: 20}, 38 | ) 39 | 40 | h := asynq.NewServeMux() 41 | // ... Register handlers 42 | 43 | if err := srv.Start(h); err != nil { 44 | log.Fatal(err) 45 | } 46 | 47 | sigs := make(chan os.Signal, 1) 48 | signal.Notify(sigs, unix.SIGTERM, unix.SIGINT) 49 | <-sigs // wait for termination signal 50 | 51 | srv.Shutdown() 52 | } 53 | 54 | func ExampleServer_Stop() { 55 | srv := asynq.NewServer( 56 | asynq.RedisClientOpt{Addr: ":6379"}, 57 | asynq.Config{Concurrency: 20}, 58 | ) 59 | 60 | h := asynq.NewServeMux() 61 | // ... Register handlers 62 | 63 | if err := srv.Start(h); err != nil { 64 | log.Fatal(err) 65 | } 66 | 67 | sigs := make(chan os.Signal, 1) 68 | signal.Notify(sigs, unix.SIGTERM, unix.SIGINT, unix.SIGTSTP) 69 | // Handle SIGTERM, SIGINT to exit the program. 70 | // Handle SIGTSTP to stop processing new tasks. 71 | for { 72 | s := <-sigs 73 | if s == unix.SIGTSTP { 74 | srv.Stop() // stop processing new tasks 75 | continue 76 | } 77 | break // received SIGTERM or SIGINT signal 78 | } 79 | 80 | srv.Shutdown() 81 | } 82 | 83 | func ExampleScheduler() { 84 | scheduler := asynq.NewScheduler( 85 | asynq.RedisClientOpt{Addr: ":6379"}, 86 | &asynq.SchedulerOpts{Location: time.Local}, 87 | ) 88 | 89 | if _, err := scheduler.Register("* * * * *", asynq.NewTask("task1", nil)); err != nil { 90 | log.Fatal(err) 91 | } 92 | if _, err := scheduler.Register("@every 30s", asynq.NewTask("task2", nil)); err != nil { 93 | log.Fatal(err) 94 | } 95 | 96 | // Run blocks and waits for os signal to terminate the program. 97 | if err := scheduler.Run(); err != nil { 98 | log.Fatal(err) 99 | } 100 | } 101 | 102 | func ExampleParseRedisURI() { 103 | rconn, err := asynq.ParseRedisURI("redis://localhost:6379/10") 104 | if err != nil { 105 | log.Fatal(err) 106 | } 107 | r, ok := rconn.(asynq.RedisClientOpt) 108 | if !ok { 109 | log.Fatal("unexpected type") 110 | } 111 | fmt.Println(r.Addr) 112 | fmt.Println(r.DB) 113 | // Output: 114 | // localhost:6379 115 | // 10 116 | } 117 | 118 | func ExampleResultWriter() { 119 | // ResultWriter is only accessible in Handler. 120 | h := func(ctx context.Context, task *asynq.Task) error { 121 | // .. 
do task processing work 122 | 123 | res := []byte("task result data") 124 | n, err := task.ResultWriter().Write(res) // implements io.Writer 125 | if err != nil { 126 | return fmt.Errorf("failed to write task result: %v", err) 127 | } 128 | log.Printf(" %d bytes written", n) 129 | return nil 130 | } 131 | 132 | _ = h 133 | } 134 | -------------------------------------------------------------------------------- /forwarder.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "sync" 9 | "time" 10 | 11 | "github.com/hibiken/asynq/internal/base" 12 | "github.com/hibiken/asynq/internal/log" 13 | ) 14 | 15 | // A forwarder is responsible for moving scheduled and retry tasks to pending state 16 | // so that the tasks get processed by the workers. 17 | type forwarder struct { 18 | logger *log.Logger 19 | broker base.Broker 20 | 21 | // channel to communicate back to the long running "forwarder" goroutine. 22 | done chan struct{} 23 | 24 | // list of queue names to check and enqueue. 25 | queues []string 26 | 27 | // poll interval on average 28 | avgInterval time.Duration 29 | } 30 | 31 | type forwarderParams struct { 32 | logger *log.Logger 33 | broker base.Broker 34 | queues []string 35 | interval time.Duration 36 | } 37 | 38 | func newForwarder(params forwarderParams) *forwarder { 39 | return &forwarder{ 40 | logger: params.logger, 41 | broker: params.broker, 42 | done: make(chan struct{}), 43 | queues: params.queues, 44 | avgInterval: params.interval, 45 | } 46 | } 47 | 48 | func (f *forwarder) shutdown() { 49 | f.logger.Debug("Forwarder shutting down...") 50 | // Signal the forwarder goroutine to stop polling. 51 | f.done <- struct{}{} 52 | } 53 | 54 | // start starts the "forwarder" goroutine. 55 | func (f *forwarder) start(wg *sync.WaitGroup) { 56 | wg.Add(1) 57 | go func() { 58 | defer wg.Done() 59 | timer := time.NewTimer(f.avgInterval) 60 | for { 61 | select { 62 | case <-f.done: 63 | f.logger.Debug("Forwarder done") 64 | return 65 | case <-timer.C: 66 | f.exec() 67 | timer.Reset(f.avgInterval) 68 | } 69 | } 70 | }() 71 | } 72 | 73 | func (f *forwarder) exec() { 74 | if err := f.broker.ForwardIfReady(f.queues...); err != nil { 75 | f.logger.Errorf("Failed to forward scheduled tasks: %v", err) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /forwarder_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 
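//
// TestForwarder below seeds scheduled and retry tasks whose ETAs are already in
// the past, waits two poll intervals, and verifies that the forwarder has moved
// them to the pending state while leaving tasks scheduled in the future untouched.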
4 | 5 | package asynq 6 | 7 | import ( 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/google/go-cmp/cmp" 13 | "github.com/hibiken/asynq/internal/base" 14 | "github.com/hibiken/asynq/internal/rdb" 15 | h "github.com/hibiken/asynq/internal/testutil" 16 | ) 17 | 18 | func TestForwarder(t *testing.T) { 19 | r := setup(t) 20 | defer r.Close() 21 | rdbClient := rdb.NewRDB(r) 22 | const pollInterval = time.Second 23 | s := newForwarder(forwarderParams{ 24 | logger: testLogger, 25 | broker: rdbClient, 26 | queues: []string{"default", "critical"}, 27 | interval: pollInterval, 28 | }) 29 | t1 := h.NewTaskMessageWithQueue("gen_thumbnail", nil, "default") 30 | t2 := h.NewTaskMessageWithQueue("send_email", nil, "critical") 31 | t3 := h.NewTaskMessageWithQueue("reindex", nil, "default") 32 | t4 := h.NewTaskMessageWithQueue("sync", nil, "critical") 33 | now := time.Now() 34 | 35 | tests := []struct { 36 | initScheduled map[string][]base.Z // scheduled queue initial state 37 | initRetry map[string][]base.Z // retry queue initial state 38 | initPending map[string][]*base.TaskMessage // default queue initial state 39 | wait time.Duration // wait duration before checking for final state 40 | wantScheduled map[string][]*base.TaskMessage // schedule queue final state 41 | wantRetry map[string][]*base.TaskMessage // retry queue final state 42 | wantPending map[string][]*base.TaskMessage // default queue final state 43 | }{ 44 | { 45 | initScheduled: map[string][]base.Z{ 46 | "default": {{Message: t1, Score: now.Add(time.Hour).Unix()}}, 47 | "critical": {{Message: t2, Score: now.Add(-2 * time.Second).Unix()}}, 48 | }, 49 | initRetry: map[string][]base.Z{ 50 | "default": {{Message: t3, Score: time.Now().Add(-500 * time.Millisecond).Unix()}}, 51 | "critical": {}, 52 | }, 53 | initPending: map[string][]*base.TaskMessage{ 54 | "default": {}, 55 | "critical": {t4}, 56 | }, 57 | wait: pollInterval * 2, 58 | wantScheduled: map[string][]*base.TaskMessage{ 59 | "default": {t1}, 60 | "critical": {}, 61 | }, 62 | wantRetry: map[string][]*base.TaskMessage{ 63 | "default": {}, 64 | "critical": {}, 65 | }, 66 | wantPending: map[string][]*base.TaskMessage{ 67 | "default": {t3}, 68 | "critical": {t2, t4}, 69 | }, 70 | }, 71 | { 72 | initScheduled: map[string][]base.Z{ 73 | "default": { 74 | {Message: t1, Score: now.Unix()}, 75 | {Message: t3, Score: now.Add(-500 * time.Millisecond).Unix()}, 76 | }, 77 | "critical": { 78 | {Message: t2, Score: now.Add(-2 * time.Second).Unix()}, 79 | }, 80 | }, 81 | initRetry: map[string][]base.Z{ 82 | "default": {}, 83 | "critical": {}, 84 | }, 85 | initPending: map[string][]*base.TaskMessage{ 86 | "default": {}, 87 | "critical": {t4}, 88 | }, 89 | wait: pollInterval * 2, 90 | wantScheduled: map[string][]*base.TaskMessage{ 91 | "default": {}, 92 | "critical": {}, 93 | }, 94 | wantRetry: map[string][]*base.TaskMessage{ 95 | "default": {}, 96 | "critical": {}, 97 | }, 98 | wantPending: map[string][]*base.TaskMessage{ 99 | "default": {t1, t3}, 100 | "critical": {t2, t4}, 101 | }, 102 | }, 103 | } 104 | 105 | for _, tc := range tests { 106 | h.FlushDB(t, r) // clean up db before each test case. 
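		// (Per-case setup: the testutil seed helpers below write the initial
		// scheduled, retry, and pending state directly into Redis; the
		// forwarder under test then runs against that state for tc.wait.)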
107 | h.SeedAllScheduledQueues(t, r, tc.initScheduled) // initialize scheduled queue 108 | h.SeedAllRetryQueues(t, r, tc.initRetry) // initialize retry queue 109 | h.SeedAllPendingQueues(t, r, tc.initPending) // initialize default queue 110 | 111 | var wg sync.WaitGroup 112 | s.start(&wg) 113 | time.Sleep(tc.wait) 114 | s.shutdown() 115 | 116 | for qname, want := range tc.wantScheduled { 117 | gotScheduled := h.GetScheduledMessages(t, r, qname) 118 | if diff := cmp.Diff(want, gotScheduled, h.SortMsgOpt); diff != "" { 119 | t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.ScheduledKey(qname), diff) 120 | } 121 | } 122 | 123 | for qname, want := range tc.wantRetry { 124 | gotRetry := h.GetRetryMessages(t, r, qname) 125 | if diff := cmp.Diff(want, gotRetry, h.SortMsgOpt); diff != "" { 126 | t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.RetryKey(qname), diff) 127 | } 128 | } 129 | 130 | for qname, want := range tc.wantPending { 131 | gotPending := h.GetPendingMessages(t, r, qname) 132 | if diff := cmp.Diff(want, gotPending, h.SortMsgOpt); diff != "" { 133 | t.Errorf("mismatch found in %q after running forwarder: (-want, +got)\n%s", base.PendingKey(qname), diff) 134 | } 135 | } 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/hibiken/asynq 2 | 3 | go 1.22 4 | 5 | require ( 6 | github.com/google/go-cmp v0.6.0 7 | github.com/google/uuid v1.6.0 8 | github.com/redis/go-redis/v9 v9.7.0 9 | github.com/robfig/cron/v3 v3.0.1 10 | github.com/spf13/cast v1.7.0 11 | go.uber.org/goleak v1.3.0 12 | golang.org/x/sys v0.27.0 13 | golang.org/x/time v0.8.0 14 | google.golang.org/protobuf v1.35.2 15 | ) 16 | 17 | require ( 18 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 19 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 20 | ) 21 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= 2 | github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= 3 | github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= 4 | github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= 5 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 6 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 7 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 8 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 9 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 10 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 11 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 12 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 13 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 14 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 15 | github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 16 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 17 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 18 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 19 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 20 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 21 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 22 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 23 | github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= 24 | github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= 25 | github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= 26 | github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= 27 | github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= 28 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 29 | github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= 30 | github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= 31 | github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= 32 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 33 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 34 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 35 | golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= 36 | golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 37 | golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= 38 | golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= 39 | google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= 40 | google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 41 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 42 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 43 | -------------------------------------------------------------------------------- /healthcheck.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "sync" 9 | "time" 10 | 11 | "github.com/hibiken/asynq/internal/base" 12 | "github.com/hibiken/asynq/internal/log" 13 | ) 14 | 15 | // healthchecker is responsible for pinging broker periodically 16 | // and call user provided HeathCheckFunc with the ping result. 17 | type healthchecker struct { 18 | logger *log.Logger 19 | broker base.Broker 20 | 21 | // channel to communicate back to the long running "healthchecker" goroutine. 22 | done chan struct{} 23 | 24 | // interval between healthchecks. 25 | interval time.Duration 26 | 27 | // function to call periodically. 
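	// It receives the result of broker.Ping(): nil when the broker is
	// reachable, the ping error otherwise. A sketch of how users supply it
	// (HealthCheckFunc on the server config is the user-facing counterpart;
	// the tests below refer to it by that name):
	//
	//	asynq.Config{
	//		HealthCheckFunc: func(err error) {
	//			if err != nil {
	//				log.Printf("broker health check failed: %v", err)
	//			}
	//		},
	//	}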
28 | healthcheckFunc func(error) 29 | } 30 | 31 | type healthcheckerParams struct { 32 | logger *log.Logger 33 | broker base.Broker 34 | interval time.Duration 35 | healthcheckFunc func(error) 36 | } 37 | 38 | func newHealthChecker(params healthcheckerParams) *healthchecker { 39 | return &healthchecker{ 40 | logger: params.logger, 41 | broker: params.broker, 42 | done: make(chan struct{}), 43 | interval: params.interval, 44 | healthcheckFunc: params.healthcheckFunc, 45 | } 46 | } 47 | 48 | func (hc *healthchecker) shutdown() { 49 | if hc.healthcheckFunc == nil { 50 | return 51 | } 52 | 53 | hc.logger.Debug("Healthchecker shutting down...") 54 | // Signal the healthchecker goroutine to stop. 55 | hc.done <- struct{}{} 56 | } 57 | 58 | func (hc *healthchecker) start(wg *sync.WaitGroup) { 59 | if hc.healthcheckFunc == nil { 60 | return 61 | } 62 | 63 | wg.Add(1) 64 | go func() { 65 | defer wg.Done() 66 | timer := time.NewTimer(hc.interval) 67 | for { 68 | select { 69 | case <-hc.done: 70 | hc.logger.Debug("Healthchecker done") 71 | timer.Stop() 72 | return 73 | case <-timer.C: 74 | err := hc.broker.Ping() 75 | hc.healthcheckFunc(err) 76 | timer.Reset(hc.interval) 77 | } 78 | } 79 | }() 80 | } 81 | -------------------------------------------------------------------------------- /healthcheck_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/hibiken/asynq/internal/rdb" 13 | "github.com/hibiken/asynq/internal/testbroker" 14 | ) 15 | 16 | func TestHealthChecker(t *testing.T) { 17 | r := setup(t) 18 | defer r.Close() 19 | rdbClient := rdb.NewRDB(r) 20 | 21 | var ( 22 | // mu guards called and e variables. 23 | mu sync.Mutex 24 | called int 25 | e error 26 | ) 27 | checkFn := func(err error) { 28 | mu.Lock() 29 | defer mu.Unlock() 30 | called++ 31 | e = err 32 | } 33 | 34 | hc := newHealthChecker(healthcheckerParams{ 35 | logger: testLogger, 36 | broker: rdbClient, 37 | interval: 1 * time.Second, 38 | healthcheckFunc: checkFn, 39 | }) 40 | 41 | hc.start(&sync.WaitGroup{}) 42 | 43 | time.Sleep(2 * time.Second) 44 | 45 | mu.Lock() 46 | if called == 0 { 47 | t.Errorf("Healthchecker did not call the provided HealthCheckFunc") 48 | } 49 | if e != nil { 50 | t.Errorf("HealthCheckFunc was called with non-nil error: %v", e) 51 | } 52 | mu.Unlock() 53 | 54 | hc.shutdown() 55 | } 56 | 57 | func TestHealthCheckerWhenRedisDown(t *testing.T) { 58 | // Make sure that healthchecker goroutine doesn't panic 59 | // if it cannot connect to redis. 60 | defer func() { 61 | if r := recover(); r != nil { 62 | t.Errorf("panic occurred: %v", r) 63 | } 64 | }() 65 | r := rdb.NewRDB(setup(t)) 66 | defer r.Close() 67 | testBroker := testbroker.NewTestBroker(r) 68 | var ( 69 | // mu guards called and e variables. 
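	// (checkFn below runs on the healthchecker goroutine while the test body
	// reads called and e from the main goroutine, hence the mutex.)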
70 | mu sync.Mutex 71 | called int 72 | e error 73 | ) 74 | checkFn := func(err error) { 75 | mu.Lock() 76 | defer mu.Unlock() 77 | called++ 78 | e = err 79 | } 80 | 81 | hc := newHealthChecker(healthcheckerParams{ 82 | logger: testLogger, 83 | broker: testBroker, 84 | interval: 1 * time.Second, 85 | healthcheckFunc: checkFn, 86 | }) 87 | 88 | testBroker.Sleep() 89 | hc.start(&sync.WaitGroup{}) 90 | 91 | time.Sleep(2 * time.Second) 92 | 93 | mu.Lock() 94 | if called == 0 { 95 | t.Errorf("Healthchecker did not call the provided HealthCheckFunc") 96 | } 97 | if e == nil { 98 | t.Errorf("HealthCheckFunc was called with nil; want non-nil error") 99 | } 100 | mu.Unlock() 101 | 102 | hc.shutdown() 103 | } 104 | -------------------------------------------------------------------------------- /heartbeat.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "os" 9 | "sync" 10 | "time" 11 | 12 | "github.com/google/uuid" 13 | "github.com/hibiken/asynq/internal/base" 14 | "github.com/hibiken/asynq/internal/log" 15 | "github.com/hibiken/asynq/internal/timeutil" 16 | ) 17 | 18 | // heartbeater is responsible for writing process info to redis periodically to 19 | // indicate that the background worker process is up. 20 | type heartbeater struct { 21 | logger *log.Logger 22 | broker base.Broker 23 | clock timeutil.Clock 24 | 25 | // channel to communicate back to the long running "heartbeater" goroutine. 26 | done chan struct{} 27 | 28 | // interval between heartbeats. 29 | interval time.Duration 30 | 31 | // following fields are initialized at construction time and are immutable. 32 | host string 33 | pid int 34 | serverID string 35 | concurrency int 36 | queues map[string]int 37 | strictPriority bool 38 | 39 | // following fields are mutable and should be accessed only by the 40 | // heartbeater goroutine. In other words, confine these variables 41 | // to this goroutine only. 42 | started time.Time 43 | workers map[string]*workerInfo 44 | 45 | // state is shared with other goroutine but is concurrency safe. 46 | state *serverState 47 | 48 | // channels to receive updates on active workers. 
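	// The processor is the expected sender: a workerInfo arrives on starting
	// when a task is picked up, and the task's message arrives on finished
	// when its handler returns. The select loop in start below mirrors these
	// updates into the workers map.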
49 | starting <-chan *workerInfo 50 | finished <-chan *base.TaskMessage 51 | } 52 | 53 | type heartbeaterParams struct { 54 | logger *log.Logger 55 | broker base.Broker 56 | interval time.Duration 57 | concurrency int 58 | queues map[string]int 59 | strictPriority bool 60 | state *serverState 61 | starting <-chan *workerInfo 62 | finished <-chan *base.TaskMessage 63 | } 64 | 65 | func newHeartbeater(params heartbeaterParams) *heartbeater { 66 | host, err := os.Hostname() 67 | if err != nil { 68 | host = "unknown-host" 69 | } 70 | 71 | return &heartbeater{ 72 | logger: params.logger, 73 | broker: params.broker, 74 | clock: timeutil.NewRealClock(), 75 | done: make(chan struct{}), 76 | interval: params.interval, 77 | 78 | host: host, 79 | pid: os.Getpid(), 80 | serverID: uuid.New().String(), 81 | concurrency: params.concurrency, 82 | queues: params.queues, 83 | strictPriority: params.strictPriority, 84 | 85 | state: params.state, 86 | workers: make(map[string]*workerInfo), 87 | starting: params.starting, 88 | finished: params.finished, 89 | } 90 | } 91 | 92 | func (h *heartbeater) shutdown() { 93 | h.logger.Debug("Heartbeater shutting down...") 94 | // Signal the heartbeater goroutine to stop. 95 | h.done <- struct{}{} 96 | } 97 | 98 | // A workerInfo holds information about an active worker. 99 | type workerInfo struct { 100 | // the task message the worker is processing. 101 | msg *base.TaskMessage 102 | // the time the worker has started processing the message. 103 | started time.Time 104 | // deadline the worker has to finish processing the task by. 105 | deadline time.Time 106 | // lease the worker holds for the task. 107 | lease *base.Lease 108 | } 109 | 110 | func (h *heartbeater) start(wg *sync.WaitGroup) { 111 | wg.Add(1) 112 | go func() { 113 | defer wg.Done() 114 | 115 | h.started = h.clock.Now() 116 | 117 | h.beat() 118 | 119 | timer := time.NewTimer(h.interval) 120 | for { 121 | select { 122 | case <-h.done: 123 | if err := h.broker.ClearServerState(h.host, h.pid, h.serverID); err != nil { 124 | h.logger.Errorf("Failed to clear server state: %v", err) 125 | } 126 | h.logger.Debug("Heartbeater done") 127 | timer.Stop() 128 | return 129 | 130 | case <-timer.C: 131 | h.beat() 132 | timer.Reset(h.interval) 133 | 134 | case w := <-h.starting: 135 | h.workers[w.msg.ID] = w 136 | 137 | case msg := <-h.finished: 138 | delete(h.workers, msg.ID) 139 | } 140 | } 141 | }() 142 | } 143 | 144 | // beat extends leases for active workers and writes server/worker info to redis. 145 | func (h *heartbeater) beat() { 146 | h.state.mu.Lock() 147 | srvStatus := h.state.value.String() 148 | h.state.mu.Unlock() 149 | 150 | info := base.ServerInfo{ 151 | Host: h.host, 152 | PID: h.pid, 153 | ServerID: h.serverID, 154 | Concurrency: h.concurrency, 155 | Queues: h.queues, 156 | StrictPriority: h.strictPriority, 157 | Status: srvStatus, 158 | Started: h.started, 159 | ActiveWorkerCount: len(h.workers), 160 | } 161 | 162 | var ws []*base.WorkerInfo 163 | idsByQueue := make(map[string][]string) 164 | for id, w := range h.workers { 165 | ws = append(ws, &base.WorkerInfo{ 166 | Host: h.host, 167 | PID: h.pid, 168 | ServerID: h.serverID, 169 | ID: id, 170 | Type: w.msg.Type, 171 | Queue: w.msg.Queue, 172 | Payload: w.msg.Payload, 173 | Started: w.started, 174 | Deadline: w.deadline, 175 | }) 176 | // Check the lease before adding the worker to the set, so that an already-expired lease is never extended.
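		// (An already-expired lease instead triggers NotifyExpiration in the
		// else branch, signaling the processor that the worker lost its lease.)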
177 | if w.lease.IsValid() { 178 | idsByQueue[w.msg.Queue] = append(idsByQueue[w.msg.Queue], id) 179 | } else { 180 | w.lease.NotifyExpiration() // notify processor if the lease is expired 181 | } 182 | } 183 | 184 | // Note: Set TTL to be long enough so that it won't expire before we write again 185 | // and short enough to expire quickly once the process is shut down or killed. 186 | if err := h.broker.WriteServerState(&info, ws, h.interval*2); err != nil { 187 | h.logger.Errorf("Failed to write server state data: %v", err) 188 | } 189 | 190 | for qname, ids := range idsByQueue { 191 | expirationTime, err := h.broker.ExtendLease(qname, ids...) 192 | if err != nil { 193 | h.logger.Errorf("Failed to extend lease for tasks %v: %v", ids, err) 194 | continue 195 | } 196 | for _, id := range ids { 197 | if l := h.workers[id].lease; !l.Reset(expirationTime) { 198 | h.logger.Warnf("Lease reset failed for %s; lease deadline: %v", id, l.Deadline()) 199 | } 200 | } 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /internal/context/context.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package context 6 | 7 | import ( 8 | "context" 9 | "time" 10 | 11 | "github.com/hibiken/asynq/internal/base" 12 | ) 13 | 14 | // A taskMetadata holds task scoped data to put in context. 15 | type taskMetadata struct { 16 | id string 17 | maxRetry int 18 | retryCount int 19 | qname string 20 | } 21 | 22 | // ctxKey type is unexported to prevent collisions with context keys defined in 23 | // other packages. 24 | type ctxKey int 25 | 26 | // metadataCtxKey is the context key for the task metadata. 27 | // Its value of zero is arbitrary. 28 | const metadataCtxKey ctxKey = 0 29 | 30 | // New returns a context and cancel function for a given task message. 31 | func New(base context.Context, msg *base.TaskMessage, deadline time.Time) (context.Context, context.CancelFunc) { 32 | metadata := taskMetadata{ 33 | id: msg.ID, 34 | maxRetry: msg.Retry, 35 | retryCount: msg.Retried, 36 | qname: msg.Queue, 37 | } 38 | ctx := context.WithValue(base, metadataCtxKey, metadata) 39 | return context.WithDeadline(ctx, deadline) 40 | } 41 | 42 | // GetTaskID extracts a task ID from a context, if any. 43 | // 44 | // ID of a task is guaranteed to be unique. 45 | // ID of a task doesn't change if the task is being retried. 46 | func GetTaskID(ctx context.Context) (id string, ok bool) { 47 | metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata) 48 | if !ok { 49 | return "", false 50 | } 51 | return metadata.id, true 52 | } 53 | 54 | // GetRetryCount extracts retry count from a context, if any. 55 | // 56 | // Return value n indicates the number of times associated task has been 57 | // retried so far. 58 | func GetRetryCount(ctx context.Context) (n int, ok bool) { 59 | metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata) 60 | if !ok { 61 | return 0, false 62 | } 63 | return metadata.retryCount, true 64 | } 65 | 66 | // GetMaxRetry extracts maximum retry from a context, if any. 67 | // 68 | // Return value n indicates the maximum number of times the associated task 69 | // can be retried if ProcessTask returns a non-nil error. 
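//
// A sketch of intended use from within a handler (hypothetical snippet; the
// public asynq package exposes equivalent helpers):
//
//	if max, ok := GetMaxRetry(ctx); ok {
//		if n, _ := GetRetryCount(ctx); n == max {
//			// Final attempt: escalate instead of counting on another retry.
//		}
//	}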
70 | func GetMaxRetry(ctx context.Context) (n int, ok bool) { 71 | metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata) 72 | if !ok { 73 | return 0, false 74 | } 75 | return metadata.maxRetry, true 76 | } 77 | 78 | // GetQueueName extracts queue name from a context, if any. 79 | // 80 | // Return value qname indicates which queue the task was pulled from. 81 | func GetQueueName(ctx context.Context) (qname string, ok bool) { 82 | metadata, ok := ctx.Value(metadataCtxKey).(taskMetadata) 83 | if !ok { 84 | return "", false 85 | } 86 | return metadata.qname, true 87 | } 88 | -------------------------------------------------------------------------------- /internal/context/context_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package context 6 | 7 | import ( 8 | "context" 9 | "fmt" 10 | "testing" 11 | "time" 12 | 13 | "github.com/google/go-cmp/cmp" 14 | "github.com/google/uuid" 15 | "github.com/hibiken/asynq/internal/base" 16 | ) 17 | 18 | func TestCreateContextWithFutureDeadline(t *testing.T) { 19 | tests := []struct { 20 | deadline time.Time 21 | }{ 22 | {time.Now().Add(time.Hour)}, 23 | } 24 | 25 | for _, tc := range tests { 26 | msg := &base.TaskMessage{ 27 | Type: "something", 28 | ID: uuid.NewString(), 29 | Payload: nil, 30 | } 31 | 32 | ctx, cancel := New(context.Background(), msg, tc.deadline) 33 | select { 34 | case x := <-ctx.Done(): 35 | t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x) 36 | default: 37 | } 38 | 39 | got, ok := ctx.Deadline() 40 | if !ok { 41 | t.Errorf("ctx.Deadline() returned false, want deadline to be set") 42 | } 43 | if !cmp.Equal(tc.deadline, got) { 44 | t.Errorf("ctx.Deadline() returned %v, want %v", got, tc.deadline) 45 | } 46 | 47 | cancel() 48 | 49 | select { 50 | case <-ctx.Done(): 51 | default: 52 | t.Errorf("ctx.Done() blocked, want it to be non-blocking") 53 | } 54 | } 55 | } 56 | 57 | func TestCreateContextWithBaseContext(t *testing.T) { 58 | type ctxKey string 59 | type ctxValue string 60 | var key ctxKey = "key" 61 | var value ctxValue = "value" 62 | 63 | tests := []struct { 64 | baseCtx context.Context 65 | validate func(ctx context.Context, t *testing.T) error 66 | }{ 67 | { 68 | baseCtx: context.WithValue(context.Background(), key, value), 69 | validate: func(ctx context.Context, t *testing.T) error { 70 | got, ok := ctx.Value(key).(ctxValue) 71 | if !ok { 72 | return fmt.Errorf("ctx.Value().(ctxValue) returned false, expected to be true") 73 | } 74 | if want := value; got != want { 75 | return fmt.Errorf("ctx.Value().(ctxValue) returned unknown value (%v), expected to be %s", got, value) 76 | } 77 | return nil 78 | }, 79 | }, 80 | } 81 | 82 | for _, tc := range tests { 83 | msg := &base.TaskMessage{ 84 | Type: "something", 85 | ID: uuid.NewString(), 86 | Payload: nil, 87 | } 88 | 89 | ctx, cancel := New(tc.baseCtx, msg, time.Now().Add(30*time.Minute)) 90 | defer cancel() 91 | 92 | select { 93 | case x := <-ctx.Done(): 94 | t.Errorf("<-ctx.Done() == %v, want nothing (it should block)", x) 95 | default: 96 | } 97 | 98 | if err := tc.validate(ctx, t); err != nil { 99 | t.Errorf("%v", err) 100 | } 101 | } 102 | } 103 | 104 | func TestCreateContextWithPastDeadline(t *testing.T) { 105 | tests := []struct { 106 | deadline time.Time 107 | }{ 108 | {time.Now().Add(-2 * time.Hour)}, 109 | } 110 | 111 | for _, tc := range 
tests { 112 | msg := &base.TaskMessage{ 113 | Type: "something", 114 | ID: uuid.NewString(), 115 | Payload: nil, 116 | } 117 | 118 | ctx, cancel := New(context.Background(), msg, tc.deadline) 119 | defer cancel() 120 | 121 | select { 122 | case <-ctx.Done(): 123 | default: 124 | t.Errorf("ctx.Done() blocked, want it to be non-blocking") 125 | } 126 | 127 | got, ok := ctx.Deadline() 128 | if !ok { 129 | t.Errorf("ctx.Deadline() returned false, want deadline to be set") 130 | } 131 | if !cmp.Equal(tc.deadline, got) { 132 | t.Errorf("ctx.Deadline() returned %v, want %v", got, tc.deadline) 133 | } 134 | } 135 | } 136 | 137 | func TestGetTaskMetadataFromContext(t *testing.T) { 138 | tests := []struct { 139 | desc string 140 | msg *base.TaskMessage 141 | }{ 142 | {"with zero retried message", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "default"}}, 143 | {"with non-zero retried message", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 10, Retried: 5, Timeout: 1800, Queue: "default"}}, 144 | {"with custom queue name", &base.TaskMessage{Type: "something", ID: uuid.NewString(), Retry: 25, Retried: 0, Timeout: 1800, Queue: "custom"}}, 145 | } 146 | 147 | for _, tc := range tests { 148 | ctx, cancel := New(context.Background(), tc.msg, time.Now().Add(30*time.Minute)) 149 | defer cancel() 150 | 151 | id, ok := GetTaskID(ctx) 152 | if !ok { 153 | t.Errorf("%s: GetTaskID(ctx) returned ok == false", tc.desc) 154 | } 155 | if ok && id != tc.msg.ID { 156 | t.Errorf("%s: GetTaskID(ctx) returned id == %q, want %q", tc.desc, id, tc.msg.ID) 157 | } 158 | 159 | retried, ok := GetRetryCount(ctx) 160 | if !ok { 161 | t.Errorf("%s: GetRetryCount(ctx) returned ok == false", tc.desc) 162 | } 163 | if ok && retried != tc.msg.Retried { 164 | t.Errorf("%s: GetRetryCount(ctx) returned n == %d want %d", tc.desc, retried, tc.msg.Retried) 165 | } 166 | 167 | maxRetry, ok := GetMaxRetry(ctx) 168 | if !ok { 169 | t.Errorf("%s: GetMaxRetry(ctx) returned ok == false", tc.desc) 170 | } 171 | if ok && maxRetry != tc.msg.Retry { 172 | t.Errorf("%s: GetMaxRetry(ctx) returned n == %d want %d", tc.desc, maxRetry, tc.msg.Retry) 173 | } 174 | 175 | qname, ok := GetQueueName(ctx) 176 | if !ok { 177 | t.Errorf("%s: GetQueueName(ctx) returned ok == false", tc.desc) 178 | } 179 | if ok && qname != tc.msg.Queue { 180 | t.Errorf("%s: GetQueueName(ctx) returned qname == %q, want %q", tc.desc, qname, tc.msg.Queue) 181 | } 182 | } 183 | } 184 | 185 | func TestGetTaskMetadataFromContextError(t *testing.T) { 186 | tests := []struct { 187 | desc string 188 | ctx context.Context 189 | }{ 190 | {"with background context", context.Background()}, 191 | } 192 | 193 | for _, tc := range tests { 194 | if _, ok := GetTaskID(tc.ctx); ok { 195 | t.Errorf("%s: GetTaskID(ctx) returned ok == true", tc.desc) 196 | } 197 | if _, ok := GetRetryCount(tc.ctx); ok { 198 | t.Errorf("%s: GetRetryCount(ctx) returned ok == true", tc.desc) 199 | } 200 | if _, ok := GetMaxRetry(tc.ctx); ok { 201 | t.Errorf("%s: GetMaxRetry(ctx) returned ok == true", tc.desc) 202 | } 203 | if _, ok := GetQueueName(tc.ctx); ok { 204 | t.Errorf("%s: GetQueueName(ctx) returned ok == true", tc.desc) 205 | } 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /internal/errors/errors_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 
2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package errors 6 | 7 | import "testing" 8 | 9 | func TestErrorDebugString(t *testing.T) { 10 | // DebugString should include Op since its meant to be used by 11 | // maintainers/contributors of the asynq package. 12 | tests := []struct { 13 | desc string 14 | err error 15 | want string 16 | }{ 17 | { 18 | desc: "With Op, Code, and string", 19 | err: E(Op("rdb.DeleteTask"), NotFound, "cannot find task with id=123"), 20 | want: "rdb.DeleteTask: NOT_FOUND: cannot find task with id=123", 21 | }, 22 | { 23 | desc: "With Op, Code and error", 24 | err: E(Op("rdb.DeleteTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "123"}), 25 | want: `rdb.DeleteTask: NOT_FOUND: cannot find task with id=123 in queue "default"`, 26 | }, 27 | } 28 | 29 | for _, tc := range tests { 30 | if got := tc.err.(*Error).DebugString(); got != tc.want { 31 | t.Errorf("%s: got=%q, want=%q", tc.desc, got, tc.want) 32 | } 33 | } 34 | } 35 | 36 | func TestErrorString(t *testing.T) { 37 | // String method should omit Op since op is an internal detail 38 | // and we don't want to provide it to users of the package. 39 | tests := []struct { 40 | desc string 41 | err error 42 | want string 43 | }{ 44 | { 45 | desc: "With Op, Code, and string", 46 | err: E(Op("rdb.DeleteTask"), NotFound, "cannot find task with id=123"), 47 | want: "NOT_FOUND: cannot find task with id=123", 48 | }, 49 | { 50 | desc: "With Op, Code and error", 51 | err: E(Op("rdb.DeleteTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "123"}), 52 | want: `NOT_FOUND: cannot find task with id=123 in queue "default"`, 53 | }, 54 | } 55 | 56 | for _, tc := range tests { 57 | if got := tc.err.Error(); got != tc.want { 58 | t.Errorf("%s: got=%q, want=%q", tc.desc, got, tc.want) 59 | } 60 | } 61 | } 62 | 63 | func TestErrorIs(t *testing.T) { 64 | var ErrCustom = New("custom sentinel error") 65 | 66 | tests := []struct { 67 | desc string 68 | err error 69 | target error 70 | want bool 71 | }{ 72 | { 73 | desc: "should unwrap one level", 74 | err: E(Op("rdb.DeleteTask"), ErrCustom), 75 | target: ErrCustom, 76 | want: true, 77 | }, 78 | } 79 | 80 | for _, tc := range tests { 81 | if got := Is(tc.err, tc.target); got != tc.want { 82 | t.Errorf("%s: got=%t, want=%t", tc.desc, got, tc.want) 83 | } 84 | } 85 | } 86 | 87 | func TestErrorAs(t *testing.T) { 88 | tests := []struct { 89 | desc string 90 | err error 91 | target interface{} 92 | want bool 93 | }{ 94 | { 95 | desc: "should unwrap one level", 96 | err: E(Op("rdb.DeleteTask"), NotFound, &QueueNotFoundError{Queue: "email"}), 97 | target: &QueueNotFoundError{}, 98 | want: true, 99 | }, 100 | } 101 | 102 | for _, tc := range tests { 103 | if got := As(tc.err, &tc.target); got != tc.want { 104 | t.Errorf("%s: got=%t, want=%t", tc.desc, got, tc.want) 105 | } 106 | } 107 | } 108 | 109 | func TestErrorPredicates(t *testing.T) { 110 | tests := []struct { 111 | desc string 112 | fn func(err error) bool 113 | err error 114 | want bool 115 | }{ 116 | { 117 | desc: "IsTaskNotFound should detect presence of TaskNotFoundError in err's chain", 118 | fn: IsTaskNotFound, 119 | err: E(Op("rdb.ArchiveTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "9876"}), 120 | want: true, 121 | }, 122 | { 123 | desc: "IsTaskNotFound should detect absence of TaskNotFoundError in err's chain", 124 | fn: IsTaskNotFound, 125 | err: E(Op("rdb.ArchiveTask"), NotFound, &QueueNotFoundError{Queue: "default"}), 126 | want: 
false, 127 | }, 128 | { 129 | desc: "IsQueueNotFound should detect presence of QueueNotFoundError in err's chain", 130 | fn: IsQueueNotFound, 131 | err: E(Op("rdb.ArchiveTask"), NotFound, &QueueNotFoundError{Queue: "default"}), 132 | want: true, 133 | }, 134 | { 135 | desc: "IsPanicError should detect presence of PanicError in err's chain", 136 | fn: IsPanicError, 137 | err: E(Op("unknown"), Unknown, &PanicError{ErrMsg: "Something went wrong"}), 138 | want: true, 139 | }, 140 | } 141 | 142 | for _, tc := range tests { 143 | if got := tc.fn(tc.err); got != tc.want { 144 | t.Errorf("%s: got=%t, want=%t", tc.desc, got, tc.want) 145 | } 146 | } 147 | } 148 | 149 | func TestCanonicalCode(t *testing.T) { 150 | tests := []struct { 151 | desc string 152 | err error 153 | want Code 154 | }{ 155 | { 156 | desc: "without nesting", 157 | err: E(Op("rdb.DeleteTask"), NotFound, &TaskNotFoundError{Queue: "default", ID: "123"}), 158 | want: NotFound, 159 | }, 160 | { 161 | desc: "with nesting", 162 | err: E(FailedPrecondition, E(NotFound)), 163 | want: FailedPrecondition, 164 | }, 165 | { 166 | desc: "returns Unspecified if err is not *Error", 167 | err: New("some other error"), 168 | want: Unspecified, 169 | }, 170 | { 171 | desc: "returns Unspecified if err is nil", 172 | err: nil, 173 | want: Unspecified, 174 | }, 175 | } 176 | 177 | for _, tc := range tests { 178 | if got := CanonicalCode(tc.err); got != tc.want { 179 | t.Errorf("%s: got=%s, want=%s", tc.desc, got, tc.want) 180 | } 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /internal/log/log.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | // Package log exports logging related types and functions. 6 | package log 7 | 8 | import ( 9 | "fmt" 10 | "io" 11 | stdlog "log" 12 | "os" 13 | "sync" 14 | ) 15 | 16 | // Base supports logging at various log levels. 17 | type Base interface { 18 | // Debug logs a message at Debug level. 19 | Debug(args ...interface{}) 20 | 21 | // Info logs a message at Info level. 22 | Info(args ...interface{}) 23 | 24 | // Warn logs a message at Warning level. 25 | Warn(args ...interface{}) 26 | 27 | // Error logs a message at Error level. 28 | Error(args ...interface{}) 29 | 30 | // Fatal logs a message at Fatal level 31 | // and process will exit with status set to 1. 32 | Fatal(args ...interface{}) 33 | } 34 | 35 | // baseLogger is a wrapper object around log.Logger from the standard library. 36 | // It supports logging at various log levels. 37 | type baseLogger struct { 38 | *stdlog.Logger 39 | } 40 | 41 | // Debug logs a message at Debug level. 42 | func (l *baseLogger) Debug(args ...interface{}) { 43 | l.prefixPrint("DEBUG: ", args...) 44 | } 45 | 46 | // Info logs a message at Info level. 47 | func (l *baseLogger) Info(args ...interface{}) { 48 | l.prefixPrint("INFO: ", args...) 49 | } 50 | 51 | // Warn logs a message at Warning level. 52 | func (l *baseLogger) Warn(args ...interface{}) { 53 | l.prefixPrint("WARN: ", args...) 54 | } 55 | 56 | // Error logs a message at Error level. 57 | func (l *baseLogger) Error(args ...interface{}) { 58 | l.prefixPrint("ERROR: ", args...) 59 | } 60 | 61 | // Fatal logs a message at Fatal level 62 | // and process will exit with status set to 1. 
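// (The exit goes through os.Exit, so deferred functions do not run and any
// buffered output may be lost; treat Fatal as a last resort.)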
63 | func (l *baseLogger) Fatal(args ...interface{}) { 64 | l.prefixPrint("FATAL: ", args...) 65 | os.Exit(1) 66 | } 67 | 68 | func (l *baseLogger) prefixPrint(prefix string, args ...interface{}) { 69 | args = append([]interface{}{prefix}, args...) 70 | l.Print(args...) 71 | } 72 | 73 | // newBase creates and returns a new instance of baseLogger. 74 | func newBase(out io.Writer) *baseLogger { 75 | prefix := fmt.Sprintf("asynq: pid=%d ", os.Getpid()) 76 | return &baseLogger{ 77 | stdlog.New(out, prefix, stdlog.Ldate|stdlog.Ltime|stdlog.Lmicroseconds|stdlog.LUTC), 78 | } 79 | } 80 | 81 | // NewLogger creates and returns a new instance of Logger. 82 | // Log level is set to DebugLevel by default. 83 | func NewLogger(base Base) *Logger { 84 | if base == nil { 85 | base = newBase(os.Stderr) 86 | } 87 | return &Logger{base: base, level: DebugLevel} 88 | } 89 | 90 | // Logger logs message to io.Writer at various log levels. 91 | type Logger struct { 92 | base Base 93 | 94 | mu sync.Mutex 95 | // Minimum log level for this logger. 96 | // Message with level lower than this level won't be outputted. 97 | level Level 98 | } 99 | 100 | // Level represents a log level. 101 | type Level int32 102 | 103 | const ( 104 | // DebugLevel is the lowest level of logging. 105 | // Debug logs are intended for debugging and development purposes. 106 | DebugLevel Level = iota 107 | 108 | // InfoLevel is used for general informational log messages. 109 | InfoLevel 110 | 111 | // WarnLevel is used for undesired but relatively expected events, 112 | // which may indicate a problem. 113 | WarnLevel 114 | 115 | // ErrorLevel is used for undesired and unexpected events that 116 | // the program can recover from. 117 | ErrorLevel 118 | 119 | // FatalLevel is used for undesired and unexpected events that 120 | // the program cannot recover from. 121 | FatalLevel 122 | ) 123 | 124 | // String is part of the fmt.Stringer interface. 125 | // 126 | // Used for testing and debugging purposes. 127 | func (l Level) String() string { 128 | switch l { 129 | case DebugLevel: 130 | return "debug" 131 | case InfoLevel: 132 | return "info" 133 | case WarnLevel: 134 | return "warning" 135 | case ErrorLevel: 136 | return "error" 137 | case FatalLevel: 138 | return "fatal" 139 | default: 140 | return "unknown" 141 | } 142 | } 143 | 144 | // canLogAt reports whether logger can log at level v. 145 | func (l *Logger) canLogAt(v Level) bool { 146 | l.mu.Lock() 147 | defer l.mu.Unlock() 148 | return v >= l.level 149 | } 150 | 151 | func (l *Logger) Debug(args ...interface{}) { 152 | if !l.canLogAt(DebugLevel) { 153 | return 154 | } 155 | l.base.Debug(args...) 156 | } 157 | 158 | func (l *Logger) Info(args ...interface{}) { 159 | if !l.canLogAt(InfoLevel) { 160 | return 161 | } 162 | l.base.Info(args...) 163 | } 164 | 165 | func (l *Logger) Warn(args ...interface{}) { 166 | if !l.canLogAt(WarnLevel) { 167 | return 168 | } 169 | l.base.Warn(args...) 170 | } 171 | 172 | func (l *Logger) Error(args ...interface{}) { 173 | if !l.canLogAt(ErrorLevel) { 174 | return 175 | } 176 | l.base.Error(args...) 177 | } 178 | 179 | func (l *Logger) Fatal(args ...interface{}) { 180 | if !l.canLogAt(FatalLevel) { 181 | return 182 | } 183 | l.base.Fatal(args...) 
184 | } 185 | 186 | func (l *Logger) Debugf(format string, args ...interface{}) { 187 | l.Debug(fmt.Sprintf(format, args...)) 188 | } 189 | 190 | func (l *Logger) Infof(format string, args ...interface{}) { 191 | l.Info(fmt.Sprintf(format, args...)) 192 | } 193 | 194 | func (l *Logger) Warnf(format string, args ...interface{}) { 195 | l.Warn(fmt.Sprintf(format, args...)) 196 | } 197 | 198 | func (l *Logger) Errorf(format string, args ...interface{}) { 199 | l.Error(fmt.Sprintf(format, args...)) 200 | } 201 | 202 | func (l *Logger) Fatalf(format string, args ...interface{}) { 203 | l.Fatal(fmt.Sprintf(format, args...)) 204 | } 205 | 206 | // SetLevel sets the logger level. 207 | // It panics if v is less than DebugLevel or greater than FatalLevel. 208 | func (l *Logger) SetLevel(v Level) { 209 | l.mu.Lock() 210 | defer l.mu.Unlock() 211 | if v < DebugLevel || v > FatalLevel { 212 | panic("log: invalid log level") 213 | } 214 | l.level = v 215 | } 216 | -------------------------------------------------------------------------------- /internal/proto/asynq.proto: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | syntax = "proto3"; 6 | package asynq; 7 | 8 | import "google/protobuf/timestamp.proto"; 9 | 10 | option go_package = "github.com/hibiken/asynq/internal/proto"; 11 | 12 | // TaskMessage is the internal representation of a task with additional 13 | // metadata fields. 14 | // Next ID: 15 15 | message TaskMessage { 16 | // Type indicates the kind of the task to be performed. 17 | string type = 1; 18 | 19 | // Payload holds data needed to process the task. 20 | bytes payload = 2; 21 | 22 | // Unique identifier for the task. 23 | string id = 3; 24 | 25 | // Name of the queue to which this task belongs. 26 | string queue = 4; 27 | 28 | // Max number of retries for this task. 29 | int32 retry = 5; 30 | 31 | // Number of times this task has been retried so far. 32 | int32 retried = 6; 33 | 34 | // Error message from the last failure. 35 | string error_msg = 7; 36 | 37 | // Time of last failure in Unix time, 38 | // the number of seconds elapsed since January 1, 1970 UTC. 39 | // Use zero to indicate no last failure. 40 | int64 last_failed_at = 11; 41 | 42 | // Timeout specifies timeout in seconds. 43 | // Use zero to indicate no timeout. 44 | int64 timeout = 8; 45 | 46 | // Deadline specifies the deadline for the task in Unix time, 47 | // the number of seconds elapsed since January 1, 1970 UTC. 48 | // Use zero to indicate no deadline. 49 | int64 deadline = 9; 50 | 51 | // UniqueKey holds the redis key used for uniqueness lock for this task. 52 | // Empty string indicates that no uniqueness lock was used. 53 | string unique_key = 10; 54 | 55 | // GroupKey is a name of the group used for task aggregation. 56 | // This field is optional and empty value means no aggregation for the task. 57 | string group_key = 14; 58 | 59 | // Retention period specified in a number of seconds. 60 | // The task will be stored in redis as a completed task until the TTL 61 | // expires. 62 | int64 retention = 12; 63 | 64 | // Time when the task completed in success in Unix time, 65 | // the number of seconds elapsed since January 1, 1970 UTC. 66 | // This field is populated if result_ttl > 0 upon completion. 67 | int64 completed_at = 13; 68 | }; 69 | 70 | // ServerInfo holds information about a running server. 
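// The heartbeater serializes one of these per server process and writes it
// to Redis with a short TTL (twice the heartbeat interval; see beat in
// heartbeat.go), so stale entries vanish soon after a server dies.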
71 | message ServerInfo { 72 | // Host machine the server is running on. 73 | string host = 1; 74 | 75 | // PID of the server process. 76 | int32 pid = 2; 77 | 78 | // Unique identifier for this server. 79 | string server_id = 3; 80 | 81 | // Maximum number of tasks this server processes concurrently. 82 | int32 concurrency = 4; 83 | 84 | // List of queue names with their priorities. 85 | // The server will consume tasks from the queues and prioritize 86 | // queues with higher priority numbers. 87 | map<string, int32> queues = 5; 88 | 89 | // If set, the server will always consume tasks from a queue with higher 90 | // priority. 91 | bool strict_priority = 6; 92 | 93 | // Status indicates the status of the server. 94 | string status = 7; 95 | 96 | // Time this server was started. 97 | google.protobuf.Timestamp start_time = 8; 98 | 99 | // Number of workers currently processing tasks. 100 | int32 active_worker_count = 9; 101 | }; 102 | 103 | // WorkerInfo holds information about a running worker. 104 | message WorkerInfo { 105 | // Host machine this worker is running on. 106 | string host = 1; 107 | 108 | // PID of the process in which this worker is running. 109 | int32 pid = 2; 110 | 111 | // ID of the server in which this worker is running. 112 | string server_id = 3; 113 | 114 | // ID of the task this worker is processing. 115 | string task_id = 4; 116 | 117 | // Type of the task this worker is processing. 118 | string task_type = 5; 119 | 120 | // Payload of the task this worker is processing. 121 | bytes task_payload = 6; 122 | 123 | // Name of the queue to which the task the worker is processing belongs. 124 | string queue = 7; 125 | 126 | // Time this worker started processing the task. 127 | google.protobuf.Timestamp start_time = 8; 128 | 129 | // Deadline by which the worker needs to complete processing 130 | // the task. If the worker exceeds the deadline, the task will fail. 131 | google.protobuf.Timestamp deadline = 9; 132 | }; 133 | 134 | // SchedulerEntry holds information about a periodic task registered 135 | // with a scheduler. 136 | message SchedulerEntry { 137 | // Identifier of the scheduler entry. 138 | string id = 1; 139 | 140 | // Periodic schedule spec of the entry. 141 | string spec = 2; 142 | 143 | // Task type of the periodic task. 144 | string task_type = 3; 145 | 146 | // Task payload of the periodic task. 147 | bytes task_payload = 4; 148 | 149 | // Options used to enqueue the periodic task. 150 | repeated string enqueue_options = 5; 151 | 152 | // Next time the task will be enqueued. 153 | google.protobuf.Timestamp next_enqueue_time = 6; 154 | 155 | // Last time the task was enqueued. 156 | // Zero time if task was never enqueued. 157 | google.protobuf.Timestamp prev_enqueue_time = 7; 158 | }; 159 | 160 | // SchedulerEnqueueEvent holds information about an enqueue event 161 | // by a scheduler. 162 | message SchedulerEnqueueEvent { 163 | // ID of the task that was enqueued. 164 | string task_id = 1; 165 | 166 | // Time the task was enqueued. 167 | google.protobuf.Timestamp enqueue_time = 2; 168 | }; 169 | -------------------------------------------------------------------------------- /internal/testutil/builder.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file.
4 | 5 | package testutil 6 | 7 | import ( 8 | "time" 9 | 10 | "github.com/google/uuid" 11 | "github.com/hibiken/asynq/internal/base" 12 | ) 13 | 14 | func makeDefaultTaskMessage() *base.TaskMessage { 15 | return &base.TaskMessage{ 16 | ID: uuid.NewString(), 17 | Type: "default_task", 18 | Queue: "default", 19 | Retry: 25, 20 | Timeout: 1800, // default timeout of 30 mins 21 | Deadline: 0, // no deadline 22 | } 23 | } 24 | 25 | type TaskMessageBuilder struct { 26 | msg *base.TaskMessage 27 | } 28 | 29 | func NewTaskMessageBuilder() *TaskMessageBuilder { 30 | return &TaskMessageBuilder{} 31 | } 32 | 33 | func (b *TaskMessageBuilder) lazyInit() { 34 | if b.msg == nil { 35 | b.msg = makeDefaultTaskMessage() 36 | } 37 | } 38 | 39 | func (b *TaskMessageBuilder) Build() *base.TaskMessage { 40 | b.lazyInit() 41 | return b.msg 42 | } 43 | 44 | func (b *TaskMessageBuilder) SetType(typename string) *TaskMessageBuilder { 45 | b.lazyInit() 46 | b.msg.Type = typename 47 | return b 48 | } 49 | 50 | func (b *TaskMessageBuilder) SetPayload(payload []byte) *TaskMessageBuilder { 51 | b.lazyInit() 52 | b.msg.Payload = payload 53 | return b 54 | } 55 | 56 | func (b *TaskMessageBuilder) SetQueue(qname string) *TaskMessageBuilder { 57 | b.lazyInit() 58 | b.msg.Queue = qname 59 | return b 60 | } 61 | 62 | func (b *TaskMessageBuilder) SetRetry(n int) *TaskMessageBuilder { 63 | b.lazyInit() 64 | b.msg.Retry = n 65 | return b 66 | } 67 | 68 | func (b *TaskMessageBuilder) SetTimeout(timeout time.Duration) *TaskMessageBuilder { 69 | b.lazyInit() 70 | b.msg.Timeout = int64(timeout.Seconds()) 71 | return b 72 | } 73 | 74 | func (b *TaskMessageBuilder) SetDeadline(deadline time.Time) *TaskMessageBuilder { 75 | b.lazyInit() 76 | b.msg.Deadline = deadline.Unix() 77 | return b 78 | } 79 | 80 | func (b *TaskMessageBuilder) SetGroup(gname string) *TaskMessageBuilder { 81 | b.lazyInit() 82 | b.msg.GroupKey = gname 83 | return b 84 | } 85 | -------------------------------------------------------------------------------- /internal/testutil/builder_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package testutil 6 | 7 | import ( 8 | "testing" 9 | "time" 10 | 11 | "github.com/google/go-cmp/cmp" 12 | "github.com/google/go-cmp/cmp/cmpopts" 13 | "github.com/hibiken/asynq/internal/base" 14 | ) 15 | 16 | func TestTaskMessageBuilder(t *testing.T) { 17 | tests := []struct { 18 | desc string 19 | ops func(b *TaskMessageBuilder) // operations to perform on the builder 20 | want *base.TaskMessage 21 | }{ 22 | { 23 | desc: "zero value and build", 24 | ops: nil, 25 | want: &base.TaskMessage{ 26 | Type: "default_task", 27 | Queue: "default", 28 | Payload: nil, 29 | Retry: 25, 30 | Timeout: 1800, // 30m 31 | Deadline: 0, 32 | }, 33 | }, 34 | { 35 | desc: "with type, payload, and queue", 36 | ops: func(b *TaskMessageBuilder) { 37 | b.SetType("foo").SetPayload([]byte("hello")).SetQueue("myqueue") 38 | }, 39 | want: &base.TaskMessage{ 40 | Type: "foo", 41 | Queue: "myqueue", 42 | Payload: []byte("hello"), 43 | Retry: 25, 44 | Timeout: 1800, // 30m 45 | Deadline: 0, 46 | }, 47 | }, 48 | { 49 | desc: "with retry, timeout, and deadline", 50 | ops: func(b *TaskMessageBuilder) { 51 | b.SetRetry(1). 52 | SetTimeout(20 * time.Second). 
53 | SetDeadline(time.Date(2017, 3, 6, 0, 0, 0, 0, time.UTC)) 54 | }, 55 | want: &base.TaskMessage{ 56 | Type: "default_task", 57 | Queue: "default", 58 | Payload: nil, 59 | Retry: 1, 60 | Timeout: 20, 61 | Deadline: time.Date(2017, 3, 6, 0, 0, 0, 0, time.UTC).Unix(), 62 | }, 63 | }, 64 | { 65 | desc: "with group", 66 | ops: func(b *TaskMessageBuilder) { 67 | b.SetGroup("mygroup") 68 | }, 69 | want: &base.TaskMessage{ 70 | Type: "default_task", 71 | Queue: "default", 72 | Payload: nil, 73 | Retry: 25, 74 | Timeout: 1800, 75 | Deadline: 0, 76 | GroupKey: "mygroup", 77 | }, 78 | }, 79 | } 80 | cmpOpts := []cmp.Option{cmpopts.IgnoreFields(base.TaskMessage{}, "ID")} 81 | 82 | for _, tc := range tests { 83 | var b TaskMessageBuilder 84 | if tc.ops != nil { 85 | tc.ops(&b) 86 | } 87 | 88 | got := b.Build() 89 | if diff := cmp.Diff(tc.want, got, cmpOpts...); diff != "" { 90 | t.Errorf("%s: TaskMessageBuilder.Build() = %+v, want %+v;\n(-want,+got)\n%s", 91 | tc.desc, got, tc.want, diff) 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /internal/timeutil/timeutil.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | // Package timeutil exports functions and types related to time and date. 6 | package timeutil 7 | 8 | import ( 9 | "sync" 10 | "time" 11 | ) 12 | 13 | // A Clock is an object that can tell you the current time. 14 | // 15 | // This interface allows decoupling code that uses time from the code that creates 16 | // a point in time. You can use this to your advantage by injecting Clocks into interfaces 17 | // rather than having implementations call time.Now() directly. 18 | // 19 | // Use RealClock() in production. 20 | // Use SimulatedClock() in test. 21 | type Clock interface { 22 | Now() time.Time 23 | } 24 | 25 | func NewRealClock() Clock { return &realTimeClock{} } 26 | 27 | type realTimeClock struct{} 28 | 29 | func (_ *realTimeClock) Now() time.Time { return time.Now() } 30 | 31 | // A SimulatedClock is a concrete Clock implementation that doesn't "tick" on its own. 32 | // Time is advanced by explicit call to the AdvanceTime() or SetTime() functions. 33 | // This object is concurrency safe. 34 | type SimulatedClock struct { 35 | mu sync.Mutex 36 | t time.Time // guarded by mu 37 | } 38 | 39 | func NewSimulatedClock(t time.Time) *SimulatedClock { 40 | return &SimulatedClock{t: t} 41 | } 42 | 43 | func (c *SimulatedClock) Now() time.Time { 44 | c.mu.Lock() 45 | defer c.mu.Unlock() 46 | return c.t 47 | } 48 | 49 | func (c *SimulatedClock) SetTime(t time.Time) { 50 | c.mu.Lock() 51 | defer c.mu.Unlock() 52 | c.t = t 53 | } 54 | 55 | func (c *SimulatedClock) AdvanceTime(d time.Duration) { 56 | c.mu.Lock() 57 | defer c.mu.Unlock() 58 | c.t = c.t.Add(d) 59 | } 60 | -------------------------------------------------------------------------------- /internal/timeutil/timeutil_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 
4 | 5 | package timeutil 6 | 7 | import ( 8 | "testing" 9 | "time" 10 | ) 11 | 12 | func TestSimulatedClock(t *testing.T) { 13 | now := time.Now() 14 | 15 | tests := []struct { 16 | desc string 17 | initTime time.Time 18 | advanceBy time.Duration 19 | wantTime time.Time 20 | }{ 21 | { 22 | desc: "advance time forward", 23 | initTime: now, 24 | advanceBy: 30 * time.Second, 25 | wantTime: now.Add(30 * time.Second), 26 | }, 27 | { 28 | desc: "advance time backward", 29 | initTime: now, 30 | advanceBy: -10 * time.Second, 31 | wantTime: now.Add(-10 * time.Second), 32 | }, 33 | } 34 | 35 | for _, tc := range tests { 36 | c := NewSimulatedClock(tc.initTime) 37 | 38 | if c.Now() != tc.initTime { 39 | t.Errorf("%s: Before Advance; SimulatedClock.Now() = %v, want %v", tc.desc, c.Now(), tc.initTime) 40 | } 41 | 42 | c.AdvanceTime(tc.advanceBy) 43 | 44 | if c.Now() != tc.wantTime { 45 | t.Errorf("%s: After Advance; SimulatedClock.Now() = %v, want %v", tc.desc, c.Now(), tc.wantTime) 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /janitor.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "sync" 9 | "time" 10 | 11 | "github.com/hibiken/asynq/internal/base" 12 | "github.com/hibiken/asynq/internal/log" 13 | ) 14 | 15 | // A janitor is responsible for deleting expired completed tasks from the specified 16 | // queues. It periodically checks for any expired tasks in the completed set, and 17 | // deletes them. 18 | type janitor struct { 19 | logger *log.Logger 20 | broker base.Broker 21 | 22 | // channel to communicate back to the long running "janitor" goroutine. 23 | done chan struct{} 24 | 25 | // list of queue names to check. 26 | queues []string 27 | 28 | // average interval between checks. 29 | avgInterval time.Duration 30 | 31 | // number of tasks to be deleted when janitor runs to delete the expired completed tasks. 32 | batchSize int 33 | } 34 | 35 | type janitorParams struct { 36 | logger *log.Logger 37 | broker base.Broker 38 | queues []string 39 | interval time.Duration 40 | batchSize int 41 | } 42 | 43 | func newJanitor(params janitorParams) *janitor { 44 | return &janitor{ 45 | logger: params.logger, 46 | broker: params.broker, 47 | done: make(chan struct{}), 48 | queues: params.queues, 49 | avgInterval: params.interval, 50 | batchSize: params.batchSize, 51 | } 52 | } 53 | 54 | func (j *janitor) shutdown() { 55 | j.logger.Debug("Janitor shutting down...") 56 | // Signal the janitor goroutine to stop. 57 | j.done <- struct{}{} 58 | } 59 | 60 | // start starts the "janitor" goroutine. 
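// It wakes roughly every avgInterval, deletes up to batchSize expired
// completed tasks per queue via exec, and returns once shutdown signals on
// the done channel.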
61 | func (j *janitor) start(wg *sync.WaitGroup) { 62 | wg.Add(1) 63 | timer := time.NewTimer(j.avgInterval) // randomize this interval with margin of 1s 64 | go func() { 65 | defer wg.Done() 66 | for { 67 | select { 68 | case <-j.done: 69 | j.logger.Debug("Janitor done") 70 | return 71 | case <-timer.C: 72 | j.exec() 73 | timer.Reset(j.avgInterval) 74 | } 75 | } 76 | }() 77 | } 78 | 79 | func (j *janitor) exec() { 80 | for _, qname := range j.queues { 81 | if err := j.broker.DeleteExpiredCompletedTasks(qname, j.batchSize); err != nil { 82 | j.logger.Errorf("Failed to delete expired completed tasks from queue %q: %v", 83 | qname, err) 84 | } 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /janitor_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/google/go-cmp/cmp" 13 | "github.com/hibiken/asynq/internal/base" 14 | "github.com/hibiken/asynq/internal/rdb" 15 | h "github.com/hibiken/asynq/internal/testutil" 16 | ) 17 | 18 | func newCompletedTask(qname, tasktype string, payload []byte, completedAt time.Time) *base.TaskMessage { 19 | msg := h.NewTaskMessageWithQueue(tasktype, payload, qname) 20 | msg.CompletedAt = completedAt.Unix() 21 | return msg 22 | } 23 | 24 | func TestJanitor(t *testing.T) { 25 | r := setup(t) 26 | defer r.Close() 27 | rdbClient := rdb.NewRDB(r) 28 | const interval = 1 * time.Second 29 | const batchSize = 100 30 | janitor := newJanitor(janitorParams{ 31 | logger: testLogger, 32 | broker: rdbClient, 33 | queues: []string{"default", "custom"}, 34 | interval: interval, 35 | batchSize: batchSize, 36 | }) 37 | 38 | now := time.Now() 39 | hourAgo := now.Add(-1 * time.Hour) 40 | minuteAgo := now.Add(-1 * time.Minute) 41 | halfHourAgo := now.Add(-30 * time.Minute) 42 | halfHourFromNow := now.Add(30 * time.Minute) 43 | fiveMinFromNow := now.Add(5 * time.Minute) 44 | msg1 := newCompletedTask("default", "task1", nil, hourAgo) 45 | msg2 := newCompletedTask("default", "task2", nil, minuteAgo) 46 | msg3 := newCompletedTask("custom", "task3", nil, hourAgo) 47 | msg4 := newCompletedTask("custom", "task4", nil, minuteAgo) 48 | 49 | tests := []struct { 50 | completed map[string][]base.Z // initial completed sets 51 | wantCompleted map[string][]base.Z // expected completed sets after janitor runs 52 | }{ 53 | { 54 | completed: map[string][]base.Z{ 55 | "default": { 56 | {Message: msg1, Score: halfHourAgo.Unix()}, 57 | {Message: msg2, Score: fiveMinFromNow.Unix()}, 58 | }, 59 | "custom": { 60 | {Message: msg3, Score: halfHourFromNow.Unix()}, 61 | {Message: msg4, Score: minuteAgo.Unix()}, 62 | }, 63 | }, 64 | wantCompleted: map[string][]base.Z{ 65 | "default": { 66 | {Message: msg2, Score: fiveMinFromNow.Unix()}, 67 | }, 68 | "custom": { 69 | {Message: msg3, Score: halfHourFromNow.Unix()}, 70 | }, 71 | }, 72 | }, 73 | } 74 | 75 | for _, tc := range tests { 76 | h.FlushDB(t, r) 77 | h.SeedAllCompletedQueues(t, r, tc.completed) 78 | 79 | var wg sync.WaitGroup 80 | janitor.start(&wg) 81 | time.Sleep(2 * interval) // make sure to let janitor run at least one time 82 | janitor.shutdown() 83 | 84 | for qname, want := range tc.wantCompleted { 85 | got := h.GetCompletedEntries(t, r, qname) 86 | if diff := cmp.Diff(want, got, h.SortZSetEntryOpt); diff 
!= "" { 87 | t.Errorf("diff found in %q after running janitor: (-want, +got)\n%s", base.CompletedKey(qname), diff) 88 | } 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /recoverer.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "context" 9 | "sync" 10 | "time" 11 | 12 | "github.com/hibiken/asynq/internal/base" 13 | "github.com/hibiken/asynq/internal/errors" 14 | "github.com/hibiken/asynq/internal/log" 15 | ) 16 | 17 | type recoverer struct { 18 | logger *log.Logger 19 | broker base.Broker 20 | retryDelayFunc RetryDelayFunc 21 | isFailureFunc func(error) bool 22 | 23 | // channel to communicate back to the long running "recoverer" goroutine. 24 | done chan struct{} 25 | 26 | // list of queues to check for deadline. 27 | queues []string 28 | 29 | // poll interval. 30 | interval time.Duration 31 | } 32 | 33 | type recovererParams struct { 34 | logger *log.Logger 35 | broker base.Broker 36 | queues []string 37 | interval time.Duration 38 | retryDelayFunc RetryDelayFunc 39 | isFailureFunc func(error) bool 40 | } 41 | 42 | func newRecoverer(params recovererParams) *recoverer { 43 | return &recoverer{ 44 | logger: params.logger, 45 | broker: params.broker, 46 | done: make(chan struct{}), 47 | queues: params.queues, 48 | interval: params.interval, 49 | retryDelayFunc: params.retryDelayFunc, 50 | isFailureFunc: params.isFailureFunc, 51 | } 52 | } 53 | 54 | func (r *recoverer) shutdown() { 55 | r.logger.Debug("Recoverer shutting down...") 56 | // Signal the recoverer goroutine to stop polling. 57 | r.done <- struct{}{} 58 | } 59 | 60 | func (r *recoverer) start(wg *sync.WaitGroup) { 61 | wg.Add(1) 62 | go func() { 63 | defer wg.Done() 64 | r.recover() 65 | timer := time.NewTimer(r.interval) 66 | for { 67 | select { 68 | case <-r.done: 69 | r.logger.Debug("Recoverer done") 70 | timer.Stop() 71 | return 72 | case <-timer.C: 73 | r.recover() 74 | timer.Reset(r.interval) 75 | } 76 | } 77 | }() 78 | } 79 | 80 | // ErrLeaseExpired error indicates that the task failed because the worker working on the task 81 | // could not extend its lease due to missing heartbeats. The worker may have crashed or got cutoff from the network. 82 | var ErrLeaseExpired = errors.New("asynq: task lease expired") 83 | 84 | func (r *recoverer) recover() { 85 | r.recoverLeaseExpiredTasks() 86 | r.recoverStaleAggregationSets() 87 | } 88 | 89 | func (r *recoverer) recoverLeaseExpiredTasks() { 90 | // Get all tasks which have expired 30 seconds ago or earlier to accommodate certain amount of clock skew. 91 | cutoff := time.Now().Add(-30 * time.Second) 92 | msgs, err := r.broker.ListLeaseExpired(cutoff, r.queues...) 
93 | if err != nil { 94 | r.logger.Warnf("recoverer: could not list lease expired tasks: %v", err) 95 | return 96 | } 97 | for _, msg := range msgs { 98 | if msg.Retried >= msg.Retry { 99 | r.archive(msg, ErrLeaseExpired) 100 | } else { 101 | r.retry(msg, ErrLeaseExpired) 102 | } 103 | } 104 | } 105 | 106 | func (r *recoverer) recoverStaleAggregationSets() { 107 | for _, qname := range r.queues { 108 | if err := r.broker.ReclaimStaleAggregationSets(qname); err != nil { 109 | r.logger.Warnf("recoverer: could not reclaim stale aggregation sets in queue %q: %v", qname, err) 110 | } 111 | } 112 | } 113 | 114 | func (r *recoverer) retry(msg *base.TaskMessage, err error) { 115 | delay := r.retryDelayFunc(msg.Retried, err, NewTask(msg.Type, msg.Payload)) 116 | retryAt := time.Now().Add(delay) 117 | if err := r.broker.Retry(context.Background(), msg, retryAt, err.Error(), r.isFailureFunc(err)); err != nil { 118 | r.logger.Warnf("recoverer: could not retry lease expired task: %v", err) 119 | } 120 | } 121 | 122 | func (r *recoverer) archive(msg *base.TaskMessage, err error) { 123 | if err := r.broker.Archive(context.Background(), msg, err.Error()); err != nil { 124 | r.logger.Warnf("recoverer: could not move task to archive: %v", err) 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /scheduler_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/google/go-cmp/cmp" 13 | "github.com/redis/go-redis/v9" 14 | 15 | "github.com/hibiken/asynq/internal/base" 16 | "github.com/hibiken/asynq/internal/testutil" 17 | ) 18 | 19 | func TestSchedulerRegister(t *testing.T) { 20 | tests := []struct { 21 | cronspec string 22 | task *Task 23 | opts []Option 24 | wait time.Duration 25 | queue string 26 | want []*base.TaskMessage 27 | }{ 28 | { 29 | cronspec: "@every 3s", 30 | task: NewTask("task1", nil), 31 | opts: []Option{MaxRetry(10)}, 32 | wait: 10 * time.Second, 33 | queue: "default", 34 | want: []*base.TaskMessage{ 35 | { 36 | Type: "task1", 37 | Payload: nil, 38 | Retry: 10, 39 | Timeout: int64(defaultTimeout.Seconds()), 40 | Queue: "default", 41 | }, 42 | { 43 | Type: "task1", 44 | Payload: nil, 45 | Retry: 10, 46 | Timeout: int64(defaultTimeout.Seconds()), 47 | Queue: "default", 48 | }, 49 | { 50 | Type: "task1", 51 | Payload: nil, 52 | Retry: 10, 53 | Timeout: int64(defaultTimeout.Seconds()), 54 | Queue: "default", 55 | }, 56 | }, 57 | }, 58 | } 59 | 60 | r := setup(t) 61 | 62 | // Tests for new redis connection. 63 | for _, tc := range tests { 64 | scheduler := NewScheduler(getRedisConnOpt(t), nil) 65 | if _, err := scheduler.Register(tc.cronspec, tc.task, tc.opts...); err != nil { 66 | t.Fatal(err) 67 | } 68 | 69 | if err := scheduler.Start(); err != nil { 70 | t.Fatal(err) 71 | } 72 | time.Sleep(tc.wait) 73 | scheduler.Shutdown() 74 | 75 | got := testutil.GetPendingMessages(t, r, tc.queue) 76 | if diff := cmp.Diff(tc.want, got, testutil.IgnoreIDOpt); diff != "" { 77 | t.Errorf("mismatch found in queue %q: (-want,+got)\n%s", tc.queue, diff) 78 | } 79 | } 80 | 81 | r = setup(t) 82 | 83 | // Tests for existing redis connection. 
84 | for _, tc := range tests { 85 | redisClient := getRedisConnOpt(t).MakeRedisClient().(redis.UniversalClient) 86 | scheduler := NewSchedulerFromRedisClient(redisClient, nil) 87 | if _, err := scheduler.Register(tc.cronspec, tc.task, tc.opts...); err != nil { 88 | t.Fatal(err) 89 | } 90 | 91 | if err := scheduler.Start(); err != nil { 92 | t.Fatal(err) 93 | } 94 | time.Sleep(tc.wait) 95 | scheduler.Shutdown() 96 | 97 | got := testutil.GetPendingMessages(t, r, tc.queue) 98 | if diff := cmp.Diff(tc.want, got, testutil.IgnoreIDOpt); diff != "" { 99 | t.Errorf("mismatch found in queue %q: (-want,+got)\n%s", tc.queue, diff) 100 | } 101 | } 102 | } 103 | 104 | func TestSchedulerWhenRedisDown(t *testing.T) { 105 | var ( 106 | mu sync.Mutex 107 | counter int 108 | ) 109 | errorHandler := func(task *Task, opts []Option, err error) { 110 | mu.Lock() 111 | counter++ 112 | mu.Unlock() 113 | } 114 | 115 | // Connect to non-existent redis instance to simulate a redis server being down. 116 | scheduler := NewScheduler( 117 | RedisClientOpt{Addr: ":9876"}, // no Redis listening to this port. 118 | &SchedulerOpts{EnqueueErrorHandler: errorHandler}, 119 | ) 120 | 121 | task := NewTask("test", nil) 122 | 123 | if _, err := scheduler.Register("@every 3s", task); err != nil { 124 | t.Fatal(err) 125 | } 126 | 127 | if err := scheduler.Start(); err != nil { 128 | t.Fatal(err) 129 | } 130 | // Scheduler should attempt to enqueue the task three times (every 3s). 131 | time.Sleep(10 * time.Second) 132 | scheduler.Shutdown() 133 | 134 | mu.Lock() 135 | if counter != 3 { 136 | t.Errorf("EnqueueErrorHandler was called %d times, want 3", counter) 137 | } 138 | mu.Unlock() 139 | } 140 | 141 | func TestSchedulerUnregister(t *testing.T) { 142 | tests := []struct { 143 | cronspec string 144 | task *Task 145 | opts []Option 146 | wait time.Duration 147 | queue string 148 | }{ 149 | { 150 | cronspec: "@every 3s", 151 | task: NewTask("task1", nil), 152 | opts: []Option{MaxRetry(10)}, 153 | wait: 10 * time.Second, 154 | queue: "default", 155 | }, 156 | } 157 | 158 | r := setup(t) 159 | 160 | for _, tc := range tests { 161 | scheduler := NewScheduler(getRedisConnOpt(t), nil) 162 | entryID, err := scheduler.Register(tc.cronspec, tc.task, tc.opts...) 163 | if err != nil { 164 | t.Fatal(err) 165 | } 166 | if err := scheduler.Unregister(entryID); err != nil { 167 | t.Fatal(err) 168 | } 169 | 170 | if err := scheduler.Start(); err != nil { 171 | t.Fatal(err) 172 | } 173 | time.Sleep(tc.wait) 174 | scheduler.Shutdown() 175 | 176 | got := testutil.GetPendingMessages(t, r, tc.queue) 177 | if len(got) != 0 { 178 | t.Errorf("%d tasks were enqueued, want zero", len(got)) 179 | } 180 | } 181 | } 182 | 183 | func TestSchedulerPostAndPreEnqueueHandler(t *testing.T) { 184 | var ( 185 | preMu sync.Mutex 186 | preCounter int 187 | postMu sync.Mutex 188 | postCounter int 189 | ) 190 | preHandler := func(task *Task, opts []Option) { 191 | preMu.Lock() 192 | preCounter++ 193 | preMu.Unlock() 194 | } 195 | postHandler := func(info *TaskInfo, err error) { 196 | postMu.Lock() 197 | postCounter++ 198 | postMu.Unlock() 199 | } 200 | 201 | // Connect to a real redis instance and verify that the pre and post enqueue handlers are invoked on each enqueue.
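// With the "@every 3s" spec and the 10-second window below, enqueues fire at roughly t=3s, 6s, and 9s, so each hook is expected to run exactly three times.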
202 | scheduler := NewScheduler( 203 | getRedisConnOpt(t), 204 | &SchedulerOpts{ 205 | PreEnqueueFunc: preHandler, 206 | PostEnqueueFunc: postHandler, 207 | }, 208 | ) 209 | 210 | task := NewTask("test", nil) 211 | 212 | if _, err := scheduler.Register("@every 3s", task); err != nil { 213 | t.Fatal(err) 214 | } 215 | 216 | if err := scheduler.Start(); err != nil { 217 | t.Fatal(err) 218 | } 219 | // Scheduler should attempt to enqueue the task three times (every 3s). 220 | time.Sleep(10 * time.Second) 221 | scheduler.Shutdown() 222 | 223 | preMu.Lock() 224 | if preCounter != 3 { 225 | t.Errorf("PreEnqueueFunc was called %d times, want 3", preCounter) 226 | } 227 | preMu.Unlock() 228 | 229 | postMu.Lock() 230 | if postCounter != 3 { 231 | t.Errorf("PostEnqueueFunc was called %d times, want 3", postCounter) 232 | } 233 | postMu.Unlock() 234 | } 235 | -------------------------------------------------------------------------------- /servemux.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "context" 9 | "fmt" 10 | "sort" 11 | "strings" 12 | "sync" 13 | ) 14 | 15 | // ServeMux is a multiplexer for asynchronous tasks. 16 | // It matches the type of each task against a list of registered patterns 17 | // and calls the handler for the pattern that most closely matches the 18 | // task's type name. 19 | // 20 | // Longer patterns take precedence over shorter ones, so that if there are 21 | // handlers registered for both "images" and "images:thumbnails", 22 | // the latter handler will be called for tasks with a type name beginning with 23 | // "images:thumbnails" and the former will receive tasks with type name beginning 24 | // with "images". 25 | type ServeMux struct { 26 | mu sync.RWMutex 27 | m map[string]muxEntry 28 | es []muxEntry // slice of entries sorted from longest to shortest. 29 | mws []MiddlewareFunc 30 | } 31 | 32 | type muxEntry struct { 33 | h Handler 34 | pattern string 35 | } 36 | 37 | // MiddlewareFunc is a function which receives an asynq.Handler and returns another asynq.Handler. 38 | // Typically, the returned handler is a closure which does something with the context and task passed 39 | // to it, and then calls the handler passed as parameter to the MiddlewareFunc. 40 | type MiddlewareFunc func(Handler) Handler 41 | 42 | // NewServeMux allocates and returns a new ServeMux. 43 | func NewServeMux() *ServeMux { 44 | return new(ServeMux) 45 | } 46 | 47 | // ProcessTask dispatches the task to the handler whose 48 | // pattern most closely matches the task type. 49 | func (mux *ServeMux) ProcessTask(ctx context.Context, task *Task) error { 50 | h, _ := mux.Handler(task) 51 | return h.ProcessTask(ctx, task) 52 | } 53 | 54 | // Handler returns the handler to use for the given task. 55 | // It always returns a non-nil handler. 56 | // 57 | // Handler also returns the registered pattern that matches the task. 58 | // 59 | // If there is no registered handler that applies to the task, 60 | // Handler returns a 'not found' handler which returns an error.
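//
// A minimal matching sketch (the patterns, type names, and handler bodies here are illustrative):
//
//	mux := NewServeMux()
//	mux.HandleFunc("images:", func(ctx context.Context, t *Task) error { return nil })
//	mux.HandleFunc("images:thumbnails", func(ctx context.Context, t *Task) error { return nil })
//	h, pattern := mux.Handler(NewTask("images:thumbnails:small", nil))
//	// pattern == "images:thumbnails": the longest registered prefix wins, and h is
//	// that handler wrapped in any registered middlewares; a task of type
//	// "images:resize" would instead fall back to the "images:" handler.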
61 | func (mux *ServeMux) Handler(t *Task) (h Handler, pattern string) { 62 | mux.mu.RLock() 63 | defer mux.mu.RUnlock() 64 | 65 | h, pattern = mux.match(t.Type()) 66 | if h == nil { 67 | h, pattern = NotFoundHandler(), "" 68 | } 69 | for i := len(mux.mws) - 1; i >= 0; i-- { 70 | h = mux.mws[i](h) 71 | } 72 | return h, pattern 73 | } 74 | 75 | // Find a handler on a handler map given a typename string. 76 | // Most-specific (longest) pattern wins. 77 | func (mux *ServeMux) match(typename string) (h Handler, pattern string) { 78 | // Check for exact match first. 79 | v, ok := mux.m[typename] 80 | if ok { 81 | return v.h, v.pattern 82 | } 83 | 84 | // Check for longest valid match. 85 | // mux.es contains all patterns from longest to shortest. 86 | for _, e := range mux.es { 87 | if strings.HasPrefix(typename, e.pattern) { 88 | return e.h, e.pattern 89 | } 90 | } 91 | return nil, "" 92 | 93 | } 94 | 95 | // Handle registers the handler for the given pattern. 96 | // If a handler already exists for pattern, Handle panics. 97 | func (mux *ServeMux) Handle(pattern string, handler Handler) { 98 | mux.mu.Lock() 99 | defer mux.mu.Unlock() 100 | 101 | if strings.TrimSpace(pattern) == "" { 102 | panic("asynq: invalid pattern") 103 | } 104 | if handler == nil { 105 | panic("asynq: nil handler") 106 | } 107 | if _, exist := mux.m[pattern]; exist { 108 | panic("asynq: multiple registrations for " + pattern) 109 | } 110 | 111 | if mux.m == nil { 112 | mux.m = make(map[string]muxEntry) 113 | } 114 | e := muxEntry{h: handler, pattern: pattern} 115 | mux.m[pattern] = e 116 | mux.es = appendSorted(mux.es, e) 117 | } 118 | 119 | func appendSorted(es []muxEntry, e muxEntry) []muxEntry { 120 | n := len(es) 121 | i := sort.Search(n, func(i int) bool { 122 | return len(es[i].pattern) < len(e.pattern) 123 | }) 124 | if i == n { 125 | return append(es, e) 126 | } 127 | // we now know that i points at where we want to insert. 128 | es = append(es, muxEntry{}) // try to grow the slice in place, any entry works. 129 | copy(es[i+1:], es[i:]) // shift shorter entries down. 130 | es[i] = e 131 | return es 132 | } 133 | 134 | // HandleFunc registers the handler function for the given pattern. 135 | func (mux *ServeMux) HandleFunc(pattern string, handler func(context.Context, *Task) error) { 136 | if handler == nil { 137 | panic("asynq: nil handler") 138 | } 139 | mux.Handle(pattern, HandlerFunc(handler)) 140 | } 141 | 142 | // Use appends a MiddlewareFunc to the chain. 143 | // Middlewares are executed in the order that they are applied to the ServeMux. 144 | func (mux *ServeMux) Use(mws ...MiddlewareFunc) { 145 | mux.mu.Lock() 146 | defer mux.mu.Unlock() 147 | mux.mws = append(mux.mws, mws...) 148 | } 149 | 150 | // NotFound returns an error indicating that the handler was not found for the given task. 151 | func NotFound(ctx context.Context, task *Task) error { 152 | return fmt.Errorf("handler not found for task %q", task.Type()) 153 | } 154 | 155 | // NotFoundHandler returns a simple task handler that returns a ``not found`` error. 156 | func NotFoundHandler() Handler { return HandlerFunc(NotFound) } 157 | -------------------------------------------------------------------------------- /servemux_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 
4 | 5 | package asynq 6 | 7 | import ( 8 | "context" 9 | "testing" 10 | 11 | "github.com/google/go-cmp/cmp" 12 | ) 13 | 14 | var called string // identity of the handler that was called. 15 | var invoked []string // list of middlewares in the order they were invoked. 16 | 17 | // makeFakeHandler returns a handler that updates the global called variable 18 | // to the given identity. 19 | func makeFakeHandler(identity string) Handler { 20 | return HandlerFunc(func(ctx context.Context, t *Task) error { 21 | called = identity 22 | return nil 23 | }) 24 | } 25 | 26 | // makeFakeMiddleware returns a middleware function that appends the given identity 27 | // to the global invoked slice. 28 | func makeFakeMiddleware(identity string) MiddlewareFunc { 29 | return func(next Handler) Handler { 30 | return HandlerFunc(func(ctx context.Context, t *Task) error { 31 | invoked = append(invoked, identity) 32 | return next.ProcessTask(ctx, t) 33 | }) 34 | } 35 | } 36 | 37 | // A list of pattern, handler pairs registered with mux. 38 | var serveMuxRegister = []struct { 39 | pattern string 40 | h Handler 41 | }{ 42 | {"email:", makeFakeHandler("default email handler")}, 43 | {"email:signup", makeFakeHandler("signup email handler")}, 44 | {"csv:export", makeFakeHandler("csv export handler")}, 45 | } 46 | 47 | var serveMuxTests = []struct { 48 | typename string // task's type name 49 | want string // identifier of the handler that should be called 50 | }{ 51 | {"email:signup", "signup email handler"}, 52 | {"csv:export", "csv export handler"}, 53 | {"email:daily", "default email handler"}, 54 | } 55 | 56 | func TestServeMux(t *testing.T) { 57 | mux := NewServeMux() 58 | for _, e := range serveMuxRegister { 59 | mux.Handle(e.pattern, e.h) 60 | } 61 | 62 | for _, tc := range serveMuxTests { 63 | called = "" // reset to zero value 64 | 65 | task := NewTask(tc.typename, nil) 66 | if err := mux.ProcessTask(context.Background(), task); err != nil { 67 | t.Fatal(err) 68 | } 69 | 70 | if called != tc.want { 71 | t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type(), tc.want) 72 | } 73 | } 74 | } 75 | 76 | func TestServeMuxRegisterNilHandler(t *testing.T) { 77 | defer func() { 78 | if err := recover(); err == nil { 79 | t.Error("expected call to mux.HandleFunc to panic") 80 | } 81 | }() 82 | 83 | mux := NewServeMux() 84 | mux.HandleFunc("email:signup", nil) 85 | } 86 | 87 | func TestServeMuxRegisterEmptyPattern(t *testing.T) { 88 | defer func() { 89 | if err := recover(); err == nil { 90 | t.Error("expected call to mux.HandleFunc to panic") 91 | } 92 | }() 93 | 94 | mux := NewServeMux() 95 | mux.Handle("", makeFakeHandler("email")) 96 | } 97 | 98 | func TestServeMuxRegisterDuplicatePattern(t *testing.T) { 99 | defer func() { 100 | if err := recover(); err == nil { 101 | t.Error("expected call to mux.HandleFunc to panic") 102 | } 103 | }() 104 | 105 | mux := NewServeMux() 106 | mux.Handle("email", makeFakeHandler("email")) 107 | mux.Handle("email", makeFakeHandler("email:default")) 108 | } 109 | 110 | var notFoundTests = []struct { 111 | typename string // task's type name 112 | }{ 113 | {"image:minimize"}, 114 | {"csv:"}, // registered patterns match the task's type prefix, not the other way around.
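// (A registered pattern matches when it is a prefix of the task's type name, so the bare type name "csv:" is not matched by the longer pattern "csv:export".)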
115 | } 116 | 117 | func TestServeMuxNotFound(t *testing.T) { 118 | mux := NewServeMux() 119 | for _, e := range serveMuxRegister { 120 | mux.Handle(e.pattern, e.h) 121 | } 122 | 123 | for _, tc := range notFoundTests { 124 | task := NewTask(tc.typename, nil) 125 | err := mux.ProcessTask(context.Background(), task) 126 | if err == nil { 127 | t.Errorf("ProcessTask did not return error for task %q, should return 'not found' error", task.Type()) 128 | } 129 | } 130 | } 131 | 132 | var middlewareTests = []struct { 133 | typename string // task's type name 134 | middlewares []string // middlewares to use. They should be called in this order. 135 | want string // identifier of the handler that should be called 136 | }{ 137 | {"email:signup", []string{"logging", "expiration"}, "signup email handler"}, 138 | {"csv:export", []string{}, "csv export handler"}, 139 | {"email:daily", []string{"expiration", "logging"}, "default email handler"}, 140 | } 141 | 142 | func TestServeMuxMiddlewares(t *testing.T) { 143 | for _, tc := range middlewareTests { 144 | mux := NewServeMux() 145 | for _, e := range serveMuxRegister { 146 | mux.Handle(e.pattern, e.h) 147 | } 148 | var mws []MiddlewareFunc 149 | for _, s := range tc.middlewares { 150 | mws = append(mws, makeFakeMiddleware(s)) 151 | } 152 | mux.Use(mws...) 153 | 154 | invoked = []string{} // reset to empty slice 155 | called = "" // reset to zero value 156 | 157 | task := NewTask(tc.typename, nil) 158 | if err := mux.ProcessTask(context.Background(), task); err != nil { 159 | t.Fatal(err) 160 | } 161 | 162 | if diff := cmp.Diff(invoked, tc.middlewares); diff != "" { 163 | t.Errorf("invoked middlewares were %v, want %v", invoked, tc.middlewares) 164 | } 165 | 166 | if called != tc.want { 167 | t.Errorf("%q handler was called for task %q, want %q to be called", called, task.Type(), tc.want) 168 | } 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /server_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 
4 | 5 | package asynq 6 | 7 | import ( 8 | "context" 9 | "fmt" 10 | "syscall" 11 | "testing" 12 | "time" 13 | 14 | "github.com/hibiken/asynq/internal/rdb" 15 | "github.com/hibiken/asynq/internal/testbroker" 16 | "github.com/hibiken/asynq/internal/testutil" 17 | 18 | "github.com/redis/go-redis/v9" 19 | "go.uber.org/goleak" 20 | ) 21 | 22 | func testServer(t *testing.T, c *Client, srv *Server) { 23 | // no-op handler 24 | h := func(ctx context.Context, task *Task) error { 25 | return nil 26 | } 27 | 28 | err := srv.Start(HandlerFunc(h)) 29 | if err != nil { 30 | t.Fatal(err) 31 | } 32 | 33 | _, err = c.Enqueue(NewTask("send_email", testutil.JSON(map[string]interface{}{"recipient_id": 123}))) 34 | if err != nil { 35 | t.Errorf("could not enqueue a task: %v", err) 36 | } 37 | 38 | _, err = c.Enqueue(NewTask("send_email", testutil.JSON(map[string]interface{}{"recipient_id": 456})), ProcessIn(1*time.Hour)) 39 | if err != nil { 40 | t.Errorf("could not enqueue a task: %v", err) 41 | } 42 | 43 | srv.Shutdown() 44 | } 45 | 46 | func TestServer(t *testing.T) { 47 | // https://github.com/go-redis/redis/issues/1029 48 | ignoreOpt := goleak.IgnoreTopFunction("github.com/redis/go-redis/v9/internal/pool.(*ConnPool).reaper") 49 | defer goleak.VerifyNone(t, ignoreOpt) 50 | 51 | redisConnOpt := getRedisConnOpt(t) 52 | c := NewClient(redisConnOpt) 53 | defer c.Close() 54 | srv := NewServer(redisConnOpt, Config{ 55 | Concurrency: 10, 56 | LogLevel: testLogLevel, 57 | }) 58 | 59 | testServer(t, c, srv) 60 | } 61 | 62 | func TestServerFromRedisClient(t *testing.T) { 63 | // https://github.com/go-redis/redis/issues/1029 64 | ignoreOpt := goleak.IgnoreTopFunction("github.com/redis/go-redis/v9/internal/pool.(*ConnPool).reaper") 65 | defer goleak.VerifyNone(t, ignoreOpt) 66 | 67 | redisConnOpt := getRedisConnOpt(t) 68 | redisClient := redisConnOpt.MakeRedisClient().(redis.UniversalClient) 69 | c := NewClientFromRedisClient(redisClient) 70 | srv := NewServerFromRedisClient(redisClient, Config{ 71 | Concurrency: 10, 72 | LogLevel: testLogLevel, 73 | }) 74 | 75 | testServer(t, c, srv) 76 | 77 | err := c.Close() 78 | if err == nil { 79 | t.Error("client.Close() should have failed because of a shared client but it didn't") 80 | } 81 | } 82 | 83 | func TestServerRun(t *testing.T) { 84 | // https://github.com/go-redis/redis/issues/1029 85 | ignoreOpt := goleak.IgnoreTopFunction("github.com/redis/go-redis/v9/internal/pool.(*ConnPool).reaper") 86 | defer goleak.VerifyNone(t, ignoreOpt) 87 | 88 | srv := NewServer(getRedisConnOpt(t), Config{LogLevel: testLogLevel}) 89 | 90 | done := make(chan struct{}) 91 | // Make sure server exits when receiving TERM signal. 
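// One goroutine below delivers SIGTERM to this process after 2s; a second acts as a watchdog and panics if Run has not returned within 10s.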
92 | go func() { 93 | time.Sleep(2 * time.Second) 94 | _ = syscall.Kill(syscall.Getpid(), syscall.SIGTERM) 95 | done <- struct{}{} 96 | }() 97 | 98 | go func() { 99 | select { 100 | case <-time.After(10 * time.Second): 101 | panic("server did not stop after receiving TERM signal") 102 | case <-done: 103 | } 104 | }() 105 | 106 | mux := NewServeMux() 107 | if err := srv.Run(mux); err != nil { 108 | t.Fatal(err) 109 | } 110 | } 111 | 112 | func TestServerErrServerClosed(t *testing.T) { 113 | srv := NewServer(getRedisConnOpt(t), Config{LogLevel: testLogLevel}) 114 | handler := NewServeMux() 115 | if err := srv.Start(handler); err != nil { 116 | t.Fatal(err) 117 | } 118 | srv.Shutdown() 119 | err := srv.Start(handler) 120 | if err != ErrServerClosed { 121 | t.Errorf("Restarting server: (*Server).Start(handler) = %v, want ErrServerClosed error", err) 122 | } 123 | } 124 | 125 | func TestServerErrNilHandler(t *testing.T) { 126 | srv := NewServer(getRedisConnOpt(t), Config{LogLevel: testLogLevel}) 127 | err := srv.Start(nil) 128 | if err == nil { 129 | t.Error("Starting server with nil handler: (*Server).Start(nil) did not return error") 130 | srv.Shutdown() 131 | } 132 | } 133 | 134 | func TestServerErrServerRunning(t *testing.T) { 135 | srv := NewServer(getRedisConnOpt(t), Config{LogLevel: testLogLevel}) 136 | handler := NewServeMux() 137 | if err := srv.Start(handler); err != nil { 138 | t.Fatal(err) 139 | } 140 | err := srv.Start(handler) 141 | if err == nil { 142 | t.Error("Calling (*Server).Start(handler) on already running server did not return error") 143 | } 144 | srv.Shutdown() 145 | } 146 | 147 | func TestServerWithRedisDown(t *testing.T) { 148 | // Make sure that server does not panic and exit if redis is down. 149 | defer func() { 150 | if r := recover(); r != nil { 151 | t.Errorf("panic occurred: %v", r) 152 | } 153 | }() 154 | r := rdb.NewRDB(setup(t)) 155 | testBroker := testbroker.NewTestBroker(r) 156 | srv := NewServer(getRedisConnOpt(t), Config{LogLevel: testLogLevel}) 157 | srv.broker = testBroker 158 | srv.forwarder.broker = testBroker 159 | srv.heartbeater.broker = testBroker 160 | srv.processor.broker = testBroker 161 | srv.subscriber.broker = testBroker 162 | testBroker.Sleep() 163 | 164 | // no-op handler 165 | h := func(ctx context.Context, task *Task) error { 166 | return nil 167 | } 168 | 169 | err := srv.Start(HandlerFunc(h)) 170 | if err != nil { 171 | t.Fatal(err) 172 | } 173 | 174 | time.Sleep(3 * time.Second) 175 | 176 | srv.Shutdown() 177 | } 178 | 179 | func TestServerWithFlakyBroker(t *testing.T) { 180 | // Make sure that server does not panic and exit if redis is down. 181 | defer func() { 182 | if r := recover(); r != nil { 183 | t.Errorf("panic occurred: %v", r) 184 | } 185 | }() 186 | r := rdb.NewRDB(setup(t)) 187 | testBroker := testbroker.NewTestBroker(r) 188 | redisConnOpt := getRedisConnOpt(t) 189 | srv := NewServer(redisConnOpt, Config{LogLevel: testLogLevel}) 190 | srv.broker = testBroker 191 | srv.forwarder.broker = testBroker 192 | srv.heartbeater.broker = testBroker 193 | srv.processor.broker = testBroker 194 | srv.subscriber.broker = testBroker 195 | 196 | c := NewClient(redisConnOpt) 197 | 198 | h := func(ctx context.Context, task *Task) error { 199 | // force task retry. 
200 | if task.Type() == "bad_task" { 201 | return fmt.Errorf("could not process %q", task.Type()) 202 | } 203 | time.Sleep(2 * time.Second) 204 | return nil 205 | } 206 | 207 | err := srv.Start(HandlerFunc(h)) 208 | if err != nil { 209 | t.Fatal(err) 210 | } 211 | 212 | for i := 0; i < 10; i++ { 213 | _, err := c.Enqueue(NewTask("enqueued", nil), MaxRetry(i)) 214 | if err != nil { 215 | t.Fatal(err) 216 | } 217 | _, err = c.Enqueue(NewTask("bad_task", nil)) 218 | if err != nil { 219 | t.Fatal(err) 220 | } 221 | _, err = c.Enqueue(NewTask("scheduled", nil), ProcessIn(time.Duration(i)*time.Second)) 222 | if err != nil { 223 | t.Fatal(err) 224 | } 225 | } 226 | 227 | // simulate redis going down. 228 | testBroker.Sleep() 229 | 230 | time.Sleep(3 * time.Second) 231 | 232 | // simulate redis comes back online. 233 | testBroker.Wakeup() 234 | 235 | time.Sleep(3 * time.Second) 236 | 237 | srv.Shutdown() 238 | } 239 | 240 | func TestLogLevel(t *testing.T) { 241 | tests := []struct { 242 | flagVal string 243 | want LogLevel 244 | wantStr string 245 | }{ 246 | {"debug", DebugLevel, "debug"}, 247 | {"Info", InfoLevel, "info"}, 248 | {"WARN", WarnLevel, "warn"}, 249 | {"warning", WarnLevel, "warn"}, 250 | {"Error", ErrorLevel, "error"}, 251 | {"fatal", FatalLevel, "fatal"}, 252 | } 253 | 254 | for _, tc := range tests { 255 | level := new(LogLevel) 256 | if err := level.Set(tc.flagVal); err != nil { 257 | t.Fatal(err) 258 | } 259 | if *level != tc.want { 260 | t.Errorf("Set(%q): got %v, want %v", tc.flagVal, level, &tc.want) 261 | continue 262 | } 263 | if got := level.String(); got != tc.wantStr { 264 | t.Errorf("String() returned %q, want %q", got, tc.wantStr) 265 | } 266 | } 267 | } 268 | -------------------------------------------------------------------------------- /signals_unix.go: -------------------------------------------------------------------------------- 1 | //go:build linux || dragonfly || freebsd || netbsd || openbsd || darwin 2 | 3 | package asynq 4 | 5 | import ( 6 | "os" 7 | "os/signal" 8 | 9 | "golang.org/x/sys/unix" 10 | ) 11 | 12 | // waitForSignals waits for signals and handles them. 13 | // It handles SIGTERM, SIGINT, and SIGTSTP. 14 | // SIGTERM and SIGINT will signal the process to exit. 15 | // SIGTSTP will signal the process to stop processing new tasks. 16 | func (srv *Server) waitForSignals() { 17 | srv.logger.Info("Send signal TSTP to stop processing new tasks") 18 | srv.logger.Info("Send signal TERM or INT to terminate the process") 19 | 20 | sigs := make(chan os.Signal, 1) 21 | signal.Notify(sigs, unix.SIGTERM, unix.SIGINT, unix.SIGTSTP) 22 | for { 23 | sig := <-sigs 24 | if sig == unix.SIGTSTP { 25 | srv.Stop() 26 | continue 27 | } else { 28 | srv.Stop() 29 | break 30 | } 31 | } 32 | } 33 | 34 | func (s *Scheduler) waitForSignals() { 35 | s.logger.Info("Send signal TERM or INT to stop the scheduler") 36 | sigs := make(chan os.Signal, 1) 37 | signal.Notify(sigs, unix.SIGTERM, unix.SIGINT) 38 | <-sigs 39 | } 40 | -------------------------------------------------------------------------------- /signals_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | 3 | package asynq 4 | 5 | import ( 6 | "os" 7 | "os/signal" 8 | 9 | "golang.org/x/sys/windows" 10 | ) 11 | 12 | // waitForSignals waits for signals and handles them. 13 | // It handles SIGTERM and SIGINT. 14 | // SIGTERM and SIGINT will signal the process to exit. 15 | // 16 | // Note: Currently SIGTSTP is not supported for windows build. 
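// For example, pressing Ctrl+C in the console delivers os.Interrupt (SIGINT) to the process, which unblocks the receive below and lets the server shut down.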
17 | func (srv *Server) waitForSignals() { 18 | srv.logger.Info("Send signal TERM or INT to terminate the process") 19 | sigs := make(chan os.Signal, 1) 20 | signal.Notify(sigs, windows.SIGTERM, windows.SIGINT) 21 | <-sigs 22 | } 23 | 24 | func (s *Scheduler) waitForSignals() { 25 | s.logger.Info("Send signal TERM or INT to stop the scheduler") 26 | sigs := make(chan os.Signal, 1) 27 | signal.Notify(sigs, windows.SIGTERM, windows.SIGINT) 28 | <-sigs 29 | } 30 | -------------------------------------------------------------------------------- /subscriber.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "sync" 9 | "time" 10 | 11 | "github.com/redis/go-redis/v9" 12 | "github.com/hibiken/asynq/internal/base" 13 | "github.com/hibiken/asynq/internal/log" 14 | ) 15 | 16 | type subscriber struct { 17 | logger *log.Logger 18 | broker base.Broker 19 | 20 | // channel to communicate back to the long running "subscriber" goroutine. 21 | done chan struct{} 22 | 23 | // cancelations hold cancel functions for all active tasks. 24 | cancelations *base.Cancelations 25 | 26 | // time to wait before retrying to connect to redis. 27 | retryTimeout time.Duration 28 | } 29 | 30 | type subscriberParams struct { 31 | logger *log.Logger 32 | broker base.Broker 33 | cancelations *base.Cancelations 34 | } 35 | 36 | func newSubscriber(params subscriberParams) *subscriber { 37 | return &subscriber{ 38 | logger: params.logger, 39 | broker: params.broker, 40 | done: make(chan struct{}), 41 | cancelations: params.cancelations, 42 | retryTimeout: 5 * time.Second, 43 | } 44 | } 45 | 46 | func (s *subscriber) shutdown() { 47 | s.logger.Debug("Subscriber shutting down...") 48 | // Signal the subscriber goroutine to stop. 49 | s.done <- struct{}{} 50 | } 51 | 52 | func (s *subscriber) start(wg *sync.WaitGroup) { 53 | wg.Add(1) 54 | go func() { 55 | defer wg.Done() 56 | var ( 57 | pubsub *redis.PubSub 58 | err error 59 | ) 60 | // Try until successfully connect to Redis. 61 | for { 62 | pubsub, err = s.broker.CancelationPubSub() 63 | if err != nil { 64 | s.logger.Errorf("cannot subscribe to cancelation channel: %v", err) 65 | select { 66 | case <-time.After(s.retryTimeout): 67 | continue 68 | case <-s.done: 69 | s.logger.Debug("Subscriber done") 70 | return 71 | } 72 | } 73 | break 74 | } 75 | cancelCh := pubsub.Channel() 76 | for { 77 | select { 78 | case <-s.done: 79 | pubsub.Close() 80 | s.logger.Debug("Subscriber done") 81 | return 82 | case msg := <-cancelCh: 83 | cancel, ok := s.cancelations.Get(msg.Payload) 84 | if ok { 85 | cancel() 86 | } 87 | } 88 | } 89 | }() 90 | } 91 | -------------------------------------------------------------------------------- /subscriber_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 
4 | 5 | package asynq 6 | 7 | import ( 8 | "sync" 9 | "testing" 10 | "time" 11 | 12 | "github.com/hibiken/asynq/internal/base" 13 | "github.com/hibiken/asynq/internal/rdb" 14 | "github.com/hibiken/asynq/internal/testbroker" 15 | ) 16 | 17 | func TestSubscriber(t *testing.T) { 18 | r := setup(t) 19 | defer r.Close() 20 | rdbClient := rdb.NewRDB(r) 21 | 22 | tests := []struct { 23 | registeredID string // ID for which cancel func is registered 24 | publishID string // ID to be published 25 | wantCalled bool // whether cancel func should be called 26 | }{ 27 | {"abc123", "abc123", true}, 28 | {"abc456", "abc123", false}, 29 | } 30 | 31 | for _, tc := range tests { 32 | var mu sync.Mutex 33 | called := false 34 | fakeCancelFunc := func() { 35 | mu.Lock() 36 | defer mu.Unlock() 37 | called = true 38 | } 39 | cancelations := base.NewCancelations() 40 | cancelations.Add(tc.registeredID, fakeCancelFunc) 41 | 42 | subscriber := newSubscriber(subscriberParams{ 43 | logger: testLogger, 44 | broker: rdbClient, 45 | cancelations: cancelations, 46 | }) 47 | var wg sync.WaitGroup 48 | subscriber.start(&wg) 49 | defer subscriber.shutdown() 50 | 51 | // wait for subscriber to establish connection to pubsub channel 52 | time.Sleep(time.Second) 53 | 54 | if err := rdbClient.PublishCancelation(tc.publishID); err != nil { 55 | t.Fatalf("could not publish cancelation message: %v", err) 56 | } 57 | 58 | // wait for redis to publish message 59 | time.Sleep(time.Second) 60 | 61 | mu.Lock() 62 | if called != tc.wantCalled { 63 | if tc.wantCalled { 64 | t.Errorf("fakeCancelFunc was not called, want the function to be called") 65 | } else { 66 | t.Errorf("fakeCancelFunc was called, want the function to not be called") 67 | } 68 | } 69 | mu.Unlock() 70 | } 71 | } 72 | 73 | func TestSubscriberWithRedisDown(t *testing.T) { 74 | defer func() { 75 | if r := recover(); r != nil { 76 | t.Errorf("panic occurred: %v", r) 77 | } 78 | }() 79 | r := rdb.NewRDB(setup(t)) 80 | defer r.Close() 81 | testBroker := testbroker.NewTestBroker(r) 82 | 83 | cancelations := base.NewCancelations() 84 | subscriber := newSubscriber(subscriberParams{ 85 | logger: testLogger, 86 | broker: testBroker, 87 | cancelations: cancelations, 88 | }) 89 | subscriber.retryTimeout = 1 * time.Second // set shorter retry timeout for testing purpose. 90 | 91 | testBroker.Sleep() // simulate a situation where subscriber cannot connect to redis. 92 | var wg sync.WaitGroup 93 | subscriber.start(&wg) 94 | defer subscriber.shutdown() 95 | 96 | time.Sleep(2 * time.Second) // subscriber should wait and retry connecting to redis. 97 | 98 | testBroker.Wakeup() // simulate a situation where redis server is back online. 99 | 100 | time.Sleep(2 * time.Second) // allow subscriber to establish pubsub channel. 101 | 102 | const id = "test" 103 | var ( 104 | mu sync.Mutex 105 | called bool 106 | ) 107 | cancelations.Add(id, func() { 108 | mu.Lock() 109 | defer mu.Unlock() 110 | called = true 111 | }) 112 | 113 | if err := r.PublishCancelation(id); err != nil { 114 | t.Fatalf("could not publish cancelation message: %v", err) 115 | } 116 | 117 | time.Sleep(time.Second) // wait for redis to publish message. 118 | 119 | mu.Lock() 120 | if !called { 121 | t.Errorf("cancel function was not called") 122 | } 123 | mu.Unlock() 124 | } 125 | -------------------------------------------------------------------------------- /syncer.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 
2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package asynq 6 | 7 | import ( 8 | "sync" 9 | "time" 10 | 11 | "github.com/hibiken/asynq/internal/log" 12 | ) 13 | 14 | // syncer is responsible for queuing up failed requests to redis and retrying 15 | // those requests to sync state between the background process and redis. 16 | type syncer struct { 17 | logger *log.Logger 18 | 19 | requestsCh <-chan *syncRequest 20 | 21 | // channel to communicate back to the long running "syncer" goroutine. 22 | done chan struct{} 23 | 24 | // interval between sync operations. 25 | interval time.Duration 26 | } 27 | 28 | type syncRequest struct { 29 | fn func() error // sync operation 30 | errMsg string // error message 31 | deadline time.Time // request should be dropped if deadline has been exceeded 32 | } 33 | 34 | type syncerParams struct { 35 | logger *log.Logger 36 | requestsCh <-chan *syncRequest 37 | interval time.Duration 38 | } 39 | 40 | func newSyncer(params syncerParams) *syncer { 41 | return &syncer{ 42 | logger: params.logger, 43 | requestsCh: params.requestsCh, 44 | done: make(chan struct{}), 45 | interval: params.interval, 46 | } 47 | } 48 | 49 | func (s *syncer) shutdown() { 50 | s.logger.Debug("Syncer shutting down...") 51 | // Signal the syncer goroutine to stop. 52 | s.done <- struct{}{} 53 | } 54 | 55 | func (s *syncer) start(wg *sync.WaitGroup) { 56 | wg.Add(1) 57 | go func() { 58 | defer wg.Done() 59 | var requests []*syncRequest 60 | for { 61 | select { 62 | case <-s.done: 63 | // Try syncing one last time before shutting down. 64 | for _, req := range requests { 65 | if err := req.fn(); err != nil { 66 | s.logger.Error(req.errMsg) 67 | } 68 | } 69 | s.logger.Debug("Syncer done") 70 | return 71 | case req := <-s.requestsCh: 72 | requests = append(requests, req) 73 | case <-time.After(s.interval): 74 | var temp []*syncRequest 75 | for _, req := range requests { 76 | if req.deadline.Before(time.Now()) { 77 | continue // drop stale request 78 | } 79 | if err := req.fn(); err != nil { 80 | temp = append(temp, req) 81 | } 82 | } 83 | requests = temp 84 | } 85 | } 86 | }() 87 | } 88 | -------------------------------------------------------------------------------- /syncer_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file.
4 | 5 | package asynq 6 | 7 | import ( 8 | "context" 9 | "fmt" 10 | "sync" 11 | "testing" 12 | "time" 13 | 14 | "github.com/hibiken/asynq/internal/base" 15 | "github.com/hibiken/asynq/internal/rdb" 16 | h "github.com/hibiken/asynq/internal/testutil" 17 | ) 18 | 19 | func TestSyncer(t *testing.T) { 20 | inProgress := []*base.TaskMessage{ 21 | h.NewTaskMessage("send_email", nil), 22 | h.NewTaskMessage("reindex", nil), 23 | h.NewTaskMessage("gen_thumbnail", nil), 24 | } 25 | r := setup(t) 26 | defer r.Close() 27 | rdbClient := rdb.NewRDB(r) 28 | h.SeedActiveQueue(t, r, inProgress, base.DefaultQueueName) 29 | 30 | const interval = time.Second 31 | syncRequestCh := make(chan *syncRequest) 32 | syncer := newSyncer(syncerParams{ 33 | logger: testLogger, 34 | requestsCh: syncRequestCh, 35 | interval: interval, 36 | }) 37 | var wg sync.WaitGroup 38 | syncer.start(&wg) 39 | defer syncer.shutdown() 40 | 41 | for _, msg := range inProgress { 42 | m := msg 43 | syncRequestCh <- &syncRequest{ 44 | fn: func() error { 45 | return rdbClient.Done(context.Background(), m) 46 | }, 47 | deadline: time.Now().Add(5 * time.Minute), 48 | } 49 | } 50 | 51 | time.Sleep(2 * interval) // ensure that syncer runs at least once 52 | 53 | gotActive := h.GetActiveMessages(t, r, base.DefaultQueueName) 54 | if l := len(gotActive); l != 0 { 55 | t.Errorf("%q has length %d; want 0", base.ActiveKey(base.DefaultQueueName), l) 56 | } 57 | } 58 | 59 | func TestSyncerRetry(t *testing.T) { 60 | const interval = time.Second 61 | syncRequestCh := make(chan *syncRequest) 62 | syncer := newSyncer(syncerParams{ 63 | logger: testLogger, 64 | requestsCh: syncRequestCh, 65 | interval: interval, 66 | }) 67 | 68 | var wg sync.WaitGroup 69 | syncer.start(&wg) 70 | defer syncer.shutdown() 71 | 72 | var ( 73 | mu sync.Mutex 74 | counter int 75 | ) 76 | 77 | // Increment the counter for each call. 78 | // Initial call will fail and second call will succeed. 
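// A failed request stays buffered inside the syncer and is re-attempted on the next interval tick, which is why the counter below is expected to reach exactly 2.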
79 | requestFunc := func() error { 80 | mu.Lock() 81 | defer mu.Unlock() 82 | if counter == 0 { 83 | counter++ 84 | return fmt.Errorf("zero") 85 | } 86 | counter++ 87 | return nil 88 | } 89 | 90 | syncRequestCh <- &syncRequest{ 91 | fn: requestFunc, 92 | errMsg: "error", 93 | deadline: time.Now().Add(5 * time.Minute), 94 | } 95 | 96 | // allow syncer to retry 97 | time.Sleep(3 * interval) 98 | 99 | mu.Lock() 100 | if counter != 2 { 101 | t.Errorf("counter = %d, want 2", counter) 102 | } 103 | mu.Unlock() 104 | } 105 | 106 | func TestSyncerDropsStaleRequests(t *testing.T) { 107 | const interval = time.Second 108 | syncRequestCh := make(chan *syncRequest) 109 | syncer := newSyncer(syncerParams{ 110 | logger: testLogger, 111 | requestsCh: syncRequestCh, 112 | interval: interval, 113 | }) 114 | var wg sync.WaitGroup 115 | syncer.start(&wg) 116 | 117 | var ( 118 | mu sync.Mutex 119 | n int // number of times request has been processed 120 | ) 121 | 122 | for i := 0; i < 10; i++ { 123 | syncRequestCh <- &syncRequest{ 124 | fn: func() error { 125 | mu.Lock() 126 | n++ 127 | mu.Unlock() 128 | return nil 129 | }, 130 | deadline: time.Now().Add(time.Duration(-i) * time.Second), // already exceeded deadline 131 | } 132 | } 133 | 134 | time.Sleep(2 * interval) // ensure that syncer runs at least once 135 | syncer.shutdown() 136 | 137 | mu.Lock() 138 | if n != 0 { 139 | t.Errorf("requests has been processed %d times, want 0", n) 140 | } 141 | mu.Unlock() 142 | } 143 | -------------------------------------------------------------------------------- /tools/asynq/README.md: -------------------------------------------------------------------------------- 1 | # Asynq CLI 2 | 3 | Asynq CLI is a command line tool to monitor the queues and tasks managed by the `asynq` package. 4 | 5 | ## Table of Contents 6 | 7 | - [Installation](#installation) 8 | - [Usage](#usage) 9 | - [Config File](#config-file) 10 | 11 | ## Installation 12 | 13 | To use the tool, install it with the following command: 14 | 15 | go install github.com/hibiken/asynq/tools/asynq@latest 16 | 17 | This will create the `asynq` executable under your `$GOPATH/bin` directory. 18 | 19 | ## Usage 20 | 21 | ### Commands 22 | 23 | To view details on any command, use `asynq help <command>`. 24 | 25 | - `asynq dash` 26 | - `asynq stats` 27 | - `asynq queue [ls inspect history rm pause unpause]` 28 | - `asynq task [ls cancel delete archive run deleteall archiveall runall]` 29 | - `asynq server [ls]` 30 | 31 | ### Global flags 32 | 33 | Asynq CLI needs to connect to a redis server to inspect the state of queues and tasks. Use flags to specify the options to connect to the redis server used by your application. 34 | To connect to a redis cluster, pass the `--cluster` and `--cluster_addrs` flags. 35 | 36 | By default, the CLI will try to connect to a redis server running at `localhost:6379`. 37 | 38 | ``` 39 | --config string config file to set flag default values (default is $HOME/.asynq.yaml) 40 | -n, --db int redis database number (default is 0) 41 | -h, --help help for asynq 42 | -p, --password string password to use when connecting to redis server 43 | -u, --uri string redis server URI (default "127.0.0.1:6379") 44 | 45 | --cluster connect to redis cluster 46 | --cluster_addrs string list of comma-separated redis server addresses 47 | ``` 48 | 49 | ## Config File 50 | 51 | You can use a config file to set default values for the flags. 52 | 53 | By default, `asynq` will try to read a config file located at 54 | `$HOME/.asynq.(yml|json)`.
You can specify the file location via the `--config` flag. 55 | 56 | Config file example: 57 | 58 | ```yaml 59 | uri: 127.0.0.1:6379 60 | db: 2 61 | password: mypassword 62 | ``` 63 | 64 | This will set the default values for `--uri`, `--db`, and `--password` flags. 65 | -------------------------------------------------------------------------------- /tools/asynq/cmd/cron.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package cmd 6 | 7 | import ( 8 | "fmt" 9 | "io" 10 | "os" 11 | "sort" 12 | "time" 13 | 14 | "github.com/MakeNowJust/heredoc/v2" 15 | "github.com/hibiken/asynq" 16 | "github.com/spf13/cobra" 17 | ) 18 | 19 | func init() { 20 | rootCmd.AddCommand(cronCmd) 21 | cronCmd.AddCommand(cronListCmd) 22 | cronCmd.AddCommand(cronHistoryCmd) 23 | cronHistoryCmd.Flags().Int("page", 1, "page number") 24 | cronHistoryCmd.Flags().Int("size", 30, "page size") 25 | } 26 | 27 | var cronCmd = &cobra.Command{ 28 | Use: "cron [flags]", 29 | Short: "Manage cron", 30 | Example: heredoc.Doc(` 31 | $ asynq cron ls 32 | $ asynq cron history 7837f142-6337-4217-9276-8f27281b67d1`), 33 | } 34 | 35 | var cronListCmd = &cobra.Command{ 36 | Use: "list", 37 | Aliases: []string{"ls"}, 38 | Short: "List cron entries", 39 | Run: cronList, 40 | } 41 | 42 | var cronHistoryCmd = &cobra.Command{ 43 | Use: "history <entryID> [<entryID>...]", 44 | Short: "Show history of each cron task", 45 | Args: cobra.MinimumNArgs(1), 46 | Run: cronHistory, 47 | Example: heredoc.Doc(` 48 | $ asynq cron history 7837f142-6337-4217-9276-8f27281b67d1 49 | $ asynq cron history 7837f142-6337-4217-9276-8f27281b67d1 bf6a8594-cd03-4968-b36a-8572c5e160dd 50 | $ asynq cron history 7837f142-6337-4217-9276-8f27281b67d1 --size=100 51 | $ asynq cron history 7837f142-6337-4217-9276-8f27281b67d1 --page=2`), 52 | } 53 | 54 | func cronList(cmd *cobra.Command, args []string) { 55 | inspector := createInspector() 56 | 57 | entries, err := inspector.SchedulerEntries() 58 | if err != nil { 59 | fmt.Println(err) 60 | os.Exit(1) 61 | } 62 | if len(entries) == 0 { 63 | fmt.Println("No scheduler entries") 64 | return 65 | } 66 | 67 | // Sort entries by spec. 68 | sort.Slice(entries, func(i, j int) bool { 69 | x, y := entries[i], entries[j] 70 | return x.Spec < y.Spec 71 | }) 72 | 73 | cols := []string{"EntryID", "Spec", "Type", "Payload", "Options", "Next", "Prev"} 74 | printRows := func(w io.Writer, tmpl string) { 75 | for _, e := range entries { 76 | fmt.Fprintf(w, tmpl, e.ID, e.Spec, e.Task.Type(), sprintBytes(e.Task.Payload()), e.Opts, 77 | nextEnqueue(e.Next), prevEnqueue(e.Prev)) 78 | } 79 | } 80 | printTable(cols, printRows) 81 | } 82 | 83 | // Returns a string describing when the next enqueue will happen. 84 | func nextEnqueue(nextEnqueueAt time.Time) string { 85 | d := nextEnqueueAt.Sub(time.Now()).Round(time.Second) 86 | if d < 0 { 87 | return "Now" 88 | } 89 | return fmt.Sprintf("In %v", d) 90 | } 91 | 92 | // Returns a string describing when the previous enqueue was.
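// For example "5s ago", or "N/A" if the entry has not been enqueued yet.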
93 | func prevEnqueue(prevEnqueuedAt time.Time) string { 94 | if prevEnqueuedAt.IsZero() { 95 | return "N/A" 96 | } 97 | return fmt.Sprintf("%v ago", time.Since(prevEnqueuedAt).Round(time.Second)) 98 | } 99 | 100 | func cronHistory(cmd *cobra.Command, args []string) { 101 | pageNum, err := cmd.Flags().GetInt("page") 102 | if err != nil { 103 | fmt.Println(err) 104 | os.Exit(1) 105 | } 106 | pageSize, err := cmd.Flags().GetInt("size") 107 | if err != nil { 108 | fmt.Println(err) 109 | os.Exit(1) 110 | } 111 | inspector := createInspector() 112 | for i, entryID := range args { 113 | if i > 0 { 114 | fmt.Printf("\n%s\n", separator) 115 | } 116 | fmt.Println() 117 | 118 | fmt.Printf("Entry: %s\n\n", entryID) 119 | 120 | events, err := inspector.ListSchedulerEnqueueEvents( 121 | entryID, asynq.PageSize(pageSize), asynq.Page(pageNum)) 122 | if err != nil { 123 | fmt.Printf("error: %v\n", err) 124 | continue 125 | } 126 | if len(events) == 0 { 127 | fmt.Printf("No scheduler enqueue events found for entry: %s\n", entryID) 128 | continue 129 | } 130 | 131 | cols := []string{"TaskID", "EnqueuedAt"} 132 | printRows := func(w io.Writer, tmpl string) { 133 | for _, e := range events { 134 | fmt.Fprintf(w, tmpl, e.TaskID, e.EnqueuedAt) 135 | } 136 | } 137 | printTable(cols, printRows) 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /tools/asynq/cmd/dash.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package cmd 6 | 7 | import ( 8 | "fmt" 9 | "os" 10 | "time" 11 | 12 | "github.com/MakeNowJust/heredoc/v2" 13 | "github.com/hibiken/asynq/tools/asynq/cmd/dash" 14 | "github.com/spf13/cobra" 15 | ) 16 | 17 | var ( 18 | flagPollInterval = 8 * time.Second 19 | ) 20 | 21 | func init() { 22 | rootCmd.AddCommand(dashCmd) 23 | dashCmd.Flags().DurationVar(&flagPollInterval, "refresh", 8*time.Second, "Interval between data refresh (default: 8s, min allowed: 1s)") 24 | } 25 | 26 | var dashCmd = &cobra.Command{ 27 | Use: "dash", 28 | Short: "View dashboard", 29 | Long: heredoc.Doc(` 30 | Display interactive dashboard.`), 31 | Args: cobra.NoArgs, 32 | Example: heredoc.Doc(` 33 | $ asynq dash 34 | $ asynq dash --refresh=3s`), 35 | Run: func(cmd *cobra.Command, args []string) { 36 | if flagPollInterval < 1*time.Second { 37 | fmt.Println("error: --refresh cannot be less than 1s") 38 | os.Exit(1) 39 | } 40 | dash.Run(dash.Options{ 41 | PollInterval: flagPollInterval, 42 | RedisConnOpt: getRedisConnOpt(), 43 | }) 44 | }, 45 | } 46 | -------------------------------------------------------------------------------- /tools/asynq/cmd/dash/dash.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package dash 6 | 7 | import ( 8 | "errors" 9 | "fmt" 10 | "os" 11 | "strings" 12 | "time" 13 | 14 | "github.com/gdamore/tcell/v2" 15 | "github.com/hibiken/asynq" 16 | ) 17 | 18 | // viewType is an enum for dashboard views. 19 | type viewType int 20 | 21 | const ( 22 | viewTypeQueues viewType = iota 23 | viewTypeQueueDetails 24 | viewTypeHelp 25 | ) 26 | 27 | // State holds dashboard state. 
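// A State value is confined to the dashboard's main goroutine (see the note in Run) and is mutated only in response to key events and fetch results; it must not be shared across goroutines.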
28 | type State struct { 29 | queues []*asynq.QueueInfo 30 | tasks []*asynq.TaskInfo 31 | groups []*asynq.GroupInfo 32 | err error 33 | 34 | // Note: index zero corresponds to the table header; index=1 corresponds to the first element 35 | queueTableRowIdx int // highlighted row in queue table 36 | taskTableRowIdx int // highlighted row in task table 37 | groupTableRowIdx int // highlighted row in group table 38 | taskState asynq.TaskState // highlighted task state in queue details view 39 | taskID string // selected task ID 40 | 41 | selectedQueue *asynq.QueueInfo // queue shown on queue details view 42 | selectedGroup *asynq.GroupInfo 43 | selectedTask *asynq.TaskInfo 44 | 45 | pageNum int // pagination page number 46 | 47 | view viewType // current view type 48 | prevView viewType // to support "go back" 49 | } 50 | 51 | func (s *State) DebugString() string { 52 | var b strings.Builder 53 | b.WriteString(fmt.Sprintf("len(queues)=%d ", len(s.queues))) 54 | b.WriteString(fmt.Sprintf("len(tasks)=%d ", len(s.tasks))) 55 | b.WriteString(fmt.Sprintf("len(groups)=%d ", len(s.groups))) 56 | b.WriteString(fmt.Sprintf("err=%v ", s.err)) 57 | 58 | if s.taskState != 0 { 59 | b.WriteString(fmt.Sprintf("taskState=%s ", s.taskState.String())) 60 | } else { 61 | b.WriteString(fmt.Sprintf("taskState=0")) 62 | } 63 | b.WriteString(fmt.Sprintf("taskID=%s ", s.taskID)) 64 | 65 | b.WriteString(fmt.Sprintf("queueTableRowIdx=%d ", s.queueTableRowIdx)) 66 | b.WriteString(fmt.Sprintf("taskTableRowIdx=%d ", s.taskTableRowIdx)) 67 | b.WriteString(fmt.Sprintf("groupTableRowIdx=%d ", s.groupTableRowIdx)) 68 | 69 | if s.selectedQueue != nil { 70 | b.WriteString(fmt.Sprintf("selectedQueue={Queue:%s} ", s.selectedQueue.Queue)) 71 | } else { 72 | b.WriteString("selectedQueue=nil ") 73 | } 74 | 75 | if s.selectedGroup != nil { 76 | b.WriteString(fmt.Sprintf("selectedGroup={Group:%s} ", s.selectedGroup.Group)) 77 | } else { 78 | b.WriteString("selectedGroup=nil ") 79 | } 80 | 81 | if s.selectedTask != nil { 82 | b.WriteString(fmt.Sprintf("selectedTask={ID:%s} ", s.selectedTask.ID)) 83 | } else { 84 | b.WriteString("selectedTask=nil ") 85 | } 86 | 87 | b.WriteString(fmt.Sprintf("pageNum=%d", s.pageNum)) 88 | return b.String() 89 | } 90 | 91 | type Options struct { 92 | DebugMode bool 93 | PollInterval time.Duration 94 | RedisConnOpt asynq.RedisConnOpt 95 | } 96 | 97 | func Run(opts Options) { 98 | s, err := tcell.NewScreen() 99 | if err != nil { 100 | fmt.Printf("failed to create a screen: %v\n", err) 101 | os.Exit(1) 102 | } 103 | if err := s.Init(); err != nil { 104 | fmt.Printf("failed to initialize screen: %v\n", err) 105 | os.Exit(1) 106 | } 107 | s.SetStyle(baseStyle) // set default text style 108 | 109 | var ( 110 | state = State{} // confined in this goroutine only; DO NOT SHARE 111 | 112 | inspector = asynq.NewInspector(opts.RedisConnOpt) 113 | ticker = time.NewTicker(opts.PollInterval) 114 | 115 | eventCh = make(chan tcell.Event) 116 | done = make(chan struct{}) 117 | 118 | // channels to send/receive data fetched asynchronously 119 | errorCh = make(chan error) 120 | queueCh = make(chan *asynq.QueueInfo) 121 | taskCh = make(chan *asynq.TaskInfo) 122 | queuesCh = make(chan []*asynq.QueueInfo) 123 | groupsCh = make(chan []*asynq.GroupInfo) 124 | tasksCh = make(chan []*asynq.TaskInfo) 125 | ) 126 | defer ticker.Stop() 127 | 128 | f := dataFetcher{ 129 | inspector, 130 | opts, 131 | s, 132 | errorCh, 133 | queueCh, 134 | taskCh, 135 | queuesCh, 136 | groupsCh, 137 | tasksCh, 138 | } 139 | 140 | d := dashDrawer{ 141
| s, 142 | opts, 143 | } 144 | 145 | h := keyEventHandler{ 146 | s: s, 147 | fetcher: &f, 148 | drawer: &d, 149 | state: &state, 150 | done: done, 151 | ticker: ticker, 152 | pollInterval: opts.PollInterval, 153 | } 154 | 155 | go fetchQueues(inspector, queuesCh, errorCh, opts) 156 | go s.ChannelEvents(eventCh, done) // TODO: Double check that we are not leaking goroutine with this one. 157 | d.Draw(&state) // draw initial screen 158 | 159 | for { 160 | // Update screen 161 | s.Show() 162 | 163 | select { 164 | case ev := <-eventCh: 165 | // Process event 166 | switch ev := ev.(type) { 167 | case *tcell.EventResize: 168 | s.Sync() 169 | case *tcell.EventKey: 170 | h.HandleKeyEvent(ev) 171 | } 172 | 173 | case <-ticker.C: 174 | f.Fetch(&state) 175 | 176 | case queues := <-queuesCh: 177 | state.queues = queues 178 | state.err = nil 179 | if len(queues) < state.queueTableRowIdx { 180 | state.queueTableRowIdx = len(queues) 181 | } 182 | d.Draw(&state) 183 | 184 | case q := <-queueCh: 185 | state.selectedQueue = q 186 | state.err = nil 187 | d.Draw(&state) 188 | 189 | case groups := <-groupsCh: 190 | state.groups = groups 191 | state.err = nil 192 | if len(groups) < state.groupTableRowIdx { 193 | state.groupTableRowIdx = len(groups) 194 | } 195 | d.Draw(&state) 196 | 197 | case tasks := <-tasksCh: 198 | state.tasks = tasks 199 | state.err = nil 200 | if len(tasks) < state.taskTableRowIdx { 201 | state.taskTableRowIdx = len(tasks) 202 | } 203 | d.Draw(&state) 204 | 205 | case t := <-taskCh: 206 | state.selectedTask = t 207 | state.err = nil 208 | d.Draw(&state) 209 | 210 | case err := <-errorCh: 211 | if errors.Is(err, asynq.ErrTaskNotFound) { 212 | state.selectedTask = nil 213 | } else { 214 | state.err = err 215 | } 216 | d.Draw(&state) 217 | } 218 | } 219 | 220 | } 221 | -------------------------------------------------------------------------------- /tools/asynq/cmd/dash/draw_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package dash 6 | 7 | import "testing" 8 | 9 | func TestTruncate(t *testing.T) { 10 | tests := []struct { 11 | s string 12 | max int 13 | want string 14 | }{ 15 | { 16 | s: "hello world!", 17 | max: 15, 18 | want: "hello world!", 19 | }, 20 | { 21 | s: "hello world!", 22 | max: 6, 23 | want: "hello…", 24 | }, 25 | } 26 | 27 | for _, tc := range tests { 28 | got := truncate(tc.s, tc.max) 29 | if tc.want != got { 30 | t.Errorf("truncate(%q, %d) = %q, want %q", tc.s, tc.max, got, tc.want) 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /tools/asynq/cmd/dash/fetch.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package dash 6 | 7 | import ( 8 | "sort" 9 | 10 | "github.com/gdamore/tcell/v2" 11 | "github.com/hibiken/asynq" 12 | ) 13 | 14 | type fetcher interface { 15 | // Fetch retrieves the data required by the given state of the dashboard.
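// Implementations are expected to kick off the actual work asynchronously and deliver results (or errors) on the dashboard's channels, as dataFetcher below does, so Fetch itself does not block the UI event loop.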
16 | Fetch(state *State) 17 | } 18 | 19 | type dataFetcher struct { 20 | inspector *asynq.Inspector 21 | opts Options 22 | s tcell.Screen 23 | 24 | errorCh chan<- error 25 | queueCh chan<- *asynq.QueueInfo 26 | taskCh chan<- *asynq.TaskInfo 27 | queuesCh chan<- []*asynq.QueueInfo 28 | groupsCh chan<- []*asynq.GroupInfo 29 | tasksCh chan<- []*asynq.TaskInfo 30 | } 31 | 32 | func (f *dataFetcher) Fetch(state *State) { 33 | switch state.view { 34 | case viewTypeQueues: 35 | f.fetchQueues() 36 | case viewTypeQueueDetails: 37 | if shouldShowGroupTable(state) { 38 | f.fetchGroups(state.selectedQueue.Queue) 39 | } else if state.taskState == asynq.TaskStateAggregating { 40 | f.fetchAggregatingTasks(state.selectedQueue.Queue, state.selectedGroup.Group, taskPageSize(f.s), state.pageNum) 41 | } else { 42 | f.fetchTasks(state.selectedQueue.Queue, state.taskState, taskPageSize(f.s), state.pageNum) 43 | } 44 | // if the task modal is open, additionally fetch the selected task's info 45 | if state.taskID != "" { 46 | f.fetchTaskInfo(state.selectedQueue.Queue, state.taskID) 47 | } 48 | } 49 | } 50 | 51 | func (f *dataFetcher) fetchQueues() { 52 | var ( 53 | inspector = f.inspector 54 | queuesCh = f.queuesCh 55 | errorCh = f.errorCh 56 | opts = f.opts 57 | ) 58 | go fetchQueues(inspector, queuesCh, errorCh, opts) 59 | } 60 | 61 | func fetchQueues(i *asynq.Inspector, queuesCh chan<- []*asynq.QueueInfo, errorCh chan<- error, opts Options) { 62 | queues, err := i.Queues() 63 | if err != nil { 64 | errorCh <- err 65 | return 66 | } 67 | sort.Strings(queues) 68 | var res []*asynq.QueueInfo 69 | for _, q := range queues { 70 | info, err := i.GetQueueInfo(q) 71 | if err != nil { 72 | errorCh <- err 73 | return 74 | } 75 | res = append(res, info) 76 | } 77 | queuesCh <- res 78 | } 79 | 80 | func fetchQueueInfo(i *asynq.Inspector, qname string, queueCh chan<- *asynq.QueueInfo, errorCh chan<- error) { 81 | q, err := i.GetQueueInfo(qname) 82 | if err != nil { 83 | errorCh <- err 84 | return 85 | } 86 | queueCh <- q 87 | } 88 | 89 | func (f *dataFetcher) fetchGroups(qname string) { 90 | var ( 91 | i = f.inspector 92 | groupsCh = f.groupsCh 93 | errorCh = f.errorCh 94 | queueCh = f.queueCh 95 | ) 96 | go fetchGroups(i, qname, groupsCh, errorCh) 97 | go fetchQueueInfo(i, qname, queueCh, errorCh) 98 | } 99 | 100 | func fetchGroups(i *asynq.Inspector, qname string, groupsCh chan<- []*asynq.GroupInfo, errorCh chan<- error) { 101 | groups, err := i.Groups(qname) 102 | if err != nil { 103 | errorCh <- err 104 | return 105 | } 106 | groupsCh <- groups 107 | } 108 | 109 | func (f *dataFetcher) fetchAggregatingTasks(qname, group string, pageSize, pageNum int) { 110 | var ( 111 | i = f.inspector 112 | tasksCh = f.tasksCh 113 | errorCh = f.errorCh 114 | queueCh = f.queueCh 115 | ) 116 | go fetchAggregatingTasks(i, qname, group, pageSize, pageNum, tasksCh, errorCh) 117 | go fetchQueueInfo(i, qname, queueCh, errorCh) 118 | } 119 | 120 | func fetchAggregatingTasks(i *asynq.Inspector, qname, group string, pageSize, pageNum int, 121 | tasksCh chan<- []*asynq.TaskInfo, errorCh chan<- error) { 122 | tasks, err := i.ListAggregatingTasks(qname, group, asynq.PageSize(pageSize), asynq.Page(pageNum)) 123 | if err != nil { 124 | errorCh <- err 125 | return 126 | } 127 | tasksCh <- tasks 128 | } 129 | 130 | func (f *dataFetcher) fetchTasks(qname string, taskState asynq.TaskState, pageSize, pageNum int) { 131 | var ( 132 | i = f.inspector 133 | tasksCh = f.tasksCh 134 | errorCh = f.errorCh 135 | queueCh = f.queueCh 136 | ) 137 | go 
fetchTasks(i, qname, taskState, pageSize, pageNum, tasksCh, errorCh) 138 | go fetchQueueInfo(i, qname, queueCh, errorCh) 139 | } 140 | 141 | func fetchTasks(i *asynq.Inspector, qname string, taskState asynq.TaskState, pageSize, pageNum int, 142 | tasksCh chan<- []*asynq.TaskInfo, errorCh chan<- error) { 143 | var ( 144 | tasks []*asynq.TaskInfo 145 | err error 146 | ) 147 | opts := []asynq.ListOption{asynq.PageSize(pageSize), asynq.Page(pageNum)} 148 | switch taskState { 149 | case asynq.TaskStateActive: 150 | tasks, err = i.ListActiveTasks(qname, opts...) 151 | case asynq.TaskStatePending: 152 | tasks, err = i.ListPendingTasks(qname, opts...) 153 | case asynq.TaskStateScheduled: 154 | tasks, err = i.ListScheduledTasks(qname, opts...) 155 | case asynq.TaskStateRetry: 156 | tasks, err = i.ListRetryTasks(qname, opts...) 157 | case asynq.TaskStateArchived: 158 | tasks, err = i.ListArchivedTasks(qname, opts...) 159 | case asynq.TaskStateCompleted: 160 | tasks, err = i.ListCompletedTasks(qname, opts...) 161 | } 162 | if err != nil { 163 | errorCh <- err 164 | return 165 | } 166 | tasksCh <- tasks 167 | } 168 | 169 | func (f *dataFetcher) fetchTaskInfo(qname, taskID string) { 170 | var ( 171 | i = f.inspector 172 | taskCh = f.taskCh 173 | errorCh = f.errorCh 174 | ) 175 | go fetchTaskInfo(i, qname, taskID, taskCh, errorCh) 176 | } 177 | 178 | func fetchTaskInfo(i *asynq.Inspector, qname, taskID string, taskCh chan<- *asynq.TaskInfo, errorCh chan<- error) { 179 | info, err := i.GetTaskInfo(qname, taskID) 180 | if err != nil { 181 | errorCh <- err 182 | return 183 | } 184 | taskCh <- info 185 | } 186 | -------------------------------------------------------------------------------- /tools/asynq/cmd/dash/key_event_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 
4 | 5 | package dash 6 | 7 | import ( 8 | "testing" 9 | "time" 10 | 11 | "github.com/gdamore/tcell/v2" 12 | "github.com/google/go-cmp/cmp" 13 | "github.com/hibiken/asynq" 14 | ) 15 | 16 | func makeKeyEventHandler(t *testing.T, state *State) *keyEventHandler { 17 | ticker := time.NewTicker(time.Second) 18 | t.Cleanup(func() { ticker.Stop() }) 19 | return &keyEventHandler{ 20 | s: tcell.NewSimulationScreen("UTF-8"), 21 | state: state, 22 | done: make(chan struct{}), 23 | fetcher: &fakeFetcher{}, 24 | drawer: &fakeDrawer{}, 25 | ticker: ticker, 26 | pollInterval: time.Second, 27 | } 28 | } 29 | 30 | type keyEventHandlerTest struct { 31 | desc string // test description 32 | state *State // initial state, to be mutated by the handler 33 | events []*tcell.EventKey // keyboard events 34 | wantState State // expected state after the events 35 | } 36 | 37 | func TestKeyEventHandler(t *testing.T) { 38 | tests := []*keyEventHandlerTest{ 39 | { 40 | desc: "navigates to help view", 41 | state: &State{view: viewTypeQueues}, 42 | events: []*tcell.EventKey{tcell.NewEventKey(tcell.KeyRune, '?', tcell.ModNone)}, 43 | wantState: State{view: viewTypeHelp}, 44 | }, 45 | { 46 | desc: "navigates to queue details view", 47 | state: &State{ 48 | view: viewTypeQueues, 49 | queues: []*asynq.QueueInfo{ 50 | {Queue: "default", Size: 100, Active: 10, Pending: 40, Scheduled: 40, Completed: 10}, 51 | }, 52 | queueTableRowIdx: 0, 53 | }, 54 | events: []*tcell.EventKey{ 55 | tcell.NewEventKey(tcell.KeyRune, 'j', tcell.ModNone), // down 56 | tcell.NewEventKey(tcell.KeyEnter, '\n', tcell.ModNone), // Enter 57 | }, 58 | wantState: State{ 59 | view: viewTypeQueueDetails, 60 | queues: []*asynq.QueueInfo{ 61 | {Queue: "default", Size: 100, Active: 10, Pending: 40, Scheduled: 40, Completed: 10}, 62 | }, 63 | selectedQueue: &asynq.QueueInfo{Queue: "default", Size: 100, Active: 10, Pending: 40, Scheduled: 40, Completed: 10}, 64 | queueTableRowIdx: 1, 65 | taskState: asynq.TaskStateActive, 66 | pageNum: 1, 67 | }, 68 | }, 69 | { 70 | desc: "does nothing if no queues are present", 71 | state: &State{ 72 | view: viewTypeQueues, 73 | queues: []*asynq.QueueInfo{}, // empty 74 | queueTableRowIdx: 0, 75 | }, 76 | events: []*tcell.EventKey{ 77 | tcell.NewEventKey(tcell.KeyRune, 'j', tcell.ModNone), // down 78 | tcell.NewEventKey(tcell.KeyEnter, '\n', tcell.ModNone), // Enter 79 | }, 80 | wantState: State{ 81 | view: viewTypeQueues, 82 | queues: []*asynq.QueueInfo{}, 83 | queueTableRowIdx: 0, 84 | }, 85 | }, 86 | { 87 | desc: "opens task info modal", 88 | state: &State{ 89 | view: viewTypeQueueDetails, 90 | queues: []*asynq.QueueInfo{ 91 | {Queue: "default", Size: 500, Active: 10, Pending: 40}, 92 | }, 93 | queueTableRowIdx: 1, 94 | selectedQueue: &asynq.QueueInfo{Queue: "default", Size: 50, Active: 10, Pending: 40}, 95 | taskState: asynq.TaskStatePending, 96 | pageNum: 1, 97 | tasks: []*asynq.TaskInfo{ 98 | {ID: "xxxx", Type: "foo"}, 99 | {ID: "yyyy", Type: "bar"}, 100 | {ID: "zzzz", Type: "baz"}, 101 | }, 102 | taskTableRowIdx: 2, 103 | }, 104 | events: []*tcell.EventKey{ 105 | tcell.NewEventKey(tcell.KeyEnter, '\n', tcell.ModNone), // Enter 106 | }, 107 | wantState: State{ 108 | view: viewTypeQueueDetails, 109 | queues: []*asynq.QueueInfo{ 110 | {Queue: "default", Size: 500, Active: 10, Pending: 40}, 111 | }, 112 | queueTableRowIdx: 1, 113 | selectedQueue: &asynq.QueueInfo{Queue: "default", Size: 50, Active: 10, Pending: 40}, 114 | taskState: asynq.TaskStatePending, 115 | pageNum: 1, 116 | tasks: []*asynq.TaskInfo{ 117 | {ID: "xxxx", 
Type: "foo"}, 118 | {ID: "yyyy", Type: "bar"}, 119 | {ID: "zzzz", Type: "baz"}, 120 | }, 121 | taskTableRowIdx: 2, 122 | // new states 123 | taskID: "yyyy", 124 | selectedTask: &asynq.TaskInfo{ID: "yyyy", Type: "bar"}, 125 | }, 126 | }, 127 | { 128 | desc: "Esc closes task info modal", 129 | state: &State{ 130 | view: viewTypeQueueDetails, 131 | queues: []*asynq.QueueInfo{ 132 | {Queue: "default", Size: 500, Active: 10, Pending: 40}, 133 | }, 134 | queueTableRowIdx: 1, 135 | selectedQueue: &asynq.QueueInfo{Queue: "default", Size: 50, Active: 10, Pending: 40}, 136 | taskState: asynq.TaskStatePending, 137 | pageNum: 1, 138 | tasks: []*asynq.TaskInfo{ 139 | {ID: "xxxx", Type: "foo"}, 140 | {ID: "yyyy", Type: "bar"}, 141 | {ID: "zzzz", Type: "baz"}, 142 | }, 143 | taskTableRowIdx: 2, 144 | taskID: "yyyy", // presence of this field opens the modal 145 | }, 146 | events: []*tcell.EventKey{ 147 | tcell.NewEventKey(tcell.KeyEscape, ' ', tcell.ModNone), // Esc 148 | }, 149 | wantState: State{ 150 | view: viewTypeQueueDetails, 151 | queues: []*asynq.QueueInfo{ 152 | {Queue: "default", Size: 500, Active: 10, Pending: 40}, 153 | }, 154 | queueTableRowIdx: 1, 155 | selectedQueue: &asynq.QueueInfo{Queue: "default", Size: 50, Active: 10, Pending: 40}, 156 | taskState: asynq.TaskStatePending, 157 | pageNum: 1, 158 | tasks: []*asynq.TaskInfo{ 159 | {ID: "xxxx", Type: "foo"}, 160 | {ID: "yyyy", Type: "bar"}, 161 | {ID: "zzzz", Type: "baz"}, 162 | }, 163 | taskTableRowIdx: 2, 164 | taskID: "", // this field should be unset 165 | }, 166 | }, 167 | { 168 | desc: "Arrow keys are disabled while task info modal is open", 169 | state: &State{ 170 | view: viewTypeQueueDetails, 171 | queues: []*asynq.QueueInfo{ 172 | {Queue: "default", Size: 500, Active: 10, Pending: 40}, 173 | }, 174 | queueTableRowIdx: 1, 175 | selectedQueue: &asynq.QueueInfo{Queue: "default", Size: 50, Active: 10, Pending: 40}, 176 | taskState: asynq.TaskStatePending, 177 | pageNum: 1, 178 | tasks: []*asynq.TaskInfo{ 179 | {ID: "xxxx", Type: "foo"}, 180 | {ID: "yyyy", Type: "bar"}, 181 | {ID: "zzzz", Type: "baz"}, 182 | }, 183 | taskTableRowIdx: 2, 184 | taskID: "yyyy", // presence of this field opens the modal 185 | }, 186 | events: []*tcell.EventKey{ 187 | tcell.NewEventKey(tcell.KeyLeft, ' ', tcell.ModNone), 188 | }, 189 | 190 | // no change 191 | wantState: State{ 192 | view: viewTypeQueueDetails, 193 | queues: []*asynq.QueueInfo{ 194 | {Queue: "default", Size: 500, Active: 10, Pending: 40}, 195 | }, 196 | queueTableRowIdx: 1, 197 | selectedQueue: &asynq.QueueInfo{Queue: "default", Size: 50, Active: 10, Pending: 40}, 198 | taskState: asynq.TaskStatePending, 199 | pageNum: 1, 200 | tasks: []*asynq.TaskInfo{ 201 | {ID: "xxxx", Type: "foo"}, 202 | {ID: "yyyy", Type: "bar"}, 203 | {ID: "zzzz", Type: "baz"}, 204 | }, 205 | taskTableRowIdx: 2, 206 | taskID: "yyyy", // presence of this field opens the modal 207 | }, 208 | }, 209 | // TODO: Add more tests 210 | } 211 | 212 | for _, tc := range tests { 213 | t.Run(tc.desc, func(t *testing.T) { 214 | h := makeKeyEventHandler(t, tc.state) 215 | for _, e := range tc.events { 216 | h.HandleKeyEvent(e) 217 | } 218 | if diff := cmp.Diff(tc.wantState, *tc.state, cmp.AllowUnexported(State{})); diff != "" { 219 | t.Errorf("after state was %+v, want %+v: (-want,+got)\n%s", *tc.state, tc.wantState, diff) 220 | } 221 | }) 222 | } 223 | 224 | } 225 | 226 | /*** fake implementation for tests ***/ 227 | 228 | type fakeFetcher struct{} 229 | 230 | func (f *fakeFetcher) Fetch(s *State) {} 231 | 232 | type fakeDrawer 
struct{} 233 | 234 | func (d *fakeDrawer) Draw(s *State) {} 235 | -------------------------------------------------------------------------------- /tools/asynq/cmd/dash/screen_drawer.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package dash 6 | 7 | import ( 8 | "strings" 9 | 10 | "github.com/gdamore/tcell/v2" 11 | "github.com/mattn/go-runewidth" 12 | ) 13 | 14 | /*** Screen Drawer ***/ 15 | 16 | // ScreenDrawer is used to draw contents on screen. 17 | // 18 | // Usage example: 19 | // d := NewScreenDrawer(s) 20 | // d.Println("Hello world", mystyle) 21 | // d.NL() // adds newline 22 | // d.Print("foo", mystyle.Bold(true)) 23 | // d.Print("bar", mystyle.Italic(true)) 24 | type ScreenDrawer struct { 25 | l *LineDrawer 26 | } 27 | 28 | func NewScreenDrawer(s tcell.Screen) *ScreenDrawer { 29 | return &ScreenDrawer{l: NewLineDrawer(0, s)} 30 | } 31 | 32 | func (d *ScreenDrawer) Print(s string, style tcell.Style) { 33 | d.l.Draw(s, style) 34 | } 35 | 36 | func (d *ScreenDrawer) Println(s string, style tcell.Style) { 37 | d.Print(s, style) 38 | d.NL() 39 | } 40 | 41 | // FillLine prints the given rune until the end of the current line 42 | // and adds a newline. 43 | func (d *ScreenDrawer) FillLine(r rune, style tcell.Style) { 44 | w, _ := d.Screen().Size() 45 | if w-d.l.col < 0 { 46 | d.NL() 47 | return 48 | } 49 | s := strings.Repeat(string(r), w-d.l.col) 50 | d.Print(s, style) 51 | d.NL() 52 | } 53 | 54 | func (d *ScreenDrawer) FillUntil(r rune, style tcell.Style, limit int) { 55 | if d.l.col > limit { 56 | return // already passed the limit 57 | } 58 | s := strings.Repeat(string(r), limit-d.l.col) 59 | d.Print(s, style) 60 | } 61 | 62 | // NL adds a newline (i.e., moves to the next line). 63 | func (d *ScreenDrawer) NL() { 64 | d.l.row++ 65 | d.l.col = 0 66 | } 67 | 68 | func (d *ScreenDrawer) Screen() tcell.Screen { 69 | return d.l.s 70 | } 71 | 72 | // Goto moves the ScreenDrawer to the specified cell. 73 | func (d *ScreenDrawer) Goto(x, y int) { 74 | d.l.row = y 75 | d.l.col = x 76 | } 77 | 78 | // GoToBottom moves the ScreenDrawer to the bottom of the screen. 79 | func (d *ScreenDrawer) GoToBottom() { 80 | _, h := d.Screen().Size() 81 | d.l.row = h - 1 82 | d.l.col = 0 83 | } 84 | 85 | type LineDrawer struct { 86 | s tcell.Screen 87 | row int 88 | col int 89 | } 90 | 91 | func NewLineDrawer(row int, s tcell.Screen) *LineDrawer { 92 | return &LineDrawer{row: row, col: 0, s: s} 93 | } 94 | 95 | func (d *LineDrawer) Draw(s string, style tcell.Style) { 96 | for _, r := range s { 97 | d.s.SetContent(d.col, d.row, r, nil, style) 98 | d.col += runewidth.RuneWidth(r) 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /tools/asynq/cmd/dash/table.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file.
4 | 5 | package dash 6 | 7 | import ( 8 | "github.com/gdamore/tcell/v2" 9 | "github.com/mattn/go-runewidth" 10 | ) 11 | 12 | type columnAlignment int 13 | 14 | const ( 15 | alignRight columnAlignment = iota 16 | alignLeft 17 | ) 18 | 19 | type columnConfig[V any] struct { 20 | name string 21 | alignment columnAlignment 22 | displayFn func(v V) string 23 | } 24 | 25 | type column[V any] struct { 26 | *columnConfig[V] 27 | width int 28 | } 29 | 30 | // Helper to draw a table. 31 | func drawTable[V any](d *ScreenDrawer, style tcell.Style, configs []*columnConfig[V], data []V, highlightRowIdx int) { 32 | const colBuffer = " " // extra buffer between columns 33 | cols := make([]*column[V], len(configs)) 34 | for i, cfg := range configs { 35 | cols[i] = &column[V]{cfg, runewidth.StringWidth(cfg.name)} 36 | } 37 | // adjust the column width to accommodate the widest value. 38 | for _, v := range data { 39 | for _, col := range cols { 40 | if w := runewidth.StringWidth(col.displayFn(v)); col.width < w { 41 | col.width = w 42 | } 43 | } 44 | } 45 | // print header 46 | headerStyle := style.Background(tcell.ColorDimGray).Foreground(tcell.ColorWhite) 47 | for _, col := range cols { 48 | if col.alignment == alignLeft { 49 | d.Print(rpad(col.name, col.width)+colBuffer, headerStyle) 50 | } else { 51 | d.Print(lpad(col.name, col.width)+colBuffer, headerStyle) 52 | } 53 | } 54 | d.FillLine(' ', headerStyle) 55 | // print body 56 | for i, v := range data { 57 | rowStyle := style 58 | if highlightRowIdx == i { 59 | rowStyle = style.Background(tcell.ColorDarkOliveGreen) 60 | } 61 | for _, col := range cols { 62 | if col.alignment == alignLeft { 63 | d.Print(rpad(col.displayFn(v), col.width)+colBuffer, rowStyle) 64 | } else { 65 | d.Print(lpad(col.displayFn(v), col.width)+colBuffer, rowStyle) 66 | } 67 | } 68 | d.FillLine(' ', rowStyle) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /tools/asynq/cmd/group.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 
4 | 5 | package cmd 6 | 7 | import ( 8 | "fmt" 9 | "os" 10 | 11 | "github.com/MakeNowJust/heredoc/v2" 12 | "github.com/spf13/cobra" 13 | ) 14 | 15 | func init() { 16 | rootCmd.AddCommand(groupCmd) 17 | groupCmd.AddCommand(groupListCmd) 18 | groupListCmd.Flags().StringP("queue", "q", "", "queue to inspect") 19 | groupListCmd.MarkFlagRequired("queue") 20 | } 21 | 22 | var groupCmd = &cobra.Command{ 23 | Use: "group [flags]", 24 | Short: "Manage groups", 25 | Example: heredoc.Doc(` 26 | $ asynq group list --queue=myqueue`), 27 | } 28 | 29 | var groupListCmd = &cobra.Command{ 30 | Use: "list", 31 | Aliases: []string{"ls"}, 32 | Short: "List groups", 33 | Args: cobra.NoArgs, 34 | Run: groupLists, 35 | } 36 | 37 | func groupLists(cmd *cobra.Command, args []string) { 38 | qname, err := cmd.Flags().GetString("queue") 39 | if err != nil { 40 | fmt.Println(err) 41 | os.Exit(1) 42 | } 43 | inspector := createInspector() 44 | groups, err := inspector.Groups(qname) 45 | if err != nil { 46 | fmt.Println(err) 47 | os.Exit(1) 48 | } 49 | if len(groups) == 0 { 50 | fmt.Printf("No groups found in queue %q\n", qname) 51 | return 52 | } 53 | for _, g := range groups { 54 | fmt.Println(g.Group) 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /tools/asynq/cmd/server.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package cmd 6 | 7 | import ( 8 | "fmt" 9 | "io" 10 | "os" 11 | "sort" 12 | "strings" 13 | "time" 14 | 15 | "github.com/MakeNowJust/heredoc/v2" 16 | "github.com/spf13/cobra" 17 | ) 18 | 19 | func init() { 20 | rootCmd.AddCommand(serverCmd) 21 | serverCmd.AddCommand(serverListCmd) 22 | } 23 | 24 | var serverCmd = &cobra.Command{ 25 | Use: "server [flags]", 26 | Short: "Manage servers", 27 | Example: heredoc.Doc(` 28 | $ asynq server list`), 29 | } 30 | 31 | var serverListCmd = &cobra.Command{ 32 | Use: "list", 33 | Aliases: []string{"ls"}, 34 | Short: "List servers", 35 | Long: `Server list (asynq server ls) shows all running worker servers 36 | pulling tasks from the given redis instance. 37 | 38 | The command shows the following for each server: 39 | * Host and PID of the process in which the server is running 40 | * Number of active workers out of worker pool 41 | * Queue configuration 42 | * State of the worker server ("active" | "stopped") 43 | * Time the server was started 44 | 45 | An "active" server is pulling tasks from queues and processing them.
46 | A "stopped" server is no longer pulling new tasks from queues.`, 47 | Run: serverList, 48 | } 49 | 50 | func serverList(cmd *cobra.Command, args []string) { 51 | r := createRDB() 52 | 53 | servers, err := r.ListServers() 54 | if err != nil { 55 | fmt.Println(err) 56 | os.Exit(1) 57 | } 58 | if len(servers) == 0 { 59 | fmt.Println("No running servers") 60 | return 61 | } 62 | 63 | // sort by hostname and pid 64 | sort.Slice(servers, func(i, j int) bool { 65 | x, y := servers[i], servers[j] 66 | if x.Host != y.Host { 67 | return x.Host < y.Host 68 | } 69 | return x.PID < y.PID 70 | }) 71 | 72 | // print server info 73 | cols := []string{"Host", "PID", "State", "Active Workers", "Queues", "Started"} 74 | printRows := func(w io.Writer, tmpl string) { 75 | for _, info := range servers { 76 | fmt.Fprintf(w, tmpl, 77 | info.Host, info.PID, info.Status, 78 | fmt.Sprintf("%d/%d", info.ActiveWorkerCount, info.Concurrency), 79 | formatQueues(info.Queues), timeAgo(info.Started)) 80 | } 81 | } 82 | printTable(cols, printRows) 83 | } 84 | 85 | func formatQueues(qmap map[string]int) string { 86 | // sort queues by priority and name 87 | type queue struct { 88 | name string 89 | priority int 90 | } 91 | var queues []*queue 92 | for qname, p := range qmap { 93 | queues = append(queues, &queue{qname, p}) 94 | } 95 | sort.Slice(queues, func(i, j int) bool { 96 | x, y := queues[i], queues[j] 97 | if x.priority != y.priority { 98 | return x.priority > y.priority 99 | } 100 | return x.name < y.name 101 | }) 102 | 103 | var b strings.Builder 104 | l := len(queues) 105 | for _, q := range queues { 106 | fmt.Fprintf(&b, "%s:%d", q.name, q.priority) 107 | l-- 108 | if l > 0 { 109 | b.WriteString(" ") 110 | } 111 | } 112 | return b.String() 113 | } 114 | 115 | // timeAgo takes a time and returns a string of the format "<duration> ago". 116 | func timeAgo(since time.Time) string { 117 | d := time.Since(since).Round(time.Second) 118 | return fmt.Sprintf("%v ago", d) 119 | } 120 | -------------------------------------------------------------------------------- /tools/asynq/cmd/stats.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package cmd 6 | 7 | import ( 8 | "encoding/json" 9 | "fmt" 10 | "io" 11 | "math" 12 | "os" 13 | "strconv" 14 | "strings" 15 | "text/tabwriter" 16 | "time" 17 | "unicode/utf8" 18 | 19 | "github.com/MakeNowJust/heredoc/v2" 20 | "github.com/fatih/color" 21 | "github.com/hibiken/asynq/internal/rdb" 22 | "github.com/spf13/cobra" 23 | ) 24 | 25 | // statsCmd represents the stats command 26 | var statsCmd = &cobra.Command{ 27 | Use: "stats", 28 | Short: "View current state", 29 | Long: heredoc.Doc(` 30 | Stats shows the overview of tasks and queues at that instant. 31 | 32 | The command shows the following: 33 | * Number of tasks in each state 34 | * Number of tasks in each queue 35 | * Aggregate data for the current day 36 | * Basic information about the running redis instance`), 37 | Args: cobra.NoArgs, 38 | Run: stats, 39 | } 40 | 41 | var jsonFlag bool 42 | 43 | func init() { 44 | rootCmd.AddCommand(statsCmd) 45 | statsCmd.Flags().BoolVar(&jsonFlag, "json", false, "Output stats in JSON format.") 46 | 47 | // Here you will define your flags and configuration settings.
48 | 49 | // Cobra supports Persistent Flags which will work for this command 50 | // and all subcommands, e.g.: 51 | // statsCmd.PersistentFlags().String("foo", "", "A help for foo") 52 | 53 | // Cobra supports local flags which will only run when this command 54 | // is called directly, e.g.: 55 | // statsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") 56 | } 57 | 58 | type AggregateStats struct { 59 | Active int `json:"active"` 60 | Pending int `json:"pending"` 61 | Aggregating int `json:"aggregating"` 62 | Scheduled int `json:"scheduled"` 63 | Retry int `json:"retry"` 64 | Archived int `json:"archived"` 65 | Completed int `json:"completed"` 66 | Processed int `json:"processed"` 67 | Failed int `json:"failed"` 68 | Timestamp time.Time `json:"timestamp"` 69 | } 70 | 71 | type FullStats struct { 72 | Aggregate AggregateStats `json:"aggregate"` 73 | QueueStats []*rdb.Stats `json:"queues"` 74 | RedisInfo map[string]string `json:"redis"` 75 | } 76 | 77 | func stats(cmd *cobra.Command, args []string) { 78 | r := createRDB() 79 | 80 | queues, err := r.AllQueues() 81 | if err != nil { 82 | fmt.Println(err) 83 | os.Exit(1) 84 | } 85 | 86 | var aggStats AggregateStats 87 | var stats []*rdb.Stats 88 | for _, qname := range queues { 89 | s, err := r.CurrentStats(qname) 90 | if err != nil { 91 | fmt.Println(err) 92 | os.Exit(1) 93 | } 94 | aggStats.Active += s.Active 95 | aggStats.Pending += s.Pending 96 | aggStats.Aggregating += s.Aggregating 97 | aggStats.Scheduled += s.Scheduled 98 | aggStats.Retry += s.Retry 99 | aggStats.Archived += s.Archived 100 | aggStats.Completed += s.Completed 101 | aggStats.Processed += s.Processed 102 | aggStats.Failed += s.Failed 103 | aggStats.Timestamp = s.Timestamp 104 | stats = append(stats, s) 105 | } 106 | var info map[string]string 107 | if useRedisCluster { 108 | info, err = r.RedisClusterInfo() 109 | } else { 110 | info, err = r.RedisInfo() 111 | } 112 | if err != nil { 113 | fmt.Println(err) 114 | os.Exit(1) 115 | } 116 | 117 | if jsonFlag { 118 | statsJSON, err := json.Marshal(FullStats{ 119 | Aggregate: aggStats, 120 | QueueStats: stats, 121 | RedisInfo: info, 122 | }) 123 | 124 | if err != nil { 125 | fmt.Println(err) 126 | os.Exit(1) 127 | } 128 | 129 | fmt.Println(string(statsJSON)) 130 | return 131 | } 132 | 133 | bold := color.New(color.Bold) 134 | bold.Println("Task Count by State") 135 | printStatsByState(&aggStats) 136 | fmt.Println() 137 | 138 | bold.Println("Task Count by Queue") 139 | printStatsByQueue(stats) 140 | fmt.Println() 141 | 142 | bold.Printf("Daily Stats %s UTC\n", aggStats.Timestamp.UTC().Format("2006-01-02")) 143 | printSuccessFailureStats(&aggStats) 144 | fmt.Println() 145 | 146 | if useRedisCluster { 147 | bold.Println("Redis Cluster Info") 148 | printClusterInfo(info) 149 | } else { 150 | bold.Println("Redis Info") 151 | printInfo(info) 152 | } 153 | fmt.Println() 154 | } 155 | 156 | func printStatsByState(s *AggregateStats) { 157 | format := strings.Repeat("%v\t", 7) + "\n" 158 | tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0) 159 | fmt.Fprintf(tw, format, "active", "pending", "aggregating", "scheduled", "retry", "archived", "completed") 160 | width := maxInt(9 /* defaultWidth */, maxWidthOf(s.Active, s.Pending, s.Aggregating, s.Scheduled, s.Retry, s.Archived, s.Completed)) // length of widest column 161 | sep := strings.Repeat("-", width) 162 | fmt.Fprintf(tw, format, sep, sep, sep, sep, sep, sep, sep) 163 | fmt.Fprintf(tw, format, s.Active, s.Pending, s.Aggregating, s.Scheduled, s.Retry, 
s.Archived, s.Completed) 164 | tw.Flush() 165 | } 166 | 167 | // numDigits returns the number of digits in n. 168 | func numDigits(n int) int { 169 | return len(strconv.Itoa(n)) 170 | } 171 | 172 | // maxWidthOf returns the max number of digits among the provided vals. 173 | func maxWidthOf(vals ...int) int { 174 | max := 0 175 | for _, v := range vals { 176 | if vw := numDigits(v); vw > max { 177 | max = vw 178 | } 179 | } 180 | return max 181 | } 182 | 183 | func maxInt(a, b int) int { 184 | return int(math.Max(float64(a), float64(b))) 185 | } 186 | 187 | func printStatsByQueue(stats []*rdb.Stats) { 188 | var headers, seps, counts []string 189 | maxHeaderWidth := 0 190 | for _, s := range stats { 191 | title := queueTitle(s) 192 | headers = append(headers, title) 193 | if w := utf8.RuneCountInString(title); w > maxHeaderWidth { 194 | maxHeaderWidth = w 195 | } 196 | counts = append(counts, strconv.Itoa(s.Size)) 197 | } 198 | for i := 0; i < len(headers); i++ { 199 | seps = append(seps, strings.Repeat("-", maxHeaderWidth)) 200 | } 201 | format := strings.Repeat("%v\t", len(headers)) + "\n" 202 | tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0) 203 | fmt.Fprintf(tw, format, toInterfaceSlice(headers)...) 204 | fmt.Fprintf(tw, format, toInterfaceSlice(seps)...) 205 | fmt.Fprintf(tw, format, toInterfaceSlice(counts)...) 206 | tw.Flush() 207 | } 208 | 209 | func queueTitle(s *rdb.Stats) string { 210 | var b strings.Builder 211 | b.WriteString(s.Queue) 212 | if s.Paused { 213 | b.WriteString(" (paused)") 214 | } 215 | return b.String() 216 | } 217 | 218 | func printSuccessFailureStats(s *AggregateStats) { 219 | format := strings.Repeat("%v\t", 3) + "\n" 220 | tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0) 221 | fmt.Fprintf(tw, format, "processed", "failed", "error rate") 222 | fmt.Fprintf(tw, format, "---------", "------", "----------") 223 | var errrate string 224 | if s.Processed == 0 { 225 | errrate = "N/A" 226 | } else { 227 | errrate = fmt.Sprintf("%.2f%%", float64(s.Failed)/float64(s.Processed)*100) 228 | } 229 | fmt.Fprintf(tw, format, s.Processed, s.Failed, errrate) 230 | tw.Flush() 231 | } 232 | 233 | func printInfo(info map[string]string) { 234 | format := strings.Repeat("%v\t", 5) + "\n" 235 | tw := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', 0) 236 | fmt.Fprintf(tw, format, "version", "uptime", "connections", "memory usage", "peak memory usage") 237 | fmt.Fprintf(tw, format, "-------", "------", "-----------", "------------", "-----------------") 238 | fmt.Fprintf(tw, format, 239 | info["redis_version"], 240 | fmt.Sprintf("%s days", info["uptime_in_days"]), 241 | info["connected_clients"], 242 | fmt.Sprintf("%sB", info["used_memory_human"]), 243 | fmt.Sprintf("%sB", info["used_memory_peak_human"]), 244 | ) 245 | tw.Flush() 246 | } 247 | 248 | func printClusterInfo(info map[string]string) { 249 | printTable( 250 | []string{"State", "Known Nodes", "Cluster Size"}, 251 | func(w io.Writer, tmpl string) { 252 | fmt.Fprintf(w, tmpl, 253 | strings.ToUpper(info["cluster_state"]), 254 | info["cluster_known_nodes"], 255 | info["cluster_size"], 256 | ) 257 | }, 258 | ) 259 | } 260 | 261 | func toInterfaceSlice(strs []string) []interface{} { 262 | var res []interface{} 263 | for _, s := range strs { 264 | res = append(res, s) 265 | } 266 | return res 267 | } 268 | -------------------------------------------------------------------------------- /tools/asynq/main.go: -------------------------------------------------------------------------------- 1 | //
Copyright 2020 Kentaro Hibino. All rights reserved. 2 | // Use of this source code is governed by a MIT license 3 | // that can be found in the LICENSE file. 4 | 5 | package main 6 | 7 | import "github.com/hibiken/asynq/tools/asynq/cmd" 8 | 9 | func main() { 10 | cmd.Execute() 11 | } 12 | -------------------------------------------------------------------------------- /tools/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/hibiken/asynq/tools 2 | 3 | go 1.22 4 | 5 | require ( 6 | github.com/MakeNowJust/heredoc/v2 v2.0.1 7 | github.com/fatih/color v1.18.0 8 | github.com/gdamore/tcell/v2 v2.5.1 9 | github.com/google/go-cmp v0.6.0 10 | github.com/hibiken/asynq v0.25.0 11 | github.com/hibiken/asynq/x v0.0.0-20220131170841-349f4c50fb1d 12 | github.com/mattn/go-runewidth v0.0.16 13 | github.com/mitchellh/go-homedir v1.1.0 14 | github.com/prometheus/client_golang v1.11.1 15 | github.com/redis/go-redis/v9 v9.7.0 16 | github.com/spf13/cobra v1.1.1 17 | github.com/spf13/pflag v1.0.5 18 | github.com/spf13/viper v1.7.0 19 | golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 20 | ) 21 | 22 | require ( 23 | github.com/beorn7/perks v1.0.1 // indirect 24 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 25 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 26 | github.com/fsnotify/fsnotify v1.4.9 // indirect 27 | github.com/gdamore/encoding v1.0.0 // indirect 28 | github.com/golang/protobuf v1.5.3 // indirect 29 | github.com/google/uuid v1.6.0 // indirect 30 | github.com/hashicorp/hcl v1.0.0 // indirect 31 | github.com/inconshreveable/mousetrap v1.0.0 // indirect 32 | github.com/lucasb-eyer/go-colorful v1.2.0 // indirect 33 | github.com/magiconair/properties v1.8.1 // indirect 34 | github.com/mattn/go-colorable v0.1.13 // indirect 35 | github.com/mattn/go-isatty v0.0.20 // indirect 36 | github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect 37 | github.com/mitchellh/mapstructure v1.1.2 // indirect 38 | github.com/pelletier/go-toml v1.2.0 // indirect 39 | github.com/prometheus/client_model v0.2.0 // indirect 40 | github.com/prometheus/common v0.26.0 // indirect 41 | github.com/prometheus/procfs v0.6.0 // indirect 42 | github.com/rivo/uniseg v0.2.0 // indirect 43 | github.com/robfig/cron/v3 v3.0.1 // indirect 44 | github.com/spf13/afero v1.1.2 // indirect 45 | github.com/spf13/cast v1.7.0 // indirect 46 | github.com/spf13/jwalterweatherman v1.0.0 // indirect 47 | github.com/subosito/gotenv v1.2.0 // indirect 48 | golang.org/x/sys v0.26.0 // indirect 49 | golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect 50 | golang.org/x/text v0.3.8 // indirect 51 | golang.org/x/time v0.7.0 // indirect 52 | google.golang.org/protobuf v1.35.1 // indirect 53 | gopkg.in/ini.v1 v1.51.0 // indirect 54 | gopkg.in/yaml.v2 v2.4.0 // indirect 55 | ) 56 | -------------------------------------------------------------------------------- /tools/metrics_exporter/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "net/http" 8 | 9 | "github.com/hibiken/asynq" 10 | "github.com/hibiken/asynq/x/metrics" 11 | "github.com/prometheus/client_golang/prometheus" 12 | "github.com/prometheus/client_golang/prometheus/collectors" 13 | "github.com/prometheus/client_golang/prometheus/promhttp" 14 | ) 15 | 16 | // Declare command-line flags. 17 | // These variables are bound to flags in init().
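// Illustrative usage (a sketch, not part of the original source): with the flag
// names and defaults defined in this file, the exporter could be started from the
// tools module directory and checked locally, for example:
//
//	$ go run ./metrics_exporter -redis-addr=127.0.0.1:6379 -port=9876
//	$ curl http://localhost:9876/metrics
//
// The /metrics path and the 9876 default come from main() and init() below; the
// curl invocation is just one way to verify the endpoint by hand.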
18 | var ( 19 | flagRedisAddr string 20 | flagRedisDB int 21 | flagRedisPassword string 22 | flagRedisUsername string 23 | flagPort int 24 | ) 25 | 26 | func init() { 27 | flag.StringVar(&flagRedisAddr, "redis-addr", "127.0.0.1:6379", "host:port of redis server to connect to") 28 | flag.IntVar(&flagRedisDB, "redis-db", 0, "redis DB number to use") 29 | flag.StringVar(&flagRedisPassword, "redis-password", "", "password used to connect to redis server") 30 | flag.StringVar(&flagRedisUsername, "redis-username", "", "username used to connect to redis server") 31 | flag.IntVar(&flagPort, "port", 9876, "port to use for the HTTP server") 32 | } 33 | 34 | func main() { 35 | flag.Parse() 36 | // Using NewPedanticRegistry here to test the implementation of Collectors and Metrics. 37 | reg := prometheus.NewPedanticRegistry() 38 | 39 | inspector := asynq.NewInspector(asynq.RedisClientOpt{ 40 | Addr: flagRedisAddr, 41 | DB: flagRedisDB, 42 | Password: flagRedisPassword, 43 | Username: flagRedisUsername, 44 | }) 45 | 46 | reg.MustRegister( 47 | metrics.NewQueueMetricsCollector(inspector), 48 | // Add the standard process and go metrics to the registry 49 | collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), 50 | collectors.NewGoCollector(), 51 | ) 52 | 53 | http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{})) 54 | log.Printf("exporter server is listening on port: %d\n", flagPort) 55 | log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", flagPort), nil)) 56 | } 57 | -------------------------------------------------------------------------------- /x/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/hibiken/asynq/x 2 | 3 | go 1.22 4 | 5 | require ( 6 | github.com/google/uuid v1.6.0 7 | github.com/hibiken/asynq v0.25.0 8 | github.com/prometheus/client_golang v1.20.5 9 | github.com/redis/go-redis/v9 v9.7.0 10 | ) 11 | 12 | require ( 13 | github.com/beorn7/perks v1.0.1 // indirect 14 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 15 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 16 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 17 | github.com/prometheus/client_model v0.6.1 // indirect 18 | github.com/prometheus/common v0.55.0 // indirect 19 | github.com/prometheus/procfs v0.15.1 // indirect 20 | github.com/robfig/cron/v3 v3.0.1 // indirect 21 | github.com/spf13/cast v1.7.0 // indirect 22 | golang.org/x/sys v0.26.0 // indirect 23 | golang.org/x/time v0.7.0 // indirect 24 | google.golang.org/protobuf v1.35.1 // indirect 25 | ) 26 | -------------------------------------------------------------------------------- /x/go.sum: -------------------------------------------------------------------------------- 1 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 2 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 3 | github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= 4 | github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= 5 | github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= 6 | github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= 7 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 8 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 9 | github.com/dgryski/go-rendezvous 
v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 10 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 11 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 12 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 13 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 14 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 15 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 16 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 17 | github.com/hibiken/asynq v0.25.0 h1:VCPyRRrrjFChsTSI8x5OCPu51MlEz6Rk+1p0kHKnZug= 18 | github.com/hibiken/asynq v0.25.0/go.mod h1:DYQ1etBEl2Y+uSkqFElGYbk3M0ujLVwCfWE+TlvxtEk= 19 | github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= 20 | github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= 21 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 22 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 23 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 24 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 25 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 26 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 27 | github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= 28 | github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= 29 | github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 30 | github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 31 | github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= 32 | github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= 33 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 34 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 35 | github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= 36 | github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= 37 | github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= 38 | github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= 39 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 40 | github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 41 | github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= 42 | github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= 43 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 44 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 45 | golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= 46 | golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 47 | golang.org/x/time v0.7.0 
h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= 48 | golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= 49 | google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= 50 | google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 51 | -------------------------------------------------------------------------------- /x/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | // Package metrics provides implementations of prometheus.Collector to collect Asynq queue metrics. 2 | package metrics 3 | 4 | import ( 5 | "fmt" 6 | "log" 7 | 8 | "github.com/hibiken/asynq" 9 | "github.com/prometheus/client_golang/prometheus" 10 | ) 11 | 12 | // Namespace used in fully-qualified metrics names. 13 | const namespace = "asynq" 14 | 15 | // QueueMetricsCollector gathers queue metrics. 16 | // It implements prometheus.Collector interface. 17 | // 18 | // All metrics exported from this collector have prefix "asynq". 19 | type QueueMetricsCollector struct { 20 | inspector *asynq.Inspector 21 | } 22 | 23 | // collectQueueInfo gathers QueueInfo of all queues. 24 | // Since this operation is expensive, it must be called once per collection. 25 | func (qmc *QueueMetricsCollector) collectQueueInfo() ([]*asynq.QueueInfo, error) { 26 | qnames, err := qmc.inspector.Queues() 27 | if err != nil { 28 | return nil, fmt.Errorf("failed to get queue names: %v", err) 29 | } 30 | infos := make([]*asynq.QueueInfo, len(qnames)) 31 | for i, qname := range qnames { 32 | qinfo, err := qmc.inspector.GetQueueInfo(qname) 33 | if err != nil { 34 | return nil, fmt.Errorf("failed to get queue info: %v", err) 35 | } 36 | infos[i] = qinfo 37 | } 38 | return infos, nil 39 | } 40 | 41 | // Descriptors used by QueueMetricsCollector 42 | var ( 43 | tasksQueuedDesc = prometheus.NewDesc( 44 | prometheus.BuildFQName(namespace, "", "tasks_enqueued_total"), 45 | "Number of tasks enqueued; broken down by queue and state.", 46 | []string{"queue", "state"}, nil, 47 | ) 48 | 49 | queueSizeDesc = prometheus.NewDesc( 50 | prometheus.BuildFQName(namespace, "", "queue_size"), 51 | "Number of tasks in a queue", 52 | []string{"queue"}, nil, 53 | ) 54 | 55 | queueLatencyDesc = prometheus.NewDesc( 56 | prometheus.BuildFQName(namespace, "", "queue_latency_seconds"), 57 | "Number of seconds the oldest pending task is waiting in pending state to be processed.", 58 | []string{"queue"}, nil, 59 | ) 60 | 61 | queueMemUsgDesc = prometheus.NewDesc( 62 | prometheus.BuildFQName(namespace, "", "queue_memory_usage_approx_bytes"), 63 | "Approximate amount of memory, in bytes, used by a given queue (estimated by sampling).", 64 | []string{"queue"}, nil, 65 | ) 66 | 67 | tasksProcessedTotalDesc = prometheus.NewDesc( 68 | prometheus.BuildFQName(namespace, "", "tasks_processed_total"), 69 | "Number of tasks processed (both succeeded and failed); broken down by queue", 70 | []string{"queue"}, nil, 71 | ) 72 | 73 | tasksFailedTotalDesc = prometheus.NewDesc( 74 | prometheus.BuildFQName(namespace, "", "tasks_failed_total"), 75 | "Number of tasks failed; broken down by queue", 76 | []string{"queue"}, nil, 77 | ) 78 | 79 | pausedQueues = prometheus.NewDesc( 80 | prometheus.BuildFQName(namespace, "", "queue_paused_total"), 81 | "Whether the queue is paused (1 if paused, 0 otherwise)", 82 | []string{"queue"}, nil, 83 | ) 84 | ) 85 | 86 | func (qmc *QueueMetricsCollector) Describe(ch chan<- *prometheus.Desc) { 87 | prometheus.DescribeByCollect(qmc, ch) 88 | } 89 | 90 | func (qmc
*QueueMetricsCollector) Collect(ch chan<- prometheus.Metric) { 91 | queueInfos, err := qmc.collectQueueInfo() 92 | if err != nil { 93 | log.Printf("Failed to collect metrics data: %v", err) 94 | } 95 | for _, info := range queueInfos { 96 | ch <- prometheus.MustNewConstMetric( 97 | tasksQueuedDesc, 98 | prometheus.GaugeValue, 99 | float64(info.Active), 100 | info.Queue, 101 | "active", 102 | ) 103 | ch <- prometheus.MustNewConstMetric( 104 | tasksQueuedDesc, 105 | prometheus.GaugeValue, 106 | float64(info.Pending), 107 | info.Queue, 108 | "pending", 109 | ) 110 | ch <- prometheus.MustNewConstMetric( 111 | tasksQueuedDesc, 112 | prometheus.GaugeValue, 113 | float64(info.Scheduled), 114 | info.Queue, 115 | "scheduled", 116 | ) 117 | ch <- prometheus.MustNewConstMetric( 118 | tasksQueuedDesc, 119 | prometheus.GaugeValue, 120 | float64(info.Retry), 121 | info.Queue, 122 | "retry", 123 | ) 124 | ch <- prometheus.MustNewConstMetric( 125 | tasksQueuedDesc, 126 | prometheus.GaugeValue, 127 | float64(info.Archived), 128 | info.Queue, 129 | "archived", 130 | ) 131 | ch <- prometheus.MustNewConstMetric( 132 | tasksQueuedDesc, 133 | prometheus.GaugeValue, 134 | float64(info.Completed), 135 | info.Queue, 136 | "completed", 137 | ) 138 | 139 | ch <- prometheus.MustNewConstMetric( 140 | queueSizeDesc, 141 | prometheus.GaugeValue, 142 | float64(info.Size), 143 | info.Queue, 144 | ) 145 | 146 | ch <- prometheus.MustNewConstMetric( 147 | queueLatencyDesc, 148 | prometheus.GaugeValue, 149 | info.Latency.Seconds(), 150 | info.Queue, 151 | ) 152 | 153 | ch <- prometheus.MustNewConstMetric( 154 | queueMemUsgDesc, 155 | prometheus.GaugeValue, 156 | float64(info.MemoryUsage), 157 | info.Queue, 158 | ) 159 | 160 | ch <- prometheus.MustNewConstMetric( 161 | tasksProcessedTotalDesc, 162 | prometheus.CounterValue, 163 | float64(info.ProcessedTotal), 164 | info.Queue, 165 | ) 166 | 167 | ch <- prometheus.MustNewConstMetric( 168 | tasksFailedTotalDesc, 169 | prometheus.CounterValue, 170 | float64(info.FailedTotal), 171 | info.Queue, 172 | ) 173 | 174 | pausedValue := 0 // zero to indicate "not paused" 175 | if info.Paused { 176 | pausedValue = 1 177 | } 178 | ch <- prometheus.MustNewConstMetric( 179 | pausedQueues, 180 | prometheus.GaugeValue, 181 | float64(pausedValue), 182 | info.Queue, 183 | ) 184 | } 185 | } 186 | 187 | // NewQueueMetricsCollector returns a collector that exports metrics about Asynq queues. 
188 | func NewQueueMetricsCollector(inspector *asynq.Inspector) *QueueMetricsCollector { 189 | return &QueueMetricsCollector{inspector: inspector} 190 | } 191 | -------------------------------------------------------------------------------- /x/rate/example_test.go: -------------------------------------------------------------------------------- 1 | package rate_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/hibiken/asynq" 9 | "github.com/hibiken/asynq/x/rate" 10 | ) 11 | 12 | type RateLimitError struct { 13 | RetryIn time.Duration 14 | } 15 | 16 | func (e *RateLimitError) Error() string { 17 | return fmt.Sprintf("rate limited (retry in %v)", e.RetryIn) 18 | } 19 | 20 | func ExampleNewSemaphore() { 21 | redisConnOpt := asynq.RedisClientOpt{Addr: ":6379"} 22 | sema := rate.NewSemaphore(redisConnOpt, "my_queue", 10) 23 | // call sema.Close() when appropriate 24 | 25 | _ = asynq.HandlerFunc(func(ctx context.Context, task *asynq.Task) error { 26 | ok, err := sema.Acquire(ctx) 27 | if err != nil { 28 | return err 29 | } 30 | if !ok { 31 | return &RateLimitError{RetryIn: 30 * time.Second} 32 | } 33 | 34 | // Make sure to release the token once we're done. 35 | defer sema.Release(ctx) 36 | 37 | // Process task 38 | return nil 39 | }) 40 | } 41 | -------------------------------------------------------------------------------- /x/rate/semaphore.go: -------------------------------------------------------------------------------- 1 | // Package rate contains rate limiting strategies for asynq.Handler(s). 2 | package rate 3 | 4 | import ( 5 | "context" 6 | "fmt" 7 | "strings" 8 | "time" 9 | 10 | "github.com/hibiken/asynq" 11 | asynqcontext "github.com/hibiken/asynq/internal/context" 12 | "github.com/redis/go-redis/v9" 13 | ) 14 | 15 | // NewSemaphore creates a counting Semaphore for the given scope with the given number of tokens. 16 | func NewSemaphore(rco asynq.RedisConnOpt, scope string, maxTokens int) *Semaphore { 17 | rc, ok := rco.MakeRedisClient().(redis.UniversalClient) 18 | if !ok { 19 | panic(fmt.Sprintf("rate.NewSemaphore: unsupported RedisConnOpt type %T", rco)) 20 | } 21 | 22 | if maxTokens < 1 { 23 | panic("rate.NewSemaphore: maxTokens cannot be less than 1") 24 | } 25 | 26 | if len(strings.TrimSpace(scope)) == 0 { 27 | panic("rate.NewSemaphore: scope should not be empty") 28 | } 29 | 30 | return &Semaphore{ 31 | rc: rc, 32 | scope: scope, 33 | maxTokens: maxTokens, 34 | } 35 | } 36 | 37 | // Semaphore is a distributed counting semaphore which can be used to set maxTokens across multiple asynq servers. 38 | type Semaphore struct { 39 | rc redis.UniversalClient 40 | maxTokens int 41 | scope string 42 | } 43 | 44 | // KEYS[1] -> asynq:sema:<scope> 45 | // ARGV[1] -> max concurrency 46 | // ARGV[2] -> current time in unix time 47 | // ARGV[3] -> deadline in unix time 48 | // ARGV[4] -> task ID 49 | var acquireCmd = redis.NewScript(` 50 | redis.call("ZREMRANGEBYSCORE", KEYS[1], "-inf", tonumber(ARGV[2])-1) 51 | local count = redis.call("ZCARD", KEYS[1]) 52 | 53 | if (count < tonumber(ARGV[1])) then 54 | redis.call("ZADD", KEYS[1], ARGV[3], ARGV[4]) 55 | return 'true' 56 | else 57 | return 'false' 58 | end 59 | `) 60 | 61 | // Acquire attempts to acquire a token from the semaphore.
62 | // - Returns (true, nil) when a token was acquired (i.e. the current token count is below maxTokens) 63 | // - Returns (false, nil) when a token cannot be acquired 64 | // - Returns (false, error) otherwise 65 | // 66 | // The context.Context passed to Acquire must have a deadline set; 67 | // this ensures that the token is released if the job goroutine crashes and does not call Release. 68 | func (s *Semaphore) Acquire(ctx context.Context) (bool, error) { 69 | d, ok := ctx.Deadline() 70 | if !ok { 71 | return false, fmt.Errorf("provided context must have a deadline") 72 | } 73 | 74 | taskID, ok := asynqcontext.GetTaskID(ctx) 75 | if !ok { 76 | return false, fmt.Errorf("provided context is missing task ID value") 77 | } 78 | 79 | return acquireCmd.Run(ctx, s.rc, 80 | []string{semaphoreKey(s.scope)}, 81 | s.maxTokens, 82 | time.Now().Unix(), 83 | d.Unix(), 84 | taskID, 85 | ).Bool() 86 | } 87 | 88 | // Release will release the token on the counting semaphore. 89 | func (s *Semaphore) Release(ctx context.Context) error { 90 | taskID, ok := asynqcontext.GetTaskID(ctx) 91 | if !ok { 92 | return fmt.Errorf("provided context is missing task ID value") 93 | } 94 | 95 | n, err := s.rc.ZRem(ctx, semaphoreKey(s.scope), taskID).Result() 96 | if err != nil { 97 | return fmt.Errorf("redis command failed: %v", err) 98 | } 99 | 100 | if n == 0 { 101 | return fmt.Errorf("no token found for task %q", taskID) 102 | } 103 | 104 | return nil 105 | } 106 | 107 | // Close closes the connection to redis. 108 | func (s *Semaphore) Close() error { 109 | return s.rc.Close() 110 | } 111 | 112 | func semaphoreKey(scope string) string { 113 | return "asynq:sema:" + scope 114 | } 115 | --------------------------------------------------------------------------------
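Closing note on the rate package: ExampleNewSemaphore above returns a RateLimitError when no token is available, but it stops short of showing how a server can honor RetryIn. The following is a minimal sketch, not code from this repository; it assumes the RateLimitError type from x/rate/example_test.go and uses the IsFailure and RetryDelayFunc hooks of asynq.Config so that rate-limited runs are retried after RetryIn instead of being recorded as failures.

package main

import (
	"errors"
	"time"

	"github.com/hibiken/asynq"
)

// RateLimitError mirrors the type defined in x/rate/example_test.go.
type RateLimitError struct{ RetryIn time.Duration }

func (e *RateLimitError) Error() string { return "rate limited" }

func main() {
	srv := asynq.NewServer(asynq.RedisClientOpt{Addr: ":6379"}, asynq.Config{
		Concurrency: 10,
		// Treat rate-limited runs as non-failures so they don't skew queue stats.
		IsFailure: func(err error) bool {
			var rle *RateLimitError
			return !errors.As(err, &rle)
		},
		// Retry rate-limited tasks after the duration suggested by the handler.
		RetryDelayFunc: func(n int, err error, task *asynq.Task) time.Duration {
			var rle *RateLimitError
			if errors.As(err, &rle) {
				return rle.RetryIn
			}
			return asynq.DefaultRetryDelayFunc(n, err, task)
		},
	})
	// Register handlers on a ServeMux and call srv.Run(mux) as usual.
	_ = srv
}

With this configuration, a handler that returns &RateLimitError{RetryIn: 30 * time.Second} (as in the example test) is re-enqueued roughly 30 seconds later rather than being counted as a failed run.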