├── .codespellrc ├── .editorconfig ├── .github ├── FUNDING.yml ├── actions │ └── setup-deps │ │ └── action.yaml ├── dependabot.yml ├── linters │ ├── .ecrc │ ├── .golangci.yml │ ├── .jscpd.json │ ├── .markdown-lint.yml │ └── .yaml-lint.yml └── workflows │ ├── codeql-analysis.yml │ ├── lint.yml │ └── testing.yml ├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── Taskfile.yaml ├── _example └── outbox-worker-kafka │ ├── README.md │ ├── Taskfile.yaml │ ├── client.go │ ├── common.go │ ├── docker-compose.yml │ ├── go.mod │ ├── go.sum │ ├── main.go │ └── worker.go ├── adapter ├── db.go ├── libpq │ └── db.go ├── logger.go ├── pgxv4 │ └── db.go ├── pgxv5 │ └── db.go ├── slog │ ├── logger.go │ └── logger_test.go ├── testing │ ├── all.go │ ├── libpq.go │ ├── mock.go │ ├── pgxv4.go │ └── pgxv5.go ├── zap │ ├── logger.go │ └── logger_test.go └── zerolog │ ├── logger.go │ └── logger_test.go ├── backoff.go ├── backoff_test.go ├── client.go ├── client_option.go ├── client_option_test.go ├── client_test.go ├── ctx.go ├── ctx_test.go ├── doc.go ├── docker-compose.yml ├── enqueue_test.go ├── error.go ├── error_test.go ├── go.mod ├── go.sum ├── helpers.go ├── job.go ├── job_test.go ├── migrations ├── job_id_to_ulid.sql └── schema.sql ├── worker.go ├── worker_option.go ├── worker_option_test.go └── worker_test.go /.codespellrc: -------------------------------------------------------------------------------- 1 | [codespell] 2 | skip = go.mod,go.sum,coverage.txt,.codespellrc,./vendor,./_example/outbox-worker-kafka/vendor,./.git,./.idea 3 | check-hidden = true 4 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | indent_style = space 5 | indent_size = 2 6 | end_of_line = lf 7 | charset = utf-8 8 | trim_trailing_whitespace = true 9 | insert_final_newline = true 10 | 11 | [*.go] 12 | indent_style = tab 13 | indent_size = 4 14 | ij_continuation_indent_size = 4 15 | ij_go_GROUP_CURRENT_PROJECT_IMPORTS = true 16 | ij_go_add_parentheses_for_single_import = true 17 | ij_go_group_stdlib_imports = true 18 | ij_go_import_sorting = gofmt 19 | ij_go_local_group_mode = project 20 | ij_go_move_all_imports_in_one_declaration = true 21 | ij_go_move_all_stdlib_imports_in_one_group = true 22 | 23 | [go.mod] 24 | indent_style = tab 25 | indent_size = 4 26 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # These are supported funding model platforms 3 | 4 | #github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 5 | #patreon: # Replace with a single Patreon username 6 | #open_collective: # Replace with a single Open Collective username 7 | #ko_fi: # Replace with a single Ko-fi username 8 | #tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 9 | #community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 10 | #liberapay: # Replace with a single Liberapay username 11 | #issuehunt: # Replace with a single IssueHunt username 12 | #otechie: # Replace with a single Otechie username 13 | #lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 14 | custom: ["paypal.me/vgarvardt"] 15 | -------------------------------------------------------------------------------- 
/.github/actions/setup-deps/action.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: setup-deps 3 | description: Install all the dependencies required for workflows 4 | inputs: 5 | go-version: 6 | required: true 7 | description: The Go version to download (if necessary) and use. Supports semver spec and ranges. 8 | token: 9 | required: true 10 | description: GitHub token, most likely "secrets.GITHUB_TOKEN" 11 | runs: 12 | using: composite 13 | steps: 14 | - name: Set up Go 15 | uses: actions/setup-go@v5 16 | with: 17 | go-version: ${{ inputs.go-version }} 18 | 19 | - name: Install Task 20 | uses: arduino/setup-task@v2 21 | with: 22 | repo-token: ${{ inputs.token }} 23 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # To get started with Dependabot version updates, you'll need to specify which 3 | # package ecosystems to update and where the package manifests are located. 4 | # Please see the documentation for all configuration options: 5 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 6 | 7 | version: 2 8 | updates: 9 | - package-ecosystem: "gomod" 10 | directory: "/" 11 | schedule: 12 | interval: "daily" 13 | - package-ecosystem: "github-actions" 14 | directory: "/" 15 | schedule: 16 | interval: "weekly" 17 | -------------------------------------------------------------------------------- /.github/linters/.ecrc: -------------------------------------------------------------------------------- 1 | { 2 | "Verbose": false, 3 | "Debug": false, 4 | "IgnoreDefaults": false, 5 | "SpacesAftertabs": false, 6 | "NoColor": false, 7 | "Exclude": ["testfiles", "README.md", "doc.go"], 8 | "AllowedContentTypes": [], 9 | "PassedFiles": [], 10 | "Disable": { 11 | "EndOfLine": false, 12 | "Indentation": false, 13 | "InsertFinalNewline": false, 14 | "TrimTrailingWhitespace": false, 15 | "IndentSize": false, 16 | "MaxLineLength": false 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /.github/linters/.golangci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "2" 3 | linters: 4 | enable: 5 | - goconst 6 | - gocritic 7 | - gocyclo 8 | - gosec 9 | - revive 10 | - unconvert 11 | settings: 12 | errcheck: 13 | check-blank: true 14 | gocyclo: 15 | min-complexity: 15 16 | revive: 17 | rules: 18 | - name: exported 19 | arguments: 20 | - disableStutteringCheck 21 | exclusions: 22 | generated: lax 23 | presets: 24 | - common-false-positives 25 | - legacy 26 | - std-error-handling 27 | rules: 28 | - path: _test\.go 29 | linters: 30 | - dupl 31 | - goconst 32 | - gosec 33 | - staticcheck 34 | paths: 35 | - third_party$ 36 | - builtin$ 37 | - examples$ 38 | formatters: 39 | enable: 40 | - gci 41 | - gofmt 42 | - goimports 43 | settings: 44 | gci: 45 | sections: 46 | - standard 47 | - default 48 | - prefix(github.com/vgarvardt/gue) 49 | custom-order: true 50 | exclusions: 51 | generated: lax 52 | paths: 53 | - third_party$ 54 | - builtin$ 55 | - examples$ 56 | -------------------------------------------------------------------------------- /.github/linters/.jscpd.json: -------------------------------------------------------------------------------- 1 | { 2 | "threshold": 5, 3 | "reporters": [ 4 | "consoleFull" 5 | ], 6 | "ignore": [ 7 | "**/*_test.go", 8 
| "**/testing/mock.go" 9 | ], 10 | "absolute": true 11 | } 12 | -------------------------------------------------------------------------------- /.github/linters/.markdown-lint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ########################### 3 | ########################### 4 | ## Markdown Linter rules ## 5 | ########################### 6 | ########################### 7 | 8 | # Linter rules doc: 9 | # - https://github.com/DavidAnson/markdownlint 10 | # 11 | # Note: 12 | # To comment out a single error: 13 | # 14 | # any violations you want 15 | # 16 | # 17 | 18 | ############### 19 | # Rules by id # 20 | ############### 21 | MD004: false # Unordered list style 22 | MD007: 23 | indent: 2 # Unordered list indentation 24 | MD010: 25 | code_blocks: false # Ignore no-hard-tabs in cde blocks because that's how formatting for golang works 26 | MD013: 27 | line_length: 120 # Line length 80 is far too short 28 | MD024: 29 | siblings_only: true # Allow the same headers for different sections 30 | MD026: 31 | punctuation: ".,;:!。,;:" # List of not allowed 32 | MD029: false # Ordered list item prefix 33 | MD033: false # Allow inline HTML 34 | MD036: false # Emphasis used instead of a heading 35 | 36 | ################# 37 | # Rules by tags # 38 | ################# 39 | blank_lines: false # Error on blank lines 40 | -------------------------------------------------------------------------------- /.github/linters/.yaml-lint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ########################################### 3 | # These are the rules used for # 4 | # linting all the yaml files in the stack # 5 | # NOTE: # 6 | # You can disable line with: # 7 | # # yamllint disable-line # 8 | ########################################### 9 | rules: 10 | braces: 11 | level: warning 12 | min-spaces-inside: 0 13 | max-spaces-inside: 1 14 | min-spaces-inside-empty: 1 15 | max-spaces-inside-empty: 5 16 | brackets: 17 | level: warning 18 | min-spaces-inside: 0 19 | max-spaces-inside: 1 20 | min-spaces-inside-empty: 1 21 | max-spaces-inside-empty: 5 22 | colons: 23 | level: warning 24 | max-spaces-before: 0 25 | max-spaces-after: 1 26 | commas: 27 | level: warning 28 | max-spaces-before: 0 29 | min-spaces-after: 1 30 | max-spaces-after: 1 31 | comments: disable 32 | comments-indentation: disable 33 | document-end: disable 34 | document-start: 35 | level: warning 36 | present: true 37 | empty-lines: 38 | level: warning 39 | max: 2 40 | max-start: 0 41 | max-end: 0 42 | hyphens: 43 | level: warning 44 | max-spaces-after: 1 45 | indentation: 46 | level: warning 47 | spaces: consistent 48 | indent-sequences: true 49 | check-multi-line-strings: false 50 | key-duplicates: enable 51 | line-length: 52 | level: warning 53 | max: 120 54 | allow-non-breakable-words: true 55 | allow-non-breakable-inline-mappings: true 56 | new-line-at-end-of-file: disable 57 | new-lines: 58 | type: unix 59 | trailing-spaces: disable 60 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "CodeQL" 3 | 4 | on: 5 | push: 6 | branches: [master] 7 | pull_request: 8 | branches: [master] 9 | schedule: 10 | - cron: "0 9 * * 1" 11 | 12 | jobs: 13 | analyze: 14 | name: Analyze 15 | runs-on: ubuntu-latest 16 | 17 | permissions: 18 | # required for all workflows 19 | security-events: write 
20 | 21 | steps: 22 | - name: Checkout repository 23 | uses: actions/checkout@v4 24 | 25 | - name: Setup dependencies 26 | uses: ./.github/actions/setup-deps 27 | with: 28 | go-version: "1.23" 29 | token: ${{secrets.GITHUB_TOKEN}} 30 | 31 | - name: Initialize CodeQL 32 | uses: github/codeql-action/init@v3 33 | with: 34 | languages: go 35 | 36 | - name: Perform CodeQL Analysis 37 | uses: github/codeql-action/analyze@v3 38 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Lint" 3 | 4 | concurrency: 5 | group: "${{ github.workflow }}-${{ github.head_ref }}" 6 | cancel-in-progress: ${{ github.event_name == 'pull_request' }} 7 | 8 | on: 9 | push: 10 | branches: [master] 11 | pull_request: 12 | 13 | jobs: 14 | lint: 15 | name: Lint 16 | runs-on: ubuntu-latest 17 | timeout-minutes: 10 18 | 19 | steps: 20 | - name: Check out code 21 | uses: actions/checkout@v4 22 | with: 23 | # Full git history is needed to get a proper list of changed files within `super-linter` 24 | fetch-depth: 0 25 | 26 | - name: Lint Code Base 27 | uses: github/super-linter@v7 28 | env: 29 | VALIDATE_ALL_CODEBASE: ${{ github.event_name != 'pull_request' }} 30 | DEFAULT_BRANCH: master 31 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 32 | # go validator works pretty bad in super-linter, we'll use the original one 33 | VALIDATE_GO: false 34 | VALIDATE_GO_MODULES: false 35 | # do not validate SQL - linters are pretty useless in case of this library, 36 | # and the functionality is tested using integration tests 37 | VALIDATE_SQL: false 38 | VALIDATE_SQLFLUFF: false 39 | VALIDATE_CHECKOV: false 40 | # it forces all list items to be on a single line in .github/linters/.jscpd.json that is not pretty at all 41 | VALIDATE_JSON_PRETTIER: false 42 | 43 | - name: Setup dependencies 44 | uses: ./.github/actions/setup-deps 45 | with: 46 | go-version: "1.23" 47 | token: ${{ secrets.GITHUB_TOKEN }} 48 | 49 | - name: Lint Golang 50 | uses: golangci/golangci-lint-action@v8 51 | with: 52 | only-new-issues: ${{ github.event_name == 'pull_request' }} 53 | args: > 54 | --config=./.github/linters/.golangci.yml 55 | --timeout=5m 56 | 57 | codespell: 58 | name: Check spelling 59 | runs-on: ubuntu-latest 60 | timeout-minutes: 5 61 | 62 | steps: 63 | - uses: actions/checkout@v4 64 | - uses: codespell-project/actions-codespell@v2 65 | -------------------------------------------------------------------------------- /.github/workflows/testing.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Test" 3 | 4 | concurrency: 5 | group: "${{ github.workflow }}-${{ github.head_ref }}" 6 | cancel-in-progress: ${{ github.event_name == 'pull_request' }} 7 | 8 | on: 9 | push: 10 | branches: [master] 11 | pull_request: 12 | 13 | jobs: 14 | test: 15 | name: Test 16 | runs-on: ubuntu-latest 17 | strategy: 18 | matrix: 19 | go-version: ["1.23", "1.24", "stable"] 20 | timeout-minutes: 10 21 | steps: 22 | - name: Check out code 23 | uses: actions/checkout@v4 24 | 25 | - name: Setup dependencies 26 | uses: ./.github/actions/setup-deps 27 | with: 28 | go-version: ${{ matrix.go-version }} 29 | token: ${{ secrets.GITHUB_TOKEN }} 30 | 31 | - name: Run tests 32 | run: task test 33 | 34 | - name: Upload coverage to Codecov 35 | if: success() 36 | uses: codecov/codecov-action@v5 37 | env: 38 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 39 | with: 40 | files: 
./coverage.txt 41 | fail_ci_if_error: false 42 | 43 | summary: 44 | name: Test 45 | runs-on: ubuntu-latest 46 | needs: [test] 47 | timeout-minutes: 1 48 | steps: 49 | - name: Dummy task 50 | run: echo 'Dummy summary task to have one PR status for all tested versions' 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | ### Go template 3 | # Binaries for programs and plugins 4 | *.exe 5 | *.exe~ 6 | *.dll 7 | *.so 8 | *.dylib 9 | 10 | # Test binary, built with `go test -c` 11 | *.test 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | coverage.txt 16 | 17 | # Dependency directories (remove the comment below to include it) 18 | vendor/ 19 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## v5 4 | 5 | ### Breaking 6 | 7 | - `gue_jobs.job_id` column type changed to `TEXT` and the `Job.ID` field type changed from `int64` 8 | to [`ULID`](https://github.com/oklog/ulid) to generate ID on the client-side but keep jobs sortable by the primary 9 | key. Library is not providing any migration routines, it is up to the users to apply a migration. Example can be 10 | found at [migrations/job_id_to_ulid.sql](./migrations/job_id_to_ulid.sql). 11 | - `gue_jobs.args` column type changed to `BYTEA` - this allows storing any bytes as job args, not only valid JSON; 12 | library is not providing any migration routines, it is up to the users to apply a migration that may look something 13 | like `ALTER TABLE gue_jobs ALTER COLUMN args TYPE bytea USING (args::text)::bytea` to change the column type and 14 | convert existing JSON records to the binary byte array representation 15 | - `Job.Error()` accepts `error` instance instead of error string 16 | - `Job.LastError` type changed from `github.com/jackc/pgtype.Text` to stdlib `database/sql.NullString` 17 | - min tested Postgres version is `11.x` 18 | 19 | ### New 20 | 21 | - Handler may return special typed errors to control rescheduling/discarding of the jobs on the individual basis 22 | - `ErrRescheduleJobIn()` - reschedule Job after some interval from the current time 23 | - `ErrRescheduleJobAt()` - reschedule Job to some specific time 24 | - `ErrDiscardJob()` - discard a Job 25 | 26 | ## v4 27 | 28 | ### Breaking 29 | 30 | - min supported go version is `1.18` 31 | - min tested Postgres version is `10.x` 32 | - `pgx v3` adapter is gone as it is pretty old already 33 | - `go-pg/pg/v10` adapter is gone as it is in the maintenance mode already 34 | - `NewClient()` returns not only client instance but an error if it fails to init 35 | - `NewWorker()` and `NewWorkerPool()` return not only worker and pool instance but an error if fail to init 36 | - previously deprecated `Worker.Start()` and `WorkerPool.Start()` removed in favour of `Worker.Run()` 37 | and `WorkerPool.Run()` - please check documentation as they are slightly different 38 | - `Job.Priority` changed its type from `int16` to `JobPriority` that is a wrapper type for `int16` 39 | - `gue/adapter/exponential.Default` became `gue.DefaultExponentialBackoff` 40 | - `gue/adapter/exponential.New()` became `gue.NewExponentialBackoff()` 41 | 42 | ### New 43 | 44 | - `pgx v5` adapter support 45 | - const values for `JobPriority` type to 
simplify usage of the common values: 46 | - `JobPriorityHighest` 47 | - `JobPriorityHigh` 48 | - `JobPriorityDefault` - set by default when the `Job.Priority` is not explicitly set 49 | - `JobPriorityLow` 50 | - `JobPriorityLowest` 51 | - `WorkerPool.WorkOne` method, can be useful for testing purpose mostly 52 | - backoff implementation may return negative value to discard errored job immediately 53 | - `gue.BackoffNever` backoff implementation discards the job on the first error 54 | - [OpenTelemetry](https://github.com/open-telemetry/opentelemetry-go) Metrics are available for `Client` - 55 | use `WithClientMeter()` option to set meter for the client instance. Available metrics: 56 | - `gue_client_enqueue` - number of job enqueue tries, exposes `job-type` and `success` attributes 57 | - `gue_client_lock_job` - number of job lock tries, exposes `job-type` and `success` attributes 58 | - [OpenTelemetry](https://github.com/open-telemetry/opentelemetry-go) Metrics are available for `Worker` - 59 | use `WithWorkerMeter()` or `WithPoolMeter()` option to set meter for the worker instance. Available metrics: 60 | - `gue_worker_jobs_worked` - number of jobs process tries, exposes `job-type` and `success` attributes 61 | - `gue_worker_jobs_duration` - histogram of jobs processing duration, exposes `job-type` attribute 62 | - [OpenTelemetry](https://github.com/open-telemetry/opentelemetry-go) Tracing is available for `Worker` - 63 | use `WithWorkerTracer()` or `WithPoolTracer()` option to set tracer for the worker instance 64 | - `GetWorkerIdx` function extracts worker index in the pool from the handler context 65 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 Vladimir Garvardt 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # gue 2 | 3 | [![GoDev](https://img.shields.io/static/v1?label=godev&message=reference&color=00add8)](https://pkg.go.dev/github.com/vgarvardt/gue/v5) 4 | [![Coverage Status](https://codecov.io/gh/vgarvardt/gue/branch/master/graph/badge.svg)](https://codecov.io/gh/vgarvardt/gue) 5 | [![ReportCard](https://goreportcard.com/badge/github.com/vgarvardt/gue)](https://goreportcard.com/report/github.com/vgarvardt/gue) 6 | [![License](https://img.shields.io/npm/l/express.svg)](http://opensource.org/licenses/MIT) 7 | 8 | Gue is a Golang queue on top of PostgreSQL that uses transaction-level locks. 9 | 10 | Originally this project was a fork of [bgentry/que-go](https://github.com/bgentry/que-go), 11 | but because of some backward-compatibility breaking changes and the original library author not being very responsive to 12 | PRs, I turned the fork into a standalone project. Version 2 breaks internal backward-compatibility with the original project - 13 | the DB table and all the internal logic (queries, algorithms) are completely rewritten. 14 | 15 | The name Gue is yet another silly word transformation: Queue -> Que, Go + Que -> Gue. 16 | 17 | ## Install 18 | 19 | ```shell 20 | go get -u github.com/vgarvardt/gue/v5 21 | ``` 22 | 23 | Additionally, you need to apply the [DB migration](migrations/schema.sql). 24 | 25 | ## Usage Example 26 | 27 | ```go 28 | package main 29 | 30 | import ( 31 | "context" 32 | "encoding/json" 33 | "fmt" 34 | "log" 35 | "os" 36 | "time" 37 | 38 | "github.com/jackc/pgx/v5/pgxpool" 39 | "golang.org/x/sync/errgroup" 40 | 41 | "github.com/vgarvardt/gue/v5" 42 | "github.com/vgarvardt/gue/v5/adapter/pgxv5" 43 | ) 44 | 45 | const ( 46 | printerQueue = "name_printer" 47 | jobTypePrinter = "PrintName" 48 | ) 49 | 50 | type printNameArgs struct { 51 | Name string 52 | } 53 | 54 | func main() { 55 | printName := func(ctx context.Context, j *gue.Job) error { 56 | var args printNameArgs 57 | if err := json.Unmarshal(j.Args, &args); err != nil { 58 | return err 59 | } 60 | fmt.Printf("Hello %s!\n", args.Name) 61 | return nil 62 | } 63 | 64 | pgxCfg, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL")) 65 | if err != nil { 66 | log.Fatal(err) 67 | } 68 | 69 | pgxPool, err := pgxpool.NewWithConfig(context.Background(), pgxCfg) 70 | if err != nil { 71 | log.Fatal(err) 72 | } 73 | defer pgxPool.Close() 74 | 75 | poolAdapter := pgxv5.NewConnPool(pgxPool) 76 | 77 | gc, err := gue.NewClient(poolAdapter) 78 | if err != nil { 79 | log.Fatal(err) 80 | } 81 | wm := gue.WorkMap{ 82 | jobTypePrinter: printName, 83 | } 84 | 85 | finishedJobsLog := func(ctx context.Context, j *gue.Job, err error) { 86 | if err != nil { 87 | return 88 | } 89 | 90 | j.Tx().Exec( 91 | ctx, 92 | "INSERT INTO finished_jobs_log (queue, type, run_at) VALUES ($1, $2, now())", 93 | j.Queue, 94 | j.Type, 95 | ) 96 | } 97 | 98 | // create a pool w/ 2 workers 99 | workers, err := gue.NewWorkerPool(gc, wm, 2, gue.WithPoolQueue(printerQueue), gue.WithPoolHooksJobDone(finishedJobsLog)) 100 | if err != nil { 101 | log.Fatal(err) 102 | } 103 | 104 | ctx, shutdown := context.WithCancel(context.Background()) 105 | 106 | // work jobs in goroutine 107 | g, gctx := errgroup.WithContext(ctx) 108 | g.Go(func() error { 109 | err := workers.Run(gctx) 110 | if err != nil { 111 | // In a real-world application, use a better way to shut down 112 | // the application on 
an unrecoverable error, e.g. fx.Shutdowner from 113 | // the go.uber.org/fx module. 114 | log.Fatal(err) 115 | } 116 | return err 117 | }) 118 | 119 | args, err := json.Marshal(printNameArgs{Name: "vgarvardt"}) 120 | if err != nil { 121 | log.Fatal(err) 122 | } 123 | 124 | j := &gue.Job{ 125 | Type: jobTypePrinter, 126 | Queue: printerQueue, 127 | Args: args, 128 | } 129 | if err := gc.Enqueue(context.Background(), j); err != nil { 130 | log.Fatal(err) 131 | } 132 | 133 | j = &gue.Job{ 134 | Type: jobTypePrinter, 135 | Queue: printerQueue, 136 | RunAt: time.Now().UTC().Add(30 * time.Second), // delay 30 seconds 137 | Args: args, 138 | } 139 | if err := gc.Enqueue(context.Background(), j); err != nil { 140 | log.Fatal(err) 141 | } 142 | 143 | time.Sleep(30 * time.Second) // wait for a while 144 | 145 | // send shutdown signal to worker 146 | shutdown() 147 | if err := g.Wait(); err != nil { 148 | log.Fatal(err) 149 | } 150 | } 151 | 152 | ``` 153 | 154 | ## PostgreSQL drivers 155 | 156 | The package supports several PostgreSQL drivers using an adapter interface internally. Currently, adapters for the following 157 | drivers have been implemented: 158 | 159 | - [github.com/jackc/pgx/v5](https://github.com/jackc/pgx) 160 | - [github.com/jackc/pgx/v4](https://github.com/jackc/pgx) 161 | - [github.com/lib/pq](https://github.com/lib/pq) 162 | 163 | ### `pgx/v5` 164 | 165 | ```go 166 | package main 167 | 168 | import ( 169 | "context" 170 | "log" 171 | "os" 172 | 173 | "github.com/jackc/pgx/v5/pgxpool" 174 | "github.com/vgarvardt/gue/v5" 175 | "github.com/vgarvardt/gue/v5/adapter/pgxv5" 176 | ) 177 | 178 | func main() { 179 | pgxCfg, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL")) 180 | if err != nil { 181 | log.Fatal(err) 182 | } 183 | 184 | pgxPool, err := pgxpool.NewWithConfig(context.Background(), pgxCfg) 185 | if err != nil { 186 | log.Fatal(err) 187 | } 188 | defer pgxPool.Close() 189 | 190 | poolAdapter := pgxv5.NewConnPool(pgxPool) 191 | 192 | gc, err := gue.NewClient(poolAdapter) 193 | ... 194 | } 195 | ``` 196 | 197 | ### `pgx/v4` 198 | 199 | ```go 200 | package main 201 | 202 | import ( 203 | "context" 204 | "log" 205 | "os" 206 | 207 | "github.com/jackc/pgx/v4/pgxpool" 208 | 209 | "github.com/vgarvardt/gue/v5" 210 | "github.com/vgarvardt/gue/v5/adapter/pgxv4" 211 | ) 212 | 213 | func main() { 214 | pgxCfg, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL")) 215 | if err != nil { 216 | log.Fatal(err) 217 | } 218 | 219 | pgxPool, err := pgxpool.ConnectConfig(context.Background(), pgxCfg) 220 | if err != nil { 221 | log.Fatal(err) 222 | } 223 | defer pgxPool.Close() 224 | 225 | poolAdapter := pgxv4.NewConnPool(pgxPool) 226 | 227 | gc, err := gue.NewClient(poolAdapter) 228 | ... 229 | } 230 | ``` 231 | 232 | ### `lib/pq` 233 | 234 | ```go 235 | package main 236 | 237 | import ( 238 | "database/sql" 239 | "log" 240 | "os" 241 | 242 | _ "github.com/lib/pq" // register postgres driver 243 | 244 | "github.com/vgarvardt/gue/v5" 245 | "github.com/vgarvardt/gue/v5/adapter/libpq" 246 | ) 247 | 248 | func main() { 249 | db, err := sql.Open("postgres", os.Getenv("DATABASE_URL")) 250 | if err != nil { 251 | log.Fatal(err) 252 | } 253 | defer db.Close() 254 | 255 | poolAdapter := libpq.NewConnPool(db) 256 | 257 | gc, err := gue.NewClient(poolAdapter) 258 | ... 259 | } 260 | ``` 261 | 262 | ## Logging 263 | 264 | The package supports several logging libraries using an adapter interface internally. Currently, adapters for the following 265 | drivers have been implemented: 266 | 267 | - NoOp (`adapter.NoOpLogger`) - default adapter that does nothing, so it is basically a `/dev/null` logger 268 | - Stdlib `log` - adapter that uses [`log`](https://golang.org/pkg/log/) logger for logs output. Instantiate it 269 | with `adapter.NewStdLogger(...)`. 270 | - Uber `zap` - adapter that uses [`go.uber.org/zap`](https://pkg.go.dev/go.uber.org/zap) logger for logs output. 271 | Instantiate it with `adapter/zap.New(...)`. 272 | - Olivier Poitrey's `zerolog` - adapter that uses [`github.com/rs/zerolog`](https://pkg.go.dev/github.com/rs/zerolog) 273 | logger for logs output. Instantiate it with `adapter/zerolog.New(...)`. 274 | - Stdlib `slog` - adapter that uses [`log/slog`](https://pkg.go.dev/log/slog) 275 | logger for logs output. Instantiate it with `adapter/slog.New(...)`.
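A logger is attached via the client and worker options. Below is a minimal sketch, assuming `poolAdapter` and `wm` are prepared as in the examples above; it uses the stdlib `log` adapter and the same `WithClientLogger` / `WithWorkerLogger` options that the bundled outbox example (`_example/outbox-worker-kafka`) uses - any other adapter from the list can be passed instead once instantiated with its `New(...)` constructor:

```go
// "github.com/vgarvardt/gue/v5/adapter" provides the logger adapters
gc, err := gue.NewClient(poolAdapter, gue.WithClientLogger(adapter.NewStdLogger()))
if err != nil {
	log.Fatal(err)
}

// the worker gets its own logger option
worker, err := gue.NewWorker(gc, wm, gue.WithWorkerLogger(adapter.NewStdLogger()))
if err != nil {
	log.Fatal(err)
}
...
```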
276 | -------------------------------------------------------------------------------- /Taskfile.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | env: 5 | CGO_ENABLED: 0 6 | 7 | tasks: 8 | test: 9 | summary: Run tests 10 | cmds: 11 | - task: test-deps-up 12 | - task: test-run 13 | - task: test-deps-down 14 | 15 | test-deps-up: 16 | summary: Starts test dependencies 17 | cmds: 18 | - cmd: docker compose up --detach --wait 19 | 20 | test-deps-down: 21 | summary: Stops test dependencies 22 | cmds: 23 | - cmd: docker compose down -v 24 | 25 | test-run: 26 | summary: Runs tests, must have dependencies running in the docker compose 27 | cmds: 28 | - cmd: go test -timeout 2m -cover -coverprofile=coverage.txt -covermode=atomic ./... 29 | vars: 30 | PG_HOST: 31 | sh: docker compose port postgres 5432 32 | env: 33 | TEST_POSTGRES: "postgres://test:test@{{.PG_HOST}}/test?sslmode=disable" 34 | 35 | spell-lint: 36 | summary: Check spelling 37 | vars: 38 | CURDIR: 39 | sh: pwd 40 | cmds: 41 | - cmd: | 42 | docker run \ 43 | --interactive --tty --rm \ 44 | --volume "{{.CURDIR}}:/workdir" \ 45 | --workdir "/workdir" \ 46 | python:3.12-slim bash -c "python -m pip install --upgrade pip && pip install 'codespell>=2.2.4' && codespell" 47 | 48 | lint: 49 | summary: Lint the code (expects golangci-lint to be installed) 50 | cmds: 51 | - cmd: golangci-lint run --config=./.github/linters/.golangci.yml --fix 52 | -------------------------------------------------------------------------------- /_example/outbox-worker-kafka/README.md: -------------------------------------------------------------------------------- 1 | # Gue-based Outbox Worker for Kafka 2 | 3 | > **Disclaimer**: this Outbox Worker implementation is just a showcase example of gue library usage and is not designed 4 | > to be used in high-load environments. There are ways of improving its performance and resilience, but they are out of 5 | > the scope of this showcase example. 6 | 7 | This is a simple [Transactional outbox pattern](https://microservices.io/patterns/data/transactional-outbox.html) 8 | implementation that uses `gue` for the message store/relay processes. 9 | 10 | In order to run this example you'll need to have the following tools preinstalled on your system: 11 | 12 | - Golang 1.21+ 13 | - Docker with the compose command - run `docker compose version` to ensure it works 14 | - [Task runner](https://taskfile.dev/installation/) 15 | 16 | The example consists of two components: 17 | 18 | ## 1. Client 19 | 20 | Generates messages and enqueues them as `gue` Jobs for further processing, as sketched below. Run it with `task client`. 
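Internally the client marshals every generated message and enqueues it as a regular `gue` Job (see [client.go](client.go) for the full flow). A minimal sketch of the enqueue step, assuming a `gue.Client` named `gc` and a `context.Context` named `ctx` are already available:

```go
// outboxMessage describes what the worker should publish to Kafka (key/value here are placeholders)
msg := outboxMessage{
	Topic: kafkaTopic,
	Key:   []byte("example-key"),
	Value: []byte("example-value"),
}

args, err := json.Marshal(msg)
if err != nil {
	return fmt.Errorf("could not marshal message to json: %w", err)
}

// the Job is stored in the gue_jobs table until the worker picks it up
if err := gc.Enqueue(ctx, &gue.Job{
	Queue: outboxQueue,
	Type:  outboxJobType,
	Args:  args,
}); err != nil {
	return fmt.Errorf("could not enqueue job: %w", err)
}
```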
21 | 22 | Once running, it asks how many messages you want to publish to Kafka. Give it a number and check that jobs are being 23 | inserted into the `gue_jobs` table in the database. 24 | 25 | ## 2. Worker 26 | 27 | Runs the `gue` Worker, polls Jobs enqueued by the Client and tries to publish them to Kafka. Run it with `task worker`. 28 | 29 | Once running, it polls jobs from the `gue_jobs` table in the database and tries to publish the messages to Kafka. 30 | 31 | ### A note on Kafka 32 | 33 | To avoid spinning up a real [Kafka](https://kafka.apache.org/) instance, [Redpanda](https://redpanda.com/) is used - it 34 | uses a Kafka-compatible protocol and consumes far fewer resources. Everything else is not so important for the purpose of 35 | this showcase example. 36 | -------------------------------------------------------------------------------- /_example/outbox-worker-kafka/Taskfile.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | env: 5 | CGO_ENABLED: 0 6 | 7 | tasks: 8 | client: 9 | summary: Runs outbox client app 10 | cmds: 11 | - task: deps-up 12 | - task: _start_client 13 | 14 | worker: 15 | summary: Runs outbox worker app 16 | cmds: 17 | - task: deps-up 18 | - task: _start_worker 19 | 20 | _start_client: 21 | summary: Starts client app, expects all deps to be available 22 | cmds: 23 | - go run ./... client 24 | env: 25 | DB_DSN: "postgres://outbox:outbox@localhost:{{.PG_PORT}}/outbox?sslmode=disable" 26 | vars: 27 | PG_PORT: 28 | # `docker compose port postgres 5432` -> "0.0.0.0:52041" 29 | sh: docker compose port postgres 5432 | cut -f2 -d":" 30 | 31 | _start_worker: 32 | summary: Starts worker app, expects all deps to be available 33 | cmds: 34 | - go run ./... worker 35 | env: 36 | DB_DSN: "postgres://outbox:outbox@localhost:{{.PG_PORT}}/outbox?sslmode=disable" 37 | KAFKA_BROKERS: "localhost:{{.KAFKA_PORT}}" 38 | vars: 39 | PG_PORT: 40 | # `docker compose port postgres 5432` -> "0.0.0.0:52041" 41 | sh: docker compose port postgres 5432 | cut -f2 -d":" 42 | KAFKA_PORT: 43 | # `docker compose port redpanda 9092` -> "0.0.0.0:65027" 44 | sh: docker compose port redpanda 9092 | cut -f2 -d":" 45 | 46 | deps-up: 47 | summary: Starts test dependencies 48 | cmds: 49 | - cmd: docker compose up --detach --wait 50 | 51 | deps-down: 52 | summary: Stops test dependencies 53 | cmds: 54 | - cmd: docker compose down -v 55 | -------------------------------------------------------------------------------- /_example/outbox-worker-kafka/client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "log" 8 | "time" 9 | 10 | "github.com/AlecAivazis/survey/v2" 11 | "github.com/AlecAivazis/survey/v2/terminal" 12 | "github.com/IBM/sarama" 13 | "github.com/google/uuid" 14 | "github.com/spf13/cobra" 15 | 16 | "github.com/vgarvardt/gue/v5" 17 | ) 18 | 19 | func newClientCommand() *cobra.Command { 20 | var gc *gue.Client 21 | 22 | return &cobra.Command{ 23 | Use: "client", 24 | Short: "Outbox Worker Client, enqueues messages to the gue for further processing by the worker", 25 | PreRunE: func(cmd *cobra.Command, args []string) (err error) { 26 | gc, err = newGueClient(cmd.Context()) 27 | return 28 | }, 29 | RunE: func(cmd *cobra.Command, args []string) error { 30 | ctx := cmd.Context() 31 | 32 | quitCh := initQuitCh() 33 | for { 34 | select { 35 | case sig := <-quitCh: 36 | log.Printf("Received interrupt (%s), exiting app\n", 
sig.String()) 37 | return nil 38 | 39 | case <-ctx.Done(): 40 | log.Printf("Received context done (err: %q), exiting app\n", ctx.Err().Error()) 41 | return nil 42 | 43 | default: 44 | var num int 45 | if err := survey.AskOne( 46 | &survey.Input{ 47 | Message: "How many message should I publish to kafka using outbox?", 48 | Default: "10", 49 | }, 50 | &num, 51 | survey.WithValidator(survey.Required), 52 | ); err != nil { 53 | if errors.Is(err, terminal.InterruptErr) { 54 | log.Printf("Received terminal interrupt, exiting app\n") 55 | return nil 56 | } 57 | return fmt.Errorf("could not get get response on how many message to publish: %w", err) 58 | } 59 | 60 | if num < 1 { 61 | log.Printf("Number of messages to publish must be greater than zero\n") 62 | continue 63 | } 64 | 65 | now := time.Now() 66 | for i := 0; i < num; i++ { 67 | msg := outboxMessage{ 68 | Topic: kafkaTopic, 69 | Key: []byte(fmt.Sprintf("%s-%d", now.String(), i%2)), 70 | Value: []byte(fmt.Sprintf("message #%d @ %s", i, now.String())), 71 | Headers: []sarama.RecordHeader{ 72 | {Key: []byte("message-uuid"), Value: []byte(uuid.NewString())}, 73 | }, 74 | } 75 | 76 | args, err := json.Marshal(msg) 77 | if err != nil { 78 | return fmt.Errorf("could not marshal message to json: %w", err) 79 | } 80 | 81 | if err := gc.Enqueue(ctx, &gue.Job{ 82 | Queue: outboxQueue, 83 | Type: outboxJobType, 84 | Args: args, 85 | }); err != nil { 86 | return fmt.Errorf("could not enqueue job: %w", err) 87 | } 88 | 89 | log.Printf( 90 | "Enqueued message for publishing to the gue: topic %q, key %q, value %q\n", 91 | msg.Topic, msg.Key, msg.Value, 92 | ) 93 | } 94 | } 95 | } 96 | }, 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /_example/outbox-worker-kafka/common.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log" 8 | "os" 9 | "os/signal" 10 | "path" 11 | "strings" 12 | "syscall" 13 | 14 | "github.com/IBM/sarama" 15 | "github.com/jackc/pgx/v5/pgxpool" 16 | 17 | "github.com/vgarvardt/gue/v5" 18 | "github.com/vgarvardt/gue/v5/adapter" 19 | "github.com/vgarvardt/gue/v5/adapter/pgxv5" 20 | ) 21 | 22 | // func init() { 23 | // sarama.Logger = log.New(os.Stdout, "[Sarama] ", log.LstdFlags) 24 | // } 25 | 26 | const ( 27 | kafkaTopic = "test-topic" 28 | outboxQueue = "outbox-kafka" 29 | outboxJobType = "outbox-message" 30 | ) 31 | 32 | type outboxMessage struct { 33 | Topic string `json:"topic"` 34 | Key []byte `json:"key"` 35 | Value []byte `json:"value"` 36 | Headers []sarama.RecordHeader `json:"headers"` 37 | } 38 | 39 | func initQuitCh() chan os.Signal { 40 | sigCh := make(chan os.Signal, 1) 41 | signal.Notify( 42 | sigCh, 43 | os.Interrupt, 44 | syscall.SIGHUP, 45 | syscall.SIGINT, 46 | syscall.SIGTERM, 47 | syscall.SIGQUIT, 48 | ) 49 | 50 | return sigCh 51 | } 52 | 53 | func newGueClient(ctx context.Context) (*gue.Client, error) { 54 | dbDSN := os.Getenv("DB_DSN") 55 | if dbDSN == "" { 56 | return nil, errors.New("DB_DSN env var is not set, should be something like postgres://user:password@host:port/dbname") 57 | } 58 | 59 | log.Printf("Connecting to the DB %q\n", dbDSN) 60 | connPoolConfig, err := pgxpool.ParseConfig(dbDSN) 61 | if err != nil { 62 | return nil, fmt.Errorf("could not parse DB DSN to connection config: %w", err) 63 | } 64 | 65 | connPool, err := pgxpool.NewWithConfig(ctx, connPoolConfig) 66 | if err != nil { 67 | return nil, fmt.Errorf("could not connection pool: %w", 
err) 68 | } 69 | 70 | if err := applyGueMigration(ctx, connPool); err != nil { 71 | return nil, err 72 | } 73 | 74 | guePool := pgxv5.NewConnPool(connPool) 75 | gc, err := gue.NewClient( 76 | guePool, 77 | gue.WithClientID("outbox-worker-client-"+gue.RandomStringID()), 78 | gue.WithClientLogger(adapter.NewStdLogger()), 79 | ) 80 | if err != nil { 81 | return nil, fmt.Errorf("could not instantiate gue client: %w", err) 82 | } 83 | 84 | return gc, nil 85 | } 86 | 87 | func applyGueMigration(ctx context.Context, connPool *pgxpool.Pool) error { 88 | cwd, err := os.Getwd() 89 | if err != nil { 90 | return fmt.Errorf("could not get current working directory: %w", err) 91 | } 92 | 93 | schemaPath := path.Join(cwd, "..", "..", "migrations", "schema.sql") 94 | queries, err := os.ReadFile(schemaPath) 95 | if err != nil { 96 | return fmt.Errorf("could not read schema file contents: %w", err) 97 | } 98 | 99 | if _, err := connPool.Exec(ctx, string(queries)); err != nil { 100 | return fmt.Errorf("could not apply gue schema migration: %w", err) 101 | } 102 | 103 | return nil 104 | } 105 | 106 | func createTestTopic() error { 107 | kafkaBrokers := os.Getenv("KAFKA_BROKERS") 108 | if kafkaBrokers == "" { 109 | return errors.New("KAFKA_BROKERS env var is not set, should be something like localhost:9092") 110 | } 111 | 112 | log.Printf("Initialising test kafka topic at %q\n", kafkaBrokers) 113 | config := sarama.NewConfig() 114 | config.ClientID = "gue-outbox-worker-kafka-example-admin" 115 | 116 | ca, err := sarama.NewClusterAdmin(strings.Split(kafkaBrokers, ","), config) 117 | if err != nil { 118 | return fmt.Errorf("could not create kafka cluster admin client: %w", err) 119 | } 120 | 121 | if err := ca.CreateTopic(kafkaTopic, &sarama.TopicDetail{ 122 | NumPartitions: 5, 123 | ReplicationFactor: 1, 124 | }, false); err != nil { 125 | var topicErr *sarama.TopicError 126 | if !errors.As(err, &topicErr) || topicErr.Err != sarama.ErrTopicAlreadyExists { 127 | return fmt.Errorf("could not create test topic: %w", err) 128 | } 129 | } 130 | 131 | if err := ca.Close(); err != nil { 132 | return fmt.Errorf("could not properly close kafka cluster admin client: %w", err) 133 | } 134 | 135 | return nil 136 | } 137 | 138 | func newSyncProducer() (sarama.SyncProducer, error) { 139 | kafkaBrokers := os.Getenv("KAFKA_BROKERS") 140 | if kafkaBrokers == "" { 141 | return nil, errors.New("KAFKA_BROKERS env var is not set, should be something like localhost:9092") 142 | } 143 | 144 | config := sarama.NewConfig() 145 | config.ClientID = "gue-outbox-worker-kafka-example" 146 | 147 | config.Producer.RequiredAcks = sarama.WaitForAll 148 | config.Producer.Partitioner = sarama.NewHashPartitioner 149 | config.Producer.Return.Successes = true 150 | 151 | log.Printf("Initialising sync kafka producer at %q\n", kafkaBrokers) 152 | producer, err := sarama.NewSyncProducer(strings.Split(kafkaBrokers, ","), config) 153 | if err != nil { 154 | return nil, fmt.Errorf("coulf not instantiate new sync producer: %w", err) 155 | } 156 | 157 | return producer, nil 158 | } 159 | -------------------------------------------------------------------------------- /_example/outbox-worker-kafka/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3.9" 3 | services: 4 | postgres: 5 | image: postgres:14 6 | environment: 7 | - POSTGRES_USER=outbox 8 | - POSTGRES_PASSWORD=outbox 9 | - POSTGRES_DATABASE=outbox 10 | ports: 11 | - "5432" 12 | healthcheck: 13 | test: ["CMD-SHELL", "pg_isready 
-U test"] 14 | interval: 1s 15 | timeout: 2s 16 | retries: 20 17 | 18 | redpanda: 19 | image: docker.redpanda.com/vectorized/redpanda:v22.2.6 20 | command: 21 | - redpanda 22 | - start 23 | - --smp 24 | - "1" 25 | - --reserve-memory 26 | - 0M 27 | - --overprovisioned 28 | - --node-id 29 | - "0" 30 | - --kafka-addr 31 | - PLAINTEXT://0.0.0.0:29092,OUTSIDE://0.0.0.0:9092 32 | - --advertise-kafka-addr 33 | - PLAINTEXT://redpanda:29092,OUTSIDE://localhost:9092 34 | - --pandaproxy-addr 35 | - PLAINTEXT://0.0.0.0:28082,OUTSIDE://0.0.0.0:8082 36 | - --advertise-pandaproxy-addr 37 | - PLAINTEXT://redpanda:28082,OUTSIDE://localhost:8082 38 | ports: 39 | - "9092:9092" 40 | healthcheck: 41 | test: ["CMD", "curl", "--fail", "localhost:9644/v1/status/ready"] 42 | start_period: 15s 43 | interval: 2s 44 | timeout: 1s 45 | retries: 20 46 | -------------------------------------------------------------------------------- /_example/outbox-worker-kafka/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/vgarvardt/gue/v5/example/outbox-worker-kafka 2 | 3 | go 1.23 4 | toolchain go1.24.1 5 | 6 | require ( 7 | github.com/AlecAivazis/survey/v2 v2.3.7 8 | github.com/IBM/sarama v1.44.0 9 | github.com/google/uuid v1.6.0 10 | github.com/jackc/pgx/v5 v5.7.2 11 | github.com/spf13/cobra v1.8.1 12 | github.com/vgarvardt/gue/v5 v5.7.1 13 | ) 14 | 15 | require ( 16 | github.com/davecgh/go-spew v1.1.1 // indirect 17 | github.com/eapache/go-resiliency v1.7.0 // indirect 18 | github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect 19 | github.com/eapache/queue v1.1.0 // indirect 20 | github.com/golang/snappy v0.0.4 // indirect 21 | github.com/hashicorp/errwrap v1.1.0 // indirect 22 | github.com/hashicorp/go-multierror v1.1.1 // indirect 23 | github.com/hashicorp/go-uuid v1.0.3 // indirect 24 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 25 | github.com/jackc/pgpassfile v1.0.0 // indirect 26 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect 27 | github.com/jackc/puddle/v2 v2.2.2 // indirect 28 | github.com/jcmturner/aescts/v2 v2.0.0 // indirect 29 | github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect 30 | github.com/jcmturner/gofork v1.7.6 // indirect 31 | github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect 32 | github.com/jcmturner/rpc/v2 v2.0.3 // indirect 33 | github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect 34 | github.com/klauspost/compress v1.17.11 // indirect 35 | github.com/mattn/go-colorable v0.1.13 // indirect 36 | github.com/mattn/go-isatty v0.0.19 // indirect 37 | github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect 38 | github.com/oklog/ulid/v2 v2.1.0 // indirect 39 | github.com/pierrec/lz4/v4 v4.1.22 // indirect 40 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect 41 | github.com/spf13/pflag v1.0.5 // indirect 42 | github.com/vgarvardt/backoff v1.0.0 // indirect 43 | go.opentelemetry.io/otel v1.33.0 // indirect 44 | go.opentelemetry.io/otel/metric v1.33.0 // indirect 45 | go.opentelemetry.io/otel/trace v1.33.0 // indirect 46 | golang.org/x/crypto v0.36.0 // indirect 47 | golang.org/x/net v0.38.0 // indirect 48 | golang.org/x/sync v0.12.0 // indirect 49 | golang.org/x/sys v0.31.0 // indirect 50 | golang.org/x/term v0.30.0 // indirect 51 | golang.org/x/text v0.23.0 // indirect 52 | ) 53 | -------------------------------------------------------------------------------- /_example/outbox-worker-kafka/go.sum: 
-------------------------------------------------------------------------------- 1 | github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= 2 | github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= 3 | github.com/IBM/sarama v1.44.0 h1:puNKqcScjSAgVLramjsuovZrS0nJZFVsrvuUymkWqhE= 4 | github.com/IBM/sarama v1.44.0/go.mod h1:MxQ9SvGfvKIorbk077Ff6DUnBlGpidiQOtU2vuBaxVw= 5 | github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= 6 | github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= 7 | github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 8 | github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= 9 | github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= 10 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 11 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 12 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 13 | github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= 14 | github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= 15 | github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= 16 | github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= 17 | github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= 18 | github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= 19 | github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= 20 | github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= 21 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 22 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 23 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 24 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 25 | github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= 26 | github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 27 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 28 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 29 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 30 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 31 | github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= 32 | github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= 33 | github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 34 | github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 35 | github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 36 | github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 37 | github.com/hashicorp/go-multierror v1.1.1/go.mod 
h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= 38 | github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 39 | github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= 40 | github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 41 | github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= 42 | github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= 43 | github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 44 | github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 45 | github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= 46 | github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= 47 | github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= 48 | github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= 49 | github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= 50 | github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= 51 | github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= 52 | github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= 53 | github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= 54 | github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= 55 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= 56 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= 57 | github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= 58 | github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= 59 | github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= 60 | github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= 61 | github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= 62 | github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= 63 | github.com/jackc/puddle v1.3.0 h1:eHK/5clGOatcjX3oWGBO/MpxpbHzSwud5EWTSCI+MX0= 64 | github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= 65 | github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= 66 | github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= 67 | github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= 68 | github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= 69 | github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= 70 | github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= 71 | github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= 72 | github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= 73 | github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= 74 | github.com/jcmturner/goidentity/v6 v6.0.1/go.mod 
h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= 75 | github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= 76 | github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= 77 | github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= 78 | github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= 79 | github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= 80 | github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= 81 | github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= 82 | github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= 83 | github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= 84 | github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= 85 | github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= 86 | github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 87 | github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 88 | github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 89 | github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 90 | github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= 91 | github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 92 | github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= 93 | github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= 94 | github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= 95 | github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU= 96 | github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= 97 | github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= 98 | github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= 99 | github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 100 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 101 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 102 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= 103 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= 104 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 105 | github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= 106 | github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= 107 | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= 108 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 109 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 110 | github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 111 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 112 | github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= 113 | github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 114 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 115 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 116 | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 117 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 118 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 119 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 120 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 121 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 122 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 123 | github.com/vgarvardt/backoff v1.0.0 h1:VKub60RkA/po0gz0fHsr1vWb6pbyvOQpOs/4Ciw4atM= 124 | github.com/vgarvardt/backoff v1.0.0/go.mod h1:Om8PDVpm4MpRNDg/IKpJWsvS2MabY7LtwSahd09zg8E= 125 | github.com/vgarvardt/gue/v5 v5.7.1 h1:2BI8IzaWBsKMLkKLhb5T8o3vIigCienZ7C9LblMNf28= 126 | github.com/vgarvardt/gue/v5 v5.7.1/go.mod h1:h4AUromZbR/7K9JlTHLQtpqPvddANnel8fddGagrF54= 127 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 128 | go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= 129 | go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= 130 | go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= 131 | go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= 132 | go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= 133 | go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= 134 | go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= 135 | go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= 136 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 137 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 138 | go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= 139 | go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 140 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 141 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 142 | golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= 143 | golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= 144 | golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= 145 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 146 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 147 | golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 148 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 149 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 150 | golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 151 | golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 152 | golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= 153 | golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= 154 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 155 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 156 | golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= 157 | golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= 158 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 159 | golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 160 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 161 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 162 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 163 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 164 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 165 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 166 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 167 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 168 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 169 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 170 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 171 | golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= 172 | golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= 173 | golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= 174 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 175 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 176 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 177 | golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 178 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 179 | golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= 180 | golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= 181 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 182 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 183 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 184 | golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 185 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 186 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 187 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 188 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 189 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 190 | -------------------------------------------------------------------------------- /_example/outbox-worker-kafka/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "log" 6 | 7 | "github.com/spf13/cobra" 8 | ) 9 | 10 | func main() { 11 | ctx := context.Background() 12 | 13 | rootCmd := &cobra.Command{ 14 | Use: "outbox [command]", 15 | Version: "0.0.0-example", 16 | } 17 | 18 | rootCmd.AddCommand(newClientCommand()) 19 | rootCmd.AddCommand(newWorkerCommand()) 20 | 21 | if err := rootCmd.ExecuteContext(ctx); err != nil { 22 | log.Fatal(err) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /_example/outbox-worker-kafka/worker.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "log" 8 | "time" 9 | 10 | "github.com/IBM/sarama" 11 | "github.com/spf13/cobra" 12 | 13 | "github.com/vgarvardt/gue/v5" 14 | "github.com/vgarvardt/gue/v5/adapter" 15 | ) 16 | 17 | func newWorkerCommand() *cobra.Command { 18 | var ( 19 | gc *gue.Client 20 | producer sarama.SyncProducer 21 | ) 22 | 23 | return &cobra.Command{ 24 | Use: "worker", 25 | Short: "Outbox Worker, reads gue messages enqueued by the client and publishes them to Kafka", 26 | PreRunE: func(cmd *cobra.Command, args []string) (err error) { 27 | gc, err = newGueClient(cmd.Context()) 28 | if err != nil { 29 | return 30 | } 31 | 32 | if err = createTestTopic(); err != nil { 33 | return 34 | } 35 | 36 | producer, err = newSyncProducer() 37 | if err != nil { 38 | return 39 | } 40 | 41 | return 42 | }, 43 | PostRunE: func(cmd *cobra.Command, args []string) error { 44 | return producer.Close() 45 | }, 46 | RunE: func(cmd *cobra.Command, args []string) error { 47 | wm := gue.WorkMap{ 48 | outboxJobType: outboxMessageHandler(producer), 49 | } 50 | 51 | worker, err := gue.NewWorker( 52 | gc, wm, 53 | gue.WithWorkerQueue(outboxQueue), 54 | gue.WithWorkerLogger(adapter.NewStdLogger()), 55 | gue.WithWorkerPollInterval(500*time.Millisecond), 56 | gue.WithWorkerPollStrategy(gue.RunAtPollStrategy), 57 | gue.WithWorkerID("outbox-worker-"+gue.RandomStringID()), 58 | ) 59 | if err != nil { 60 | return fmt.Errorf("could not build gue worker: %w", err) 61 | } 62 | 63 | cancelCtx, cancel := context.WithCancel(cmd.Context()) 64 | defer cancel() 65 | 66 | go func() { 67 | if err := worker.Run(cancelCtx); err != nil { 68 | log.Fatalf("Worker finished with error: %s\n", err) 69 | } 70 | log.Println("Worker finished") 71 | }() 72 | 73 | quitCh := initQuitCh() 74 | sig := <-quitCh 75 | log.Printf("Received interrupt (%s), exiting app\n", sig.String()) 76 | cancel() 77 | return nil 78 | }, 79 | } 80 | } 81 | 82 | func outboxMessageHandler(producer sarama.SyncProducer) gue.WorkFunc { 83 | return func(ctx context.Context, j *gue.Job) error { 84 | var m outboxMessage 85 | 
if err := json.Unmarshal(j.Args, &m); err != nil { 86 | return fmt.Errorf("could not unmarshal kafka oubox message: %w", err) 87 | } 88 | 89 | pm := sarama.ProducerMessage{ 90 | Topic: m.Topic, 91 | Key: sarama.ByteEncoder(m.Key), 92 | Value: sarama.ByteEncoder(m.Value), 93 | Headers: m.Headers, 94 | } 95 | partition, offset, err := producer.SendMessage(&pm) 96 | if err != nil { 97 | return fmt.Errorf("could not publish message to kafka from outbox [job-id: %d]: %w", j.ID, err) 98 | } 99 | 100 | log.Printf( 101 | "Published message to kafka: topic %q, partition %d, offset %d, key %q, value %q\n", 102 | m.Topic, partition, offset, m.Key, m.Value, 103 | ) 104 | 105 | return nil 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /adapter/db.go: -------------------------------------------------------------------------------- 1 | package adapter 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | ) 7 | 8 | var ( 9 | // ErrNoRows abstract db driver-level "no rows in result set" error 10 | ErrNoRows = errors.New("no rows in result set") 11 | // ErrTxClosed abstract db driver-level "transaction is closed" error 12 | ErrTxClosed = errors.New("tx is closed") 13 | ) 14 | 15 | // Row represents single row returned by DB driver 16 | type Row interface { 17 | // Scan reads the values from the current row into dest values positionally. 18 | // If no rows were found it returns ErrNoRows. If multiple rows are returned it 19 | // ignores all but the first. 20 | Scan(dest ...any) error 21 | } 22 | 23 | // CommandTag is the result of an Exec function 24 | type CommandTag interface { 25 | // RowsAffected returns the number of rows affected. If the CommandTag was not 26 | // for a row affecting command (such as "CREATE TABLE") then it returns 0 27 | RowsAffected() int64 28 | } 29 | 30 | // Rows represents rows set returned by DB driver 31 | type Rows interface { 32 | // Next prepares the next row for reading. It returns true if there is another 33 | // row and false if no more rows are available. It automatically closes rows 34 | // when all rows are read. 35 | Next() bool 36 | // Scan reads the values from the current row into dest values positionally. 37 | Scan(dest ...any) error 38 | // Err returns any error that occurred while reading. 39 | Err() error 40 | } 41 | 42 | // Queryable is the base interface for different types of db connections that should implement 43 | // basic querying operations. 44 | type Queryable interface { 45 | // Exec executes query. A query can be either a prepared statement name or an SQL string. 46 | // args should be referenced positionally from the sql string as $1, $2, etc. 47 | Exec(ctx context.Context, query string, args ...any) (CommandTag, error) 48 | // QueryRow executes query with args. Any error that occurs while 49 | // querying is deferred until calling Scan on the returned Row. That Row will 50 | // error with ErrNoRows if no rows are returned. 51 | QueryRow(ctx context.Context, query string, args ...any) Row 52 | // Query executes a query that returns rows, typically a SELECT. 53 | // The args are for any placeholder parameters in the query. 54 | Query(ctx context.Context, query string, args ...any) (Rows, error) 55 | } 56 | 57 | // Tx represents a database transaction. 58 | type Tx interface { 59 | Queryable 60 | // Rollback rolls back the transaction. Rollback will return ErrTxClosed if the 61 | // Tx is already closed, but is otherwise safe to call multiple times. 
Hence, a 62 | // defer tx.Rollback() is safe even if tx.Commit() will be called first in a 63 | // non-error condition. 64 | Rollback(ctx context.Context) error 65 | // Commit commits the transaction 66 | Commit(ctx context.Context) error 67 | } 68 | 69 | // Conn is a single PostgreSQL connection. 70 | type Conn interface { 71 | Queryable 72 | // Ping checks if the DB and connection are alive. 73 | Ping(ctx context.Context) error 74 | // Begin starts a transaction with the default transaction mode. 75 | Begin(ctx context.Context) (Tx, error) 76 | // Release returns connection to the pool it was acquired from. 77 | // Once Release has been called, other methods must not be called. 78 | Release() error 79 | } 80 | 81 | // ConnPool is a PostgreSQL connection pool handle. 82 | type ConnPool interface { 83 | Queryable 84 | // Ping checks if the DB and connection are alive. 85 | Ping(ctx context.Context) error 86 | // Begin starts a transaction with the default transaction mode. 87 | Begin(ctx context.Context) (Tx, error) 88 | // Acquire returns a connection Conn from the ConnPool. 89 | // Connection must be returned to the pool after usage by calling Conn.Release(). 90 | Acquire(ctx context.Context) (Conn, error) 91 | // Close ends the use of a connection pool. It prevents any new connections from 92 | // being acquired and closes available underlying connections. Any acquired 93 | // connections will be closed when they are released. 94 | Close() error 95 | } 96 | -------------------------------------------------------------------------------- /adapter/libpq/db.go: -------------------------------------------------------------------------------- 1 | package libpq 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "errors" 7 | 8 | "github.com/vgarvardt/gue/v5/adapter" 9 | ) 10 | 11 | // aRow implements adapter.Row using github.com/lib/pq 12 | type aRow struct { 13 | row *sql.Row 14 | } 15 | 16 | // Scan implements adapter.Row.Scan() using github.com/lib/pq 17 | func (r *aRow) Scan(dest ...any) error { 18 | err := r.row.Scan(dest...) 19 | if errors.Is(err, sql.ErrNoRows) { 20 | return adapter.ErrNoRows 21 | } 22 | 23 | return err 24 | } 25 | 26 | // aCommandTag implements adapter.CommandTag using github.com/lib/pq 27 | type aCommandTag struct { 28 | ct sql.Result 29 | } 30 | 31 | // RowsAffected implements adapter.CommandTag.RowsAffected() using github.com/lib/pq 32 | func (ct aCommandTag) RowsAffected() int64 { 33 | ra, err := ct.ct.RowsAffected() 34 | if err != nil { 35 | // TODO: log this error at least 36 | return 0 37 | } 38 | 39 | return ra 40 | } 41 | 42 | // aRows implements adapter.Rows using github.com/lib/pq 43 | type aRows struct { 44 | rows *sql.Rows 45 | } 46 | 47 | // Next implements adapter.Rows.Next() using github.com/lib/pq 48 | func (r *aRows) Next() bool { 49 | return r.rows.Next() 50 | } 51 | 52 | // Scan implements adapter.Rows.Scan() using github.com/lib/pq 53 | func (r *aRows) Scan(dest ...any) error { 54 | return r.rows.Scan(dest...) 55 | } 56 | 57 | // Err implements adapter.Rows.Err() using github.com/lib/pq 58 | func (r *aRows) Err() error { 59 | return r.rows.Err() 60 | } 61 | 62 | // aTx implements adapter.Tx using github.com/lib/pq 63 | type aTx struct { 64 | tx *sql.Tx 65 | } 66 | 67 | // NewTx instantiates new adapter.Tx using github.com/lib/pq 68 | func NewTx(tx *sql.Tx) adapter.Tx { 69 | return &aTx{tx: tx} 70 | } 71 | 72 | // UnwrapTx tries to unwrap driver-specific transaction instance from the interface. 
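// Use it when application code needs direct access to the underlying *sql.Tx for driver-specific calls.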
73 | // Returns unwrap success as the second parameter. 74 | func UnwrapTx(tx adapter.Tx) (*sql.Tx, bool) { 75 | driverTx, ok := tx.(*aTx) 76 | if !ok { 77 | return nil, false 78 | } 79 | 80 | return driverTx.tx, ok 81 | } 82 | 83 | // Exec implements adapter.Tx.Exec() using github.com/lib/pq 84 | func (tx *aTx) Exec(ctx context.Context, query string, args ...any) (adapter.CommandTag, error) { 85 | ct, err := tx.tx.ExecContext(ctx, query, args...) 86 | return aCommandTag{ct}, err 87 | } 88 | 89 | // QueryRow implements adapter.Tx.QueryRow() using github.com/lib/pq 90 | func (tx *aTx) QueryRow(ctx context.Context, query string, args ...any) adapter.Row { 91 | return &aRow{tx.tx.QueryRowContext(ctx, query, args...)} 92 | } 93 | 94 | // Query implements adapter.Tx.Query() using github.com/lib/pq 95 | func (tx *aTx) Query(ctx context.Context, query string, args ...any) (adapter.Rows, error) { 96 | rows, err := tx.tx.QueryContext(ctx, query, args...) 97 | return &aRows{rows}, err 98 | } 99 | 100 | // Rollback implements adapter.Tx.Rollback() using github.com/lib/pq 101 | func (tx *aTx) Rollback(_ context.Context) error { 102 | err := tx.tx.Rollback() 103 | if errors.Is(err, sql.ErrTxDone) { 104 | return adapter.ErrTxClosed 105 | } 106 | 107 | return err 108 | } 109 | 110 | // Commit implements adapter.Tx.Commit() using github.com/lib/pq 111 | func (tx *aTx) Commit(_ context.Context) error { 112 | return tx.tx.Commit() 113 | } 114 | 115 | type conn struct { 116 | c *sql.Conn 117 | } 118 | 119 | // NewConn instantiates new adapter.Conn using github.com/lib/pq 120 | func NewConn(c *sql.Conn) adapter.Conn { 121 | return &conn{c} 122 | } 123 | 124 | // Ping implements adapter.Conn.Ping() using github.com/lib/pq 125 | func (c *conn) Ping(ctx context.Context) error { 126 | return c.c.PingContext(ctx) 127 | } 128 | 129 | // Begin implements adapter.Conn.Begin() using github.com/lib/pq 130 | func (c *conn) Begin(ctx context.Context) (adapter.Tx, error) { 131 | tx, err := c.c.BeginTx(ctx, nil) 132 | return NewTx(tx), err 133 | } 134 | 135 | // Exec implements adapter.Conn.Exec() using github.com/lib/pq 136 | func (c *conn) Exec(ctx context.Context, query string, args ...any) (adapter.CommandTag, error) { 137 | r, err := c.c.ExecContext(ctx, query, args...) 138 | return aCommandTag{r}, err 139 | } 140 | 141 | // QueryRow implements adapter.Conn.QueryRow() github.com/lib/pq 142 | func (c *conn) QueryRow(ctx context.Context, query string, args ...any) adapter.Row { 143 | return &aRow{c.c.QueryRowContext(ctx, query, args...)} 144 | } 145 | 146 | // Query implements adapter.Conn.Query() github.com/lib/pq 147 | func (c *conn) Query(ctx context.Context, query string, args ...any) (adapter.Rows, error) { 148 | rows, err := c.c.QueryContext(ctx, query, args...) 
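// rows are wrapped unconditionally; callers are expected to check err before iterating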
149 | return &aRows{rows}, err 150 | } 151 | 152 | // Release implements adapter.Conn.Release() using github.com/lib/pq 153 | func (c *conn) Release() error { 154 | return c.c.Close() 155 | } 156 | 157 | // connPool implements adapter.ConnPool using github.com/lib/pq 158 | type connPool struct { 159 | pool *sql.DB 160 | } 161 | 162 | // NewConnPool instantiates new adapter.ConnPool using github.com/lib/pq 163 | func NewConnPool(pool *sql.DB) adapter.ConnPool { 164 | return &connPool{pool} 165 | } 166 | 167 | // Ping implements adapter.ConnPool.Ping() using github.com/lib/pq 168 | func (c *connPool) Ping(ctx context.Context) error { 169 | return c.pool.PingContext(ctx) 170 | } 171 | 172 | // Exec implements adapter.ConnPool.Exec() using github.com/lib/pq 173 | func (c *connPool) Exec(ctx context.Context, query string, args ...any) (adapter.CommandTag, error) { 174 | ct, err := c.pool.ExecContext(ctx, query, args...) 175 | return aCommandTag{ct}, err 176 | } 177 | 178 | // QueryRow implements adapter.ConnPool.QueryRow() using github.com/lib/pq 179 | func (c *connPool) QueryRow(ctx context.Context, query string, args ...any) adapter.Row { 180 | return &aRow{c.pool.QueryRowContext(ctx, query, args...)} 181 | } 182 | 183 | // Query implements adapter.ConnPool.Query() using github.com/lib/pq 184 | func (c *connPool) Query(ctx context.Context, query string, args ...any) (adapter.Rows, error) { 185 | rows, err := c.pool.QueryContext(ctx, query, args...) 186 | return &aRows{rows}, err 187 | } 188 | 189 | // Begin implements adapter.ConnPool.Begin() using github.com/lib/pq 190 | func (c *connPool) Begin(ctx context.Context) (adapter.Tx, error) { 191 | tx, err := c.pool.BeginTx(ctx, nil) 192 | return NewTx(tx), err 193 | } 194 | 195 | // Acquire implements adapter.ConnPool.Acquire() using github.com/lib/pq 196 | func (c *connPool) Acquire(ctx context.Context) (adapter.Conn, error) { 197 | cc, err := c.pool.Conn(ctx) 198 | return NewConn(cc), err 199 | } 200 | 201 | // Close implements adapter.ConnPool.Close() using github.com/lib/pq 202 | func (c *connPool) Close() error { 203 | return c.pool.Close() 204 | } 205 | -------------------------------------------------------------------------------- /adapter/logger.go: -------------------------------------------------------------------------------- 1 | package adapter 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "strings" 7 | "sync" 8 | ) 9 | 10 | // KeyError is the default key for error field 11 | const KeyError = "error" 12 | 13 | type ( 14 | // Field is the simple container for a single log field 15 | Field struct { 16 | Key string 17 | Value any 18 | } 19 | 20 | // Logger declares base logging methods 21 | Logger interface { 22 | Debug(msg string, fields ...Field) 23 | Info(msg string, fields ...Field) 24 | Error(msg string, fields ...Field) 25 | 26 | With(fields ...Field) Logger 27 | } 28 | 29 | // NoOpLogger implements Logger that does nothing, all logs are going to /dev/null 30 | NoOpLogger struct{} 31 | 32 | // StdLogger implements Logger that uses stdlib "log" as output 33 | StdLogger struct { 34 | fields *sync.Map 35 | fLen int 36 | } 37 | ) 38 | 39 | // F returns value as field 40 | func F(key string, value any) Field { 41 | return Field{Key: key, Value: value} 42 | } 43 | 44 | // Err returns error as field 45 | func Err(err error) Field { 46 | return F(KeyError, err) 47 | } 48 | 49 | // Debug implements Logger.Debug for /dev/null logger 50 | func (l NoOpLogger) Debug(string, ...Field) {} 51 | 52 | // Info implements Logger.Debug for /dev/null 
logger 53 | func (l NoOpLogger) Info(string, ...Field) {} 54 | 55 | // Error implements Logger.Debug for /dev/null logger 56 | func (l NoOpLogger) Error(string, ...Field) {} 57 | 58 | // With implements nested logger for /dev/null logger 59 | func (l NoOpLogger) With(...Field) Logger { 60 | return l 61 | } 62 | 63 | // NewStdLogger instantiates new Logger using stdlib "log". 64 | // Builder allows to set default set of fields for all the logs being written. 65 | func NewStdLogger(fields ...Field) *StdLogger { 66 | f := new(sync.Map) 67 | for _, ff := range fields { 68 | f.Store(ff.Key, ff.Value) 69 | } 70 | 71 | return &StdLogger{f, len(fields)} 72 | } 73 | 74 | // Debug implements Logger.Debug for stdlib "log" logger 75 | func (l *StdLogger) Debug(msg string, fields ...Field) { 76 | log.Printf("%s %s", msg, l.buildContext("debug", fields...)) 77 | } 78 | 79 | // Info implements Logger.Debug for stdlib "log" logger 80 | func (l *StdLogger) Info(msg string, fields ...Field) { 81 | log.Printf("%s %s", msg, l.buildContext("info", fields...)) 82 | } 83 | 84 | // Error implements Logger.Debug for stdlib "log" logger 85 | func (l *StdLogger) Error(msg string, fields ...Field) { 86 | log.Printf("%s %s", msg, l.buildContext("error", fields...)) 87 | } 88 | 89 | // With implements nested logger for stdlib "log" logger 90 | func (l *StdLogger) With(fields ...Field) Logger { 91 | f := new(sync.Map) 92 | fLen := len(fields) 93 | l.fields.Range(func(key, value any) bool { 94 | f.Store(key, value) 95 | fLen++ 96 | return true 97 | }) 98 | for _, ff := range fields { 99 | f.Store(ff.Key, ff.Value) 100 | } 101 | 102 | return &StdLogger{f, fLen} 103 | } 104 | 105 | func (l *StdLogger) buildContext(level string, fields ...Field) string { 106 | ctx := make([]string, 0, len(fields)+l.fLen+1) 107 | ctx = append(ctx, "level="+level) 108 | l.fields.Range(func(key, value any) bool { 109 | ctx = append(ctx, fmt.Sprintf("%s=%v", key, value)) 110 | return true 111 | }) 112 | for _, f := range fields { 113 | ctx = append(ctx, fmt.Sprintf("%s=%v", f.Key, f.Value)) 114 | } 115 | 116 | return strings.Join(ctx, " ") 117 | } 118 | -------------------------------------------------------------------------------- /adapter/pgxv4/db.go: -------------------------------------------------------------------------------- 1 | package pgxv4 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | 7 | "github.com/jackc/pgconn" 8 | "github.com/jackc/pgx/v4" 9 | "github.com/jackc/pgx/v4/pgxpool" 10 | 11 | "github.com/vgarvardt/gue/v5/adapter" 12 | ) 13 | 14 | // aRow implements adapter.Row using github.com/jackc/pgx/v4 15 | type aRow struct { 16 | row pgx.Row 17 | } 18 | 19 | // Scan implements adapter.Row.Scan() using github.com/jackc/pgx/v4 20 | func (r *aRow) Scan(dest ...any) error { 21 | err := r.row.Scan(dest...) 
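// normalize the driver-level "no rows" error to the adapter sentinel so callers can test adapter.ErrNoRows regardless of driver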
22 | if errors.Is(err, pgx.ErrNoRows) { 23 | return adapter.ErrNoRows 24 | } 25 | 26 | return err 27 | } 28 | 29 | // aCommandTag implements adapter.CommandTag using github.com/jackc/pgx/v4 30 | type aCommandTag struct { 31 | ct pgconn.CommandTag 32 | } 33 | 34 | // RowsAffected implements adapter.CommandTag.RowsAffected() using github.com/jackc/pgx/v4 35 | func (ct aCommandTag) RowsAffected() int64 { 36 | return ct.ct.RowsAffected() 37 | } 38 | 39 | // aRows implements adapter.Rows using github.com/jackc/pgx/v4 40 | type aRows struct { 41 | rows pgx.Rows 42 | } 43 | 44 | // Next implements adapter.Rows.Next() using github.com/jackc/pgx/v4 45 | func (r *aRows) Next() bool { 46 | return r.rows.Next() 47 | } 48 | 49 | // Scan implements adapter.Rows.Scan() using github.com/jackc/pgx/v4 50 | func (r *aRows) Scan(dest ...any) error { 51 | return r.rows.Scan(dest...) 52 | } 53 | 54 | // Err implements adapter.Rows.Err() using github.com/jackc/pgx/v4 55 | func (r *aRows) Err() error { 56 | return r.rows.Err() 57 | } 58 | 59 | // aTx implements adapter.Tx using github.com/jackc/pgx/v4 60 | type aTx struct { 61 | tx pgx.Tx 62 | } 63 | 64 | // NewTx instantiates new adapter.Tx using github.com/jackc/pgx/v4 65 | func NewTx(tx pgx.Tx) adapter.Tx { 66 | return &aTx{tx: tx} 67 | } 68 | 69 | // UnwrapTx tries to unwrap driver-specific transaction instance from the interface. 70 | // Returns unwrap success as the second parameter. 71 | func UnwrapTx(tx adapter.Tx) (pgx.Tx, bool) { 72 | driverTx, ok := tx.(*aTx) 73 | if !ok { 74 | return nil, false 75 | } 76 | 77 | return driverTx.tx, ok 78 | } 79 | 80 | // Exec implements adapter.Tx.Exec() using github.com/jackc/pgx/v4 81 | func (tx *aTx) Exec(ctx context.Context, query string, args ...any) (adapter.CommandTag, error) { 82 | ct, err := tx.tx.Exec(ctx, query, args...) 83 | return aCommandTag{ct}, err 84 | } 85 | 86 | // QueryRow implements adapter.Tx.QueryRow() using github.com/jackc/pgx/v4 87 | func (tx *aTx) QueryRow(ctx context.Context, query string, args ...any) adapter.Row { 88 | return &aRow{tx.tx.QueryRow(ctx, query, args...)} 89 | } 90 | 91 | // Query implements adapter.Tx.Query() using github.com/jackc/pgx/v4 92 | func (tx *aTx) Query(ctx context.Context, query string, args ...any) (adapter.Rows, error) { 93 | rows, err := tx.tx.Query(ctx, query, args...) 
94 | return &aRows{rows}, err 95 | } 96 | 97 | // Rollback implements adapter.Tx.Rollback() using github.com/jackc/pgx/v4 98 | func (tx *aTx) Rollback(ctx context.Context) error { 99 | err := tx.tx.Rollback(ctx) 100 | if errors.Is(err, pgx.ErrTxClosed) { 101 | return adapter.ErrTxClosed 102 | } 103 | 104 | return err 105 | } 106 | 107 | // Commit implements adapter.Tx.Commit() using github.com/jackc/pgx/v4 108 | func (tx *aTx) Commit(ctx context.Context) error { 109 | return tx.tx.Commit(ctx) 110 | } 111 | 112 | type conn struct { 113 | c *pgxpool.Conn 114 | } 115 | 116 | // NewConn instantiates new adapter.Conn using github.com/jackc/pgx/v4 117 | func NewConn(c *pgxpool.Conn) adapter.Conn { 118 | return &conn{c} 119 | } 120 | 121 | // Ping implements adapter.Conn.Ping() using github.com/jackc/pgx/v4 122 | func (c *conn) Ping(ctx context.Context) error { 123 | return c.c.Ping(ctx) 124 | } 125 | 126 | // Begin implements adapter.Conn.Begin() using github.com/jackc/pgx/v4 127 | func (c *conn) Begin(ctx context.Context) (adapter.Tx, error) { 128 | tx, err := c.c.Begin(ctx) 129 | return NewTx(tx), err 130 | } 131 | 132 | // Exec implements adapter.Conn.Exec() using github.com/jackc/pgx/v4 133 | func (c *conn) Exec(ctx context.Context, query string, args ...any) (adapter.CommandTag, error) { 134 | r, err := c.c.Exec(ctx, query, args...) 135 | return aCommandTag{r}, err 136 | } 137 | 138 | // QueryRow implements adapter.Conn.QueryRow() github.com/jackc/pgx/v4 139 | func (c *conn) QueryRow(ctx context.Context, query string, args ...any) adapter.Row { 140 | return &aRow{c.c.QueryRow(ctx, query, args...)} 141 | } 142 | 143 | // Query implements adapter.Conn.Query() github.com/jackc/pgx/v4 144 | func (c *conn) Query(ctx context.Context, query string, args ...any) (adapter.Rows, error) { 145 | rows, err := c.c.Query(ctx, query, args...) 146 | return &aRows{rows}, err 147 | } 148 | 149 | // Release implements adapter.Conn.Release() using github.com/jackc/pgx/v4 150 | func (c *conn) Release() error { 151 | c.c.Release() 152 | return nil 153 | } 154 | 155 | // connPool implements adapter.ConnPool using github.com/jackc/pgx/v4 156 | type connPool struct { 157 | pool *pgxpool.Pool 158 | } 159 | 160 | // NewConnPool instantiates new adapter.ConnPool using github.com/jackc/pgx/v4 161 | func NewConnPool(pool *pgxpool.Pool) adapter.ConnPool { 162 | return &connPool{pool} 163 | } 164 | 165 | // Ping implements adapter.ConnPool.Ping() using github.com/jackc/pgx/v4 166 | func (c *connPool) Ping(ctx context.Context) error { 167 | return c.pool.Ping(ctx) 168 | } 169 | 170 | // Begin implements adapter.ConnPool.Begin() using github.com/jackc/pgx/v4 171 | func (c *connPool) Begin(ctx context.Context) (adapter.Tx, error) { 172 | tx, err := c.pool.Begin(ctx) 173 | return NewTx(tx), err 174 | } 175 | 176 | // Exec implements adapter.ConnPool.Exec() using github.com/jackc/pgx/v4 177 | func (c *connPool) Exec(ctx context.Context, query string, args ...any) (adapter.CommandTag, error) { 178 | ct, err := c.pool.Exec(ctx, query, args...) 
179 | return aCommandTag{ct}, err 180 | } 181 | 182 | // QueryRow implements adapter.ConnPool.QueryRow() using github.com/jackc/pgx/v4 183 | func (c *connPool) QueryRow(ctx context.Context, query string, args ...any) adapter.Row { 184 | return &aRow{c.pool.QueryRow(ctx, query, args...)} 185 | } 186 | 187 | // Query implements adapter.ConnPool.Query() using github.com/jackc/pgx/v4 188 | func (c *connPool) Query(ctx context.Context, query string, args ...any) (adapter.Rows, error) { 189 | rows, err := c.pool.Query(ctx, query, args...) 190 | return &aRows{rows}, err 191 | } 192 | 193 | // Acquire implements adapter.ConnPool.Acquire() using github.com/jackc/pgx/v4 194 | func (c *connPool) Acquire(ctx context.Context) (adapter.Conn, error) { 195 | cc, err := c.pool.Acquire(ctx) 196 | return NewConn(cc), err 197 | } 198 | 199 | // Close implements adapter.ConnPool.Close() using github.com/jackc/pgx/v4 200 | func (c *connPool) Close() error { 201 | c.pool.Close() 202 | return nil 203 | } 204 | -------------------------------------------------------------------------------- /adapter/pgxv5/db.go: -------------------------------------------------------------------------------- 1 | package pgxv5 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | 7 | "github.com/jackc/pgx/v5" 8 | "github.com/jackc/pgx/v5/pgconn" 9 | "github.com/jackc/pgx/v5/pgxpool" 10 | 11 | "github.com/vgarvardt/gue/v5/adapter" 12 | ) 13 | 14 | // aRow implements adapter.Row using github.com/jackc/pgx/v5 15 | type aRow struct { 16 | row pgx.Row 17 | } 18 | 19 | // Scan implements adapter.Row.Scan() using github.com/jackc/pgx/v5 20 | func (r *aRow) Scan(dest ...any) error { 21 | err := r.row.Scan(dest...) 22 | if errors.Is(err, pgx.ErrNoRows) { 23 | return adapter.ErrNoRows 24 | } 25 | 26 | return err 27 | } 28 | 29 | // aCommandTag implements adapter.CommandTag using github.com/jackc/pgx/v5 30 | type aCommandTag struct { 31 | ct pgconn.CommandTag 32 | } 33 | 34 | // RowsAffected implements adapter.CommandTag.RowsAffected() using github.com/jackc/pgx/v5 35 | func (ct aCommandTag) RowsAffected() int64 { 36 | return ct.ct.RowsAffected() 37 | } 38 | 39 | // aRows implements adapter.Rows using github.com/jackc/pgx/v5 40 | type aRows struct { 41 | rows pgx.Rows 42 | } 43 | 44 | // Next implements adapter.Rows.Next() using github.com/jackc/pgx/v5 45 | func (r *aRows) Next() bool { 46 | return r.rows.Next() 47 | } 48 | 49 | // Scan implements adapter.Rows.Scan() using github.com/jackc/pgx/v5 50 | func (r *aRows) Scan(dest ...any) error { 51 | return r.rows.Scan(dest...) 52 | } 53 | 54 | // Err implements adapter.Rows.Err() using github.com/jackc/pgx/v5 55 | func (r *aRows) Err() error { 56 | return r.rows.Err() 57 | } 58 | 59 | // aTx implements adapter.Tx using github.com/jackc/pgx/v5 60 | type aTx struct { 61 | tx pgx.Tx 62 | } 63 | 64 | // NewTx instantiates new adapter.Tx using github.com/jackc/pgx/v5 65 | func NewTx(tx pgx.Tx) adapter.Tx { 66 | return &aTx{tx: tx} 67 | } 68 | 69 | // UnwrapTx tries to unwrap driver-specific transaction instance from the interface. 70 | // Returns unwrap success as the second parameter. 71 | func UnwrapTx(tx adapter.Tx) (pgx.Tx, bool) { 72 | driverTx, ok := tx.(*aTx) 73 | if !ok { 74 | return nil, false 75 | } 76 | 77 | return driverTx.tx, ok 78 | } 79 | 80 | // Exec implements adapter.Tx.Exec() using github.com/jackc/pgx/v5 81 | func (tx *aTx) Exec(ctx context.Context, query string, args ...any) (adapter.CommandTag, error) { 82 | ct, err := tx.tx.Exec(ctx, query, args...) 
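// wrap the pgconn.CommandTag so callers depend only on the adapter.CommandTag interface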
83 | return aCommandTag{ct}, err 84 | } 85 | 86 | // QueryRow implements adapter.Tx.QueryRow() using github.com/jackc/pgx/v5 87 | func (tx *aTx) QueryRow(ctx context.Context, query string, args ...any) adapter.Row { 88 | return &aRow{tx.tx.QueryRow(ctx, query, args...)} 89 | } 90 | 91 | // Query implements adapter.Tx.Query() using github.com/jackc/pgx/v5 92 | func (tx *aTx) Query(ctx context.Context, query string, args ...any) (adapter.Rows, error) { 93 | rows, err := tx.tx.Query(ctx, query, args...) 94 | return &aRows{rows}, err 95 | } 96 | 97 | // Rollback implements adapter.Tx.Rollback() using github.com/jackc/pgx/v5 98 | func (tx *aTx) Rollback(ctx context.Context) error { 99 | err := tx.tx.Rollback(ctx) 100 | if errors.Is(err, pgx.ErrTxClosed) { 101 | return adapter.ErrTxClosed 102 | } 103 | 104 | return err 105 | } 106 | 107 | // Commit implements adapter.Tx.Commit() using github.com/jackc/pgx/v5 108 | func (tx *aTx) Commit(ctx context.Context) error { 109 | return tx.tx.Commit(ctx) 110 | } 111 | 112 | type conn struct { 113 | c *pgxpool.Conn 114 | } 115 | 116 | // NewConn instantiates new adapter.Conn using github.com/jackc/pgx/v5 117 | func NewConn(c *pgxpool.Conn) adapter.Conn { 118 | return &conn{c} 119 | } 120 | 121 | // Ping implements adapter.Conn.Ping() using github.com/jackc/pgx/v5 122 | func (c *conn) Ping(ctx context.Context) error { 123 | return c.c.Ping(ctx) 124 | } 125 | 126 | // Begin implements adapter.Conn.Begin() using github.com/jackc/pgx/v5 127 | func (c *conn) Begin(ctx context.Context) (adapter.Tx, error) { 128 | tx, err := c.c.Begin(ctx) 129 | return NewTx(tx), err 130 | } 131 | 132 | // Exec implements adapter.Conn.Exec() using github.com/jackc/pgx/v5 133 | func (c *conn) Exec(ctx context.Context, query string, args ...any) (adapter.CommandTag, error) { 134 | r, err := c.c.Exec(ctx, query, args...) 135 | return aCommandTag{r}, err 136 | } 137 | 138 | // QueryRow implements adapter.Conn.QueryRow() github.com/jackc/pgx/v5 139 | func (c *conn) QueryRow(ctx context.Context, query string, args ...any) adapter.Row { 140 | return &aRow{c.c.QueryRow(ctx, query, args...)} 141 | } 142 | 143 | // Query implements adapter.Conn.Query() github.com/jackc/pgx/v5 144 | func (c *conn) Query(ctx context.Context, query string, args ...any) (adapter.Rows, error) { 145 | rows, err := c.c.Query(ctx, query, args...) 
146 | return &aRows{rows}, err 147 | } 148 | 149 | // Release implements adapter.Conn.Release() using github.com/jackc/pgx/v5 150 | func (c *conn) Release() error { 151 | c.c.Release() 152 | return nil 153 | } 154 | 155 | // connPool implements adapter.ConnPool using github.com/jackc/pgx/v5 156 | type connPool struct { 157 | pool *pgxpool.Pool 158 | } 159 | 160 | // NewConnPool instantiates new adapter.ConnPool using github.com/jackc/pgx/v5 161 | func NewConnPool(pool *pgxpool.Pool) adapter.ConnPool { 162 | return &connPool{pool} 163 | } 164 | 165 | // Ping implements adapter.ConnPool.Ping() using github.com/jackc/pgx/v5 166 | func (c *connPool) Ping(ctx context.Context) error { 167 | return c.pool.Ping(ctx) 168 | } 169 | 170 | // Begin implements adapter.ConnPool.Begin() using github.com/jackc/pgx/v5 171 | func (c *connPool) Begin(ctx context.Context) (adapter.Tx, error) { 172 | tx, err := c.pool.Begin(ctx) 173 | return NewTx(tx), err 174 | } 175 | 176 | // Exec implements adapter.ConnPool.Exec() using github.com/jackc/pgx/v5 177 | func (c *connPool) Exec(ctx context.Context, query string, args ...any) (adapter.CommandTag, error) { 178 | ct, err := c.pool.Exec(ctx, query, args...) 179 | return aCommandTag{ct}, err 180 | } 181 | 182 | // QueryRow implements adapter.ConnPool.QueryRow() using github.com/jackc/pgx/v5 183 | func (c *connPool) QueryRow(ctx context.Context, query string, args ...any) adapter.Row { 184 | return &aRow{c.pool.QueryRow(ctx, query, args...)} 185 | } 186 | 187 | // Query implements adapter.ConnPool.Query() using github.com/jackc/pgx/v5 188 | func (c *connPool) Query(ctx context.Context, query string, args ...any) (adapter.Rows, error) { 189 | rows, err := c.pool.Query(ctx, query, args...) 190 | return &aRows{rows}, err 191 | } 192 | 193 | // Acquire implements adapter.ConnPool.Acquire() using github.com/jackc/pgx/v5 194 | func (c *connPool) Acquire(ctx context.Context) (adapter.Conn, error) { 195 | cc, err := c.pool.Acquire(ctx) 196 | return NewConn(cc), err 197 | } 198 | 199 | // Close implements adapter.ConnPool.Close() using github.com/jackc/pgx/v5 200 | func (c *connPool) Close() error { 201 | c.pool.Close() 202 | return nil 203 | } 204 | -------------------------------------------------------------------------------- /adapter/slog/logger.go: -------------------------------------------------------------------------------- 1 | //go:build go1.21 2 | 3 | package slog 4 | 5 | import ( 6 | libSLog "log/slog" 7 | 8 | "github.com/vgarvardt/gue/v5/adapter" 9 | ) 10 | 11 | var _ adapter.Logger = &slog{} 12 | 13 | type slog struct { 14 | l *libSLog.Logger 15 | } 16 | 17 | // New instantiates new adapter.Logger using go.uber.org/slog 18 | func New(l *libSLog.Logger) adapter.Logger { 19 | return &slog{l} 20 | } 21 | 22 | // Debug implements Logger.Debug for go.uber.org/slog logger 23 | func (l *slog) Debug(msg string, fields ...adapter.Field) { 24 | l.l.Debug(msg, l.slogFields(fields...)...) 25 | } 26 | 27 | // Info implements Logger.Debug for go.uber.org/slog logger 28 | func (l *slog) Info(msg string, fields ...adapter.Field) { 29 | l.l.Info(msg, l.slogFields(fields...)...) 30 | } 31 | 32 | // Error implements Logger.Debug for go.uber.org/slog logger 33 | func (l *slog) Error(msg string, fields ...adapter.Field) { 34 | l.l.Error(msg, l.slogFields(fields...)...) 
35 | } 36 | 37 | // With implements nested logger for go.uber.org/slog logger 38 | func (l *slog) With(fields ...adapter.Field) adapter.Logger { 39 | return New(l.l.With(l.slogFields(fields...)...)) 40 | } 41 | 42 | func (l *slog) slogFields(fields ...adapter.Field) []any { 43 | result := make([]any, 0, len(fields)*2) 44 | for _, f := range fields { 45 | result = append(result, f.Key, f.Value) 46 | } 47 | return result 48 | } 49 | -------------------------------------------------------------------------------- /adapter/slog/logger_test.go: -------------------------------------------------------------------------------- 1 | //go:build go1.21 2 | 3 | package slog 4 | 5 | import ( 6 | "bytes" 7 | "errors" 8 | libSLog "log/slog" 9 | "strings" 10 | "testing" 11 | 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | 15 | "github.com/vgarvardt/gue/v5/adapter" 16 | ) 17 | 18 | func TestNew(t *testing.T) { 19 | var buf bytes.Buffer 20 | l := libSLog.New(libSLog.NewJSONHandler(&buf, &libSLog.HandlerOptions{ 21 | AddSource: true, 22 | Level: libSLog.LevelDebug, 23 | })) 24 | ll := New(l) 25 | 26 | err := errors.New("something went wrong") 27 | 28 | ll.Debug("debug-1", adapter.F("debug-key", "debug-val")) 29 | ll.Info("info-1", adapter.F("info-key", "info-val")) 30 | ll.Error("error-1", adapter.F("error-key", "error-val")) 31 | ll.Error("error-2", adapter.Err(err)) 32 | 33 | lll := ll.With(adapter.F("nested-key", "nested-val")) 34 | lll.Info("info-2", adapter.F("info-key-2", "info-val-2")) 35 | 36 | lines := strings.Split(strings.TrimSpace(buf.String()), "\n") 37 | require.Len(t, lines, 5) 38 | 39 | for line, contains := range [][]string{ 40 | {`"level":"DEBUG"`, `"msg":"debug-1"`, `"debug-key":"debug-val"`}, 41 | {`"level":"INFO"`, `"msg":"info-1"`, `"info-key":"info-val"`}, 42 | {`"level":"ERROR"`, `"msg":"error-1"`, `"error-key":"error-val"`}, 43 | {`"level":"ERROR"`, `"msg":"error-2"`, `"error":"something went wrong"`}, 44 | {`"level":"INFO"`, `"msg":"info-2"`, `"info-key-2":"info-val-2"`, `"nested-key":"nested-val"`}, 45 | } { 46 | for _, sub := range contains { 47 | assert.Contains(t, lines[line], sub) 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /adapter/testing/all.go: -------------------------------------------------------------------------------- 1 | package testing 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "os" 7 | "sync" 8 | "testing" 9 | 10 | _ "github.com/lib/pq" // register pq sql driver 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | 14 | "github.com/vgarvardt/gue/v5/adapter" 15 | ) 16 | 17 | const defaultPoolConns = 5 18 | 19 | var migrations sync.Map 20 | 21 | // OpenTestPool callback type for opening connection pool with default parameters used in tests 22 | type OpenTestPool func(t testing.TB) adapter.ConnPool 23 | 24 | // OpenOpenTestPoolMaxConns callback type for opening connection pool with custom max connections used in tests 25 | type OpenOpenTestPoolMaxConns func(t testing.TB, maxConnections int32) adapter.ConnPool 26 | 27 | // AllAdaptersOpenTestPool lists all available adapters with callbacks 28 | var AllAdaptersOpenTestPool = map[string]OpenTestPool{ 29 | "pgx/v4": OpenTestPoolPGXv4, 30 | "pgx/v5": OpenTestPoolPGXv5, 31 | "lib/pq": OpenTestPoolLibPQ, 32 | } 33 | 34 | func truncateAndClose(t testing.TB, pool adapter.ConnPool) { 35 | t.Helper() 36 | 37 | _, err := pool.Exec(context.Background(), "TRUNCATE TABLE gue_jobs") 38 
| assert.NoError(t, err) 39 | 40 | err = pool.Close() 41 | assert.NoError(t, err) 42 | } 43 | 44 | func applyMigrations(schema string) *sync.Once { 45 | once, _ := migrations.LoadOrStore(schema, &sync.Once{}) 46 | return once.(*sync.Once) 47 | } 48 | 49 | func doApplyMigrations(t testing.TB, schema string) { 50 | t.Helper() 51 | 52 | dsn := testConnDSN(t) 53 | if schema != "" { 54 | dsn += "&search_path=" + schema 55 | t.Logf("doApplyMigrations dsn: %s", dsn) 56 | } 57 | 58 | migrationsConn, err := sql.Open("postgres", dsn) 59 | require.NoError(t, err) 60 | defer func() { 61 | err := migrationsConn.Close() 62 | assert.NoError(t, err) 63 | }() 64 | 65 | migrationSQL, err := os.ReadFile("./migrations/schema.sql") 66 | require.NoError(t, err) 67 | 68 | if schema != "" { 69 | _, err := migrationsConn.Exec("CREATE SCHEMA IF NOT EXISTS " + schema) 70 | require.NoError(t, err) 71 | } 72 | 73 | _, err = migrationsConn.Exec(string(migrationSQL)) 74 | require.NoError(t, err) 75 | } 76 | 77 | func testConnDSN(t testing.TB) string { 78 | t.Helper() 79 | 80 | testPgConnString, found := os.LookupEnv("TEST_POSTGRES") 81 | require.True(t, found, "TEST_POSTGRES env var is not set") 82 | require.NotEmpty(t, testPgConnString, "TEST_POSTGRES env var is empty") 83 | 84 | return testPgConnString 85 | // return `postgres://test:test@localhost:54823/test?sslmode=disable` 86 | } 87 | -------------------------------------------------------------------------------- /adapter/testing/libpq.go: -------------------------------------------------------------------------------- 1 | package testing 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "testing" 7 | 8 | _ "github.com/lib/pq" // register postgres driver 9 | "github.com/stretchr/testify/require" 10 | 11 | "github.com/vgarvardt/gue/v5/adapter" 12 | "github.com/vgarvardt/gue/v5/adapter/libpq" 13 | ) 14 | 15 | // OpenTestPoolMaxConnsLibPQ opens connections pool used in testing 16 | func OpenTestPoolMaxConnsLibPQ(t testing.TB, maxConnections int, gueSchema, secondSchema string) adapter.ConnPool { 17 | t.Helper() 18 | 19 | if (gueSchema == "" && secondSchema != "") || (gueSchema != "" && secondSchema == "") { 20 | require.Fail(t, "Both schemas should be either set or unset") 21 | } 22 | 23 | applyMigrations(gueSchema).Do(func() { 24 | doApplyMigrations(t, gueSchema) 25 | }) 26 | 27 | dsn := testConnDSN(t) 28 | if gueSchema != "" && secondSchema != "" { 29 | dsn += fmt.Sprintf("&search_path=%s,%s", secondSchema, gueSchema) 30 | } 31 | 32 | db, err := sql.Open("postgres", dsn) 33 | require.NoError(t, err) 34 | 35 | db.SetMaxOpenConns(maxConnections) 36 | 37 | // guw schema will be created by migrations routine, we need to take care only on the second one 38 | if secondSchema != "" { 39 | _, err := db.Exec("CREATE SCHEMA IF NOT EXISTS " + secondSchema) 40 | require.NoError(t, err) 41 | } 42 | 43 | pool := libpq.NewConnPool(db) 44 | 45 | t.Cleanup(func() { 46 | truncateAndClose(t, pool) 47 | }) 48 | 49 | return pool 50 | } 51 | 52 | // OpenTestPoolLibPQ opens connections pool used in testing 53 | func OpenTestPoolLibPQ(t testing.TB) adapter.ConnPool { 54 | t.Helper() 55 | 56 | return OpenTestPoolMaxConnsLibPQ(t, defaultPoolConns, "", "") 57 | } 58 | 59 | // OpenTestPoolLibPQCustomSchemas opens connections pool used in testing with gue table installed to own schema and 60 | // search_path set to two different schemas 61 | func OpenTestPoolLibPQCustomSchemas(t testing.TB, gueSchema, secondSchema string) adapter.ConnPool { 62 | t.Helper() 63 | 64 | return 
OpenTestPoolMaxConnsLibPQ(t, defaultPoolConns, gueSchema, secondSchema) 65 | } 66 | -------------------------------------------------------------------------------- /adapter/testing/mock.go: -------------------------------------------------------------------------------- 1 | package testing 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/stretchr/testify/mock" 7 | 8 | "github.com/vgarvardt/gue/v5/adapter" 9 | ) 10 | 11 | var ( 12 | _ adapter.Row = &Row{} 13 | _ adapter.Rows = &Rows{} 14 | _ adapter.CommandTag = &CommandTag{} 15 | _ adapter.Queryable = &Queryable{} 16 | _ adapter.Tx = &Tx{} 17 | _ adapter.Conn = &Conn{} 18 | _ adapter.ConnPool = &ConnPool{} 19 | ) 20 | 21 | // Row mock implementation of adapter.Row 22 | type Row struct { 23 | mock.Mock 24 | } 25 | 26 | // Scan mock implementation of adapter.Row.Scan() 27 | func (m *Row) Scan(dest ...any) error { 28 | args := m.Called(dest...) 29 | return args.Error(0) 30 | } 31 | 32 | // CommandTag mock implementation of adapter.CommandTag 33 | type CommandTag struct { 34 | mock.Mock 35 | } 36 | 37 | // RowsAffected mock implementation of adapter.CommandTag.RowsAffected() 38 | func (m *CommandTag) RowsAffected() int64 { 39 | args := m.Called() 40 | return args.Get(0).(int64) 41 | } 42 | 43 | // Rows mock implementation of adapter.Rows 44 | type Rows struct { 45 | mock.Mock 46 | } 47 | 48 | // Next mock implementation of adapter.Rows.Next() 49 | func (m *Rows) Next() bool { 50 | args := m.Called() 51 | return args.Bool(0) 52 | } 53 | 54 | // Scan mock implementation of adapter.Rows.Scan() 55 | func (m *Rows) Scan(dest ...any) error { 56 | args := m.Called(dest...) 57 | return args.Error(0) 58 | } 59 | 60 | // Err mock implementation of adapter.Rows.Err() 61 | func (m *Rows) Err() error { 62 | args := m.Called() 63 | return args.Error(0) 64 | } 65 | 66 | // Queryable mock implementation of adapter.Queryable 67 | type Queryable struct { 68 | mock.Mock 69 | } 70 | 71 | // Exec mock implementation of adapter.Queryable.Exec() 72 | func (m *Queryable) Exec(ctx context.Context, query string, args ...any) (adapter.CommandTag, error) { 73 | mArgs := m.Called(ctx, query, args) 74 | arg0 := mArgs.Get(0) 75 | if arg0 == nil { 76 | return nil, mArgs.Error(1) 77 | } 78 | return arg0.(adapter.CommandTag), mArgs.Error(1) 79 | } 80 | 81 | // QueryRow mock implementation of adapter.Queryable.QueryRow() 82 | func (m *Queryable) QueryRow(ctx context.Context, query string, args ...any) adapter.Row { 83 | mArgs := m.Called(ctx, query, args) 84 | return mArgs.Get(0).(adapter.Row) 85 | } 86 | 87 | // Query mock implementation of adapter.Queryable.Query() 88 | func (m *Queryable) Query(ctx context.Context, query string, args ...any) (adapter.Rows, error) { 89 | mArgs := m.Called(ctx, query, args) 90 | arg0 := mArgs.Get(0) 91 | if arg0 == nil { 92 | return nil, mArgs.Error(1) 93 | } 94 | return arg0.(adapter.Rows), mArgs.Error(1) 95 | } 96 | 97 | // Tx mock implementation of adapter.Tx 98 | type Tx struct { 99 | Queryable 100 | mock.Mock 101 | } 102 | 103 | // Rollback mock implementation of adapter.Tx.Rollback() 104 | func (m *Tx) Rollback(ctx context.Context) error { 105 | args := m.Called(ctx) 106 | return args.Error(0) 107 | } 108 | 109 | // Commit mock implementation of adapter.Tx.Commit() 110 | func (m *Tx) Commit(ctx context.Context) error { 111 | args := m.Called(ctx) 112 | return args.Error(0) 113 | } 114 | 115 | // Conn mock implementation of adapter.Conn 116 | type Conn struct { 117 | Queryable 118 | mock.Mock 119 | } 120 | 121 | // Ping mock 
implementation of adapter.Conn.Ping() 122 | func (m *Conn) Ping(ctx context.Context) error { 123 | args := m.Called(ctx) 124 | return args.Error(0) 125 | } 126 | 127 | // Begin mock implementation of adapter.Conn.Begin() 128 | func (m *Conn) Begin(ctx context.Context) (adapter.Tx, error) { 129 | mArgs := m.Called(ctx) 130 | arg0 := mArgs.Get(0) 131 | if arg0 == nil { 132 | return nil, mArgs.Error(1) 133 | } 134 | return arg0.(adapter.Tx), mArgs.Error(1) 135 | } 136 | 137 | // Release mock implementation of adapter.Conn.Release() 138 | func (m *Conn) Release() error { 139 | args := m.Called() 140 | return args.Error(0) 141 | } 142 | 143 | // ConnPool mock implementation of adapter.ConnPool 144 | type ConnPool struct { 145 | Queryable 146 | mock.Mock 147 | } 148 | 149 | // Ping mock implementation of adapter.ConnPool.Ping() 150 | func (m *ConnPool) Ping(ctx context.Context) error { 151 | args := m.Called(ctx) 152 | return args.Error(0) 153 | } 154 | 155 | // Begin mock implementation of adapter.ConnPool.Begin() 156 | func (m *ConnPool) Begin(ctx context.Context) (adapter.Tx, error) { 157 | mArgs := m.Called(ctx) 158 | arg0 := mArgs.Get(0) 159 | if arg0 == nil { 160 | return nil, mArgs.Error(1) 161 | } 162 | return arg0.(adapter.Tx), mArgs.Error(1) 163 | } 164 | 165 | // Acquire mock implementation of adapter.ConnPool.Acquire() 166 | func (m *ConnPool) Acquire(ctx context.Context) (adapter.Conn, error) { 167 | mArgs := m.Called(ctx) 168 | arg0 := mArgs.Get(0) 169 | if arg0 == nil { 170 | return nil, mArgs.Error(1) 171 | } 172 | return arg0.(adapter.Conn), mArgs.Error(1) 173 | } 174 | 175 | // Close mock implementation of adapter.ConnPool.Close() 176 | func (m *ConnPool) Close() error { 177 | args := m.Called() 178 | return args.Error(0) 179 | } 180 | -------------------------------------------------------------------------------- /adapter/testing/pgxv4.go: -------------------------------------------------------------------------------- 1 | package testing 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/jackc/pgx/v4/pgxpool" 8 | "github.com/stretchr/testify/require" 9 | 10 | "github.com/vgarvardt/gue/v5/adapter" 11 | "github.com/vgarvardt/gue/v5/adapter/pgxv4" 12 | ) 13 | 14 | // OpenTestPoolMaxConnsPGXv4 opens connections pool used in testing 15 | func OpenTestPoolMaxConnsPGXv4(t testing.TB, maxConnections int32) adapter.ConnPool { 16 | t.Helper() 17 | 18 | applyMigrations("").Do(func() { 19 | doApplyMigrations(t, "") 20 | }) 21 | 22 | connPoolConfig, err := pgxpool.ParseConfig(testConnDSN(t)) 23 | require.NoError(t, err) 24 | 25 | connPoolConfig.MaxConns = maxConnections 26 | 27 | poolPGXv4, err := pgxpool.ConnectConfig(context.Background(), connPoolConfig) 28 | require.NoError(t, err) 29 | 30 | pool := pgxv4.NewConnPool(poolPGXv4) 31 | 32 | t.Cleanup(func() { 33 | truncateAndClose(t, pool) 34 | }) 35 | 36 | return pool 37 | } 38 | 39 | // OpenTestPoolPGXv4 opens connections pool used in testing 40 | func OpenTestPoolPGXv4(t testing.TB) adapter.ConnPool { 41 | t.Helper() 42 | 43 | return OpenTestPoolMaxConnsPGXv4(t, defaultPoolConns) 44 | } 45 | -------------------------------------------------------------------------------- /adapter/testing/pgxv5.go: -------------------------------------------------------------------------------- 1 | package testing 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/jackc/pgx/v5/pgxpool" 8 | "github.com/stretchr/testify/require" 9 | 10 | "github.com/vgarvardt/gue/v5/adapter" 11 | "github.com/vgarvardt/gue/v5/adapter/pgxv5" 12 
| ) 13 | 14 | // OpenTestPoolMaxConnsPGXv5 opens connections pool used in testing 15 | func OpenTestPoolMaxConnsPGXv5(t testing.TB, maxConnections int32) adapter.ConnPool { 16 | t.Helper() 17 | 18 | applyMigrations("").Do(func() { 19 | doApplyMigrations(t, "") 20 | }) 21 | 22 | connPoolConfig, err := pgxpool.ParseConfig(testConnDSN(t)) 23 | require.NoError(t, err) 24 | 25 | connPoolConfig.MaxConns = maxConnections 26 | 27 | poolPGXv5, err := pgxpool.NewWithConfig(context.Background(), connPoolConfig) 28 | require.NoError(t, err) 29 | 30 | pool := pgxv5.NewConnPool(poolPGXv5) 31 | 32 | t.Cleanup(func() { 33 | truncateAndClose(t, pool) 34 | }) 35 | 36 | return pool 37 | } 38 | 39 | // OpenTestPoolPGXv5 opens connections pool used in testing 40 | func OpenTestPoolPGXv5(t testing.TB) adapter.ConnPool { 41 | t.Helper() 42 | 43 | return OpenTestPoolMaxConnsPGXv5(t, defaultPoolConns) 44 | } 45 | -------------------------------------------------------------------------------- /adapter/zap/logger.go: -------------------------------------------------------------------------------- 1 | package zap 2 | 3 | import ( 4 | uberZap "go.uber.org/zap" 5 | 6 | "github.com/vgarvardt/gue/v5/adapter" 7 | ) 8 | 9 | var _ adapter.Logger = &zap{} 10 | 11 | type zap struct { 12 | l *uberZap.Logger 13 | } 14 | 15 | // New instantiates new adapter.Logger using go.uber.org/zap 16 | func New(l *uberZap.Logger) adapter.Logger { 17 | return &zap{l} 18 | } 19 | 20 | // Debug implements Logger.Debug for go.uber.org/zap logger 21 | func (l *zap) Debug(msg string, fields ...adapter.Field) { 22 | l.l.Debug(msg, l.zapFields(fields...)...) 23 | } 24 | 25 | // Info implements Logger.Debug for go.uber.org/zap logger 26 | func (l *zap) Info(msg string, fields ...adapter.Field) { 27 | l.l.Info(msg, l.zapFields(fields...)...) 28 | } 29 | 30 | // Error implements Logger.Debug for go.uber.org/zap logger 31 | func (l *zap) Error(msg string, fields ...adapter.Field) { 32 | l.l.Error(msg, l.zapFields(fields...)...) 
33 | } 34 | 35 | // With implements nested logger for go.uber.org/zap logger 36 | func (l *zap) With(fields ...adapter.Field) adapter.Logger { 37 | return New(l.l.With(l.zapFields(fields...)...)) 38 | } 39 | 40 | func (l *zap) zapFields(fields ...adapter.Field) []uberZap.Field { 41 | result := make([]uberZap.Field, len(fields)) 42 | for i, f := range fields { 43 | result[i] = uberZap.Any(f.Key, f.Value) 44 | } 45 | return result 46 | } 47 | -------------------------------------------------------------------------------- /adapter/zap/logger_test.go: -------------------------------------------------------------------------------- 1 | package zap 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | uberZap "go.uber.org/zap" 10 | "go.uber.org/zap/zapcore" 11 | "go.uber.org/zap/zaptest/observer" 12 | 13 | "github.com/vgarvardt/gue/v5/adapter" 14 | ) 15 | 16 | func TestNew(t *testing.T) { 17 | zapCore, logs := observer.New(zapcore.DebugLevel) 18 | l := uberZap.New(zapCore) 19 | ll := New(l) 20 | 21 | err := errors.New("something went wrong") 22 | 23 | ll.Debug("debug-1", adapter.F("debug-key", "debug-val")) 24 | ll.Info("info-1", adapter.F("info-key", "info-val")) 25 | ll.Error("error-1", adapter.F("error-key", "error-val")) 26 | ll.Error("error-2", adapter.Err(err)) 27 | 28 | lll := ll.With(adapter.F("nested-key", "nested-val")) 29 | lll.Info("info-2", adapter.F("info-key-2", "info-val-2")) 30 | 31 | require.Equal(t, 5, logs.Len()) 32 | 33 | var i any 34 | 35 | logEntries := logs.AllUntimed() 36 | assert.Equal(t, []observer.LoggedEntry{ 37 | { 38 | Entry: zapcore.Entry{ 39 | Level: zapcore.DebugLevel, 40 | Message: "debug-1", 41 | }, 42 | Context: []zapcore.Field{ 43 | { 44 | Key: "debug-key", 45 | String: "debug-val", 46 | Type: zapcore.StringType, 47 | Interface: i, 48 | }, 49 | }, 50 | }, { 51 | Entry: zapcore.Entry{ 52 | Level: zapcore.InfoLevel, 53 | Message: "info-1", 54 | }, 55 | Context: []zapcore.Field{ 56 | { 57 | Key: "info-key", 58 | String: "info-val", 59 | Type: zapcore.StringType, 60 | Interface: i, 61 | }, 62 | }, 63 | }, { 64 | Entry: zapcore.Entry{ 65 | Level: zapcore.ErrorLevel, 66 | Message: "error-1", 67 | }, 68 | Context: []zapcore.Field{ 69 | { 70 | Key: "error-key", 71 | String: "error-val", 72 | Type: zapcore.StringType, 73 | Interface: i, 74 | }, 75 | }, 76 | }, { 77 | Entry: zapcore.Entry{ 78 | Level: zapcore.ErrorLevel, 79 | Message: "error-2", 80 | }, 81 | Context: []zapcore.Field{ 82 | { 83 | Key: adapter.KeyError, 84 | Type: zapcore.ErrorType, 85 | Interface: err, 86 | }, 87 | }, 88 | }, { 89 | Entry: zapcore.Entry{ 90 | Level: zapcore.InfoLevel, 91 | Message: "info-2", 92 | }, 93 | Context: []zapcore.Field{ 94 | { 95 | Key: "nested-key", 96 | String: "nested-val", 97 | Type: zapcore.StringType, 98 | Interface: i, 99 | }, { 100 | Key: "info-key-2", 101 | String: "info-val-2", 102 | Type: zapcore.StringType, 103 | Interface: i, 104 | }, 105 | }, 106 | }, 107 | }, logEntries) 108 | } 109 | -------------------------------------------------------------------------------- /adapter/zerolog/logger.go: -------------------------------------------------------------------------------- 1 | package zerolog 2 | 3 | import ( 4 | rsZerolog "github.com/rs/zerolog" 5 | 6 | "github.com/vgarvardt/gue/v5/adapter" 7 | ) 8 | 9 | var _ adapter.Logger = &zerolog{} 10 | 11 | type zerolog struct { 12 | l rsZerolog.Logger 13 | } 14 | 15 | // New instantiates new adapter.Logger using github.com/rs/zerolog 16 | 
func New(l rsZerolog.Logger) adapter.Logger { 17 | return &zerolog{l} 18 | } 19 | 20 | // Debug implements Logger.Debug for github.com/rs/zerolog logger 21 | func (l *zerolog) Debug(msg string, fields ...adapter.Field) { 22 | l.l.Debug().Fields(l.zerologFields(fields...)).Msg(msg) 23 | } 24 | 25 | // Info implements Logger.Info for github.com/rs/zerolog logger 26 | func (l *zerolog) Info(msg string, fields ...adapter.Field) { 27 | l.l.Info().Fields(l.zerologFields(fields...)).Msg(msg) 28 | } 29 | 30 | // Error implements Logger.Error for github.com/rs/zerolog logger 31 | func (l *zerolog) Error(msg string, fields ...adapter.Field) { 32 | l.l.Error().Fields(l.zerologFields(fields...)).Msg(msg) 33 | } 34 | 35 | // With implements nested logger for github.com/rs/zerolog logger 36 | func (l *zerolog) With(fields ...adapter.Field) adapter.Logger { 37 | return New(l.l.With().Fields(l.zerologFields(fields...)).Logger()) 38 | } 39 | 40 | func (l *zerolog) zerologFields(fields ...adapter.Field) map[string]any { 41 | fieldsMap := make(map[string]any, len(fields)) 42 | for _, f := range fields { 43 | fieldsMap[f.Key] = f.Value 44 | } 45 | return fieldsMap 46 | } 47 | -------------------------------------------------------------------------------- /adapter/zerolog/logger_test.go: -------------------------------------------------------------------------------- 1 | package zerolog 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "strings" 7 | "testing" 8 | 9 | rsZerolog "github.com/rs/zerolog" 10 | "github.com/stretchr/testify/assert" 11 | "github.com/stretchr/testify/require" 12 | 13 | "github.com/vgarvardt/gue/v5/adapter" 14 | ) 15 | 16 | func TestNew(t *testing.T) { 17 | buf := new(bytes.Buffer) 18 | zLog := rsZerolog.New(buf) 19 | ll := New(zLog) 20 | 21 | err := errors.New("something went wrong") 22 | 23 | ll.Debug("debug-1", adapter.F("debug-key", "debug-val")) 24 | ll.Info("info-1", adapter.F("info-key", "info-val")) 25 | ll.Error("error-1", adapter.F("error-key", "error-val")) 26 | ll.Error("error-2", adapter.Err(err)) 27 | 28 | lll := ll.With(adapter.F("nested-key", "nested-val")) 29 | lll.Info("info-2", adapter.F("info-key-2", "info-val-2")) 30 | 31 | logLines := strings.Split(strings.TrimSpace(buf.String()), "\n") 32 | require.Len(t, logLines, 5) 33 | 34 | assert.Contains(t, logLines[0], `"level":"debug"`) 35 | assert.Contains(t, logLines[0], `"message":"debug-1"`) 36 | assert.Contains(t, logLines[0], `"debug-key":"debug-val"`) 37 | 38 | assert.Contains(t, logLines[1], `"level":"info"`) 39 | assert.Contains(t, logLines[1], `"message":"info-1"`) 40 | assert.Contains(t, logLines[1], `"info-key":"info-val"`) 41 | 42 | assert.Contains(t, logLines[2], `"level":"error"`) 43 | assert.Contains(t, logLines[2], `"message":"error-1"`) 44 | assert.Contains(t, logLines[2], `"error-key":"error-val"`) 45 | 46 | assert.Contains(t, logLines[3], `"level":"error"`) 47 | assert.Contains(t, logLines[3], `"message":"error-2"`) 48 | assert.Contains(t, logLines[3], `"error":"something went wrong"`) 49 | 50 | assert.Contains(t, logLines[4], `"level":"info"`) 51 | assert.Contains(t, logLines[4], `"message":"info-2"`) 52 | assert.Contains(t, logLines[4], `"nested-key":"nested-val"`) 53 | assert.Contains(t, logLines[4], `"info-key-2":"info-val-2"`) 54 | } 55 | -------------------------------------------------------------------------------- /backoff.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "time" 5 | 6 | exp "github.com/vgarvardt/backoff" 7 |
) 8 | 9 | // Backoff is the interface for backoff implementation that will be used to reschedule errored jobs to a later time. 10 | // If the Backoff implementation returns negative duration - the job will be discarded. 11 | type Backoff func(retries int) time.Duration 12 | 13 | var ( 14 | // DefaultExponentialBackoff is the exponential Backoff implementation with default config applied 15 | DefaultExponentialBackoff = NewExponentialBackoff(exp.Config{ 16 | BaseDelay: 1.0 * time.Second, 17 | Multiplier: 1.6, 18 | Jitter: 0.2, 19 | MaxDelay: 1.0 * time.Hour, 20 | }) 21 | 22 | // BackoffNever is the Backoff implementation that never returns errored job to the queue for retry, 23 | // but discards it in case of the error. 24 | BackoffNever = func(retries int) time.Duration { 25 | return -1 26 | } 27 | ) 28 | 29 | // NewExponentialBackoff instantiates new exponential Backoff implementation with config 30 | func NewExponentialBackoff(cfg exp.Config) Backoff { 31 | return exp.Exponential{Config: cfg}.Backoff 32 | } 33 | 34 | // NewConstantBackoff instantiates new backoff implementation with the constant retry duration that does not depend 35 | // on the retry. 36 | func NewConstantBackoff(d time.Duration) Backoff { 37 | return func(int) time.Duration { 38 | return d 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /backoff_test.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | "go.uber.org/zap/zaptest" 12 | 13 | "github.com/vgarvardt/gue/v5/adapter" 14 | adapterTesting "github.com/vgarvardt/gue/v5/adapter/testing" 15 | adapterZap "github.com/vgarvardt/gue/v5/adapter/zap" 16 | ) 17 | 18 | func TestBackoff(t *testing.T) { 19 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 20 | t.Run(name, func(t *testing.T) { 21 | testBackoff(t, openFunc(t)) 22 | }) 23 | } 24 | } 25 | 26 | func testBackoff(t *testing.T, connPool adapter.ConnPool) { 27 | ctx := context.Background() 28 | logger := adapterZap.New(zaptest.NewLogger(t)) 29 | now := time.Now() 30 | 31 | t.Run("default exponential backoff", func(t *testing.T) { 32 | c, err := NewClient(connPool, WithClientLogger(logger)) 33 | require.NoError(t, err) 34 | 35 | j := Job{RunAt: now, Type: "foo"} 36 | err = c.Enqueue(ctx, &j) 37 | require.NoError(t, err) 38 | 39 | jLocked1, err := c.LockJobByID(ctx, j.ID) 40 | require.NoError(t, err) 41 | 42 | err = jLocked1.Error(ctx, errors.New("return with the error")) 43 | require.NoError(t, err) 44 | 45 | jLocked2, err := c.LockJobByID(ctx, j.ID) 46 | require.NoError(t, err) 47 | 48 | assert.Equal(t, int32(1), jLocked2.ErrorCount) 49 | assert.True(t, jLocked2.LastError.Valid) 50 | assert.Equal(t, "return with the error", jLocked2.LastError.String) 51 | assert.Greater(t, jLocked2.RunAt.Unix(), jLocked1.RunAt.Unix()) 52 | 53 | err = jLocked2.Done(ctx) 54 | require.NoError(t, err) 55 | }) 56 | 57 | t.Run("never backoff", func(t *testing.T) { 58 | c, err := NewClient(connPool, WithClientLogger(logger), WithClientBackoff(BackoffNever)) 59 | require.NoError(t, err) 60 | 61 | j := Job{RunAt: now, Type: "bar"} 62 | err = c.Enqueue(ctx, &j) 63 | require.NoError(t, err) 64 | 65 | jLocked1, err := c.LockJobByID(ctx, j.ID) 66 | require.NoError(t, err) 67 | 68 | err = jLocked1.Error(ctx, errors.New("return with the error")) 69 | require.NoError(t, 
err) 70 | 71 | jLocked2, err := c.LockJobByID(ctx, j.ID) 72 | require.Error(t, err) 73 | assert.Nil(t, jLocked2) 74 | }) 75 | 76 | t.Run("const backoff", func(t *testing.T) { 77 | c, err := NewClient(connPool, WithClientLogger(logger), WithClientBackoff(NewConstantBackoff(time.Minute))) 78 | require.NoError(t, err) 79 | 80 | j := Job{RunAt: now, Type: "foo"} 81 | err = c.Enqueue(ctx, &j) 82 | require.NoError(t, err) 83 | 84 | jLocked1, err := c.LockJobByID(ctx, j.ID) 85 | require.NoError(t, err) 86 | 87 | err = jLocked1.Error(ctx, errors.New("return with the error")) 88 | require.NoError(t, err) 89 | 90 | jLocked2, err := c.LockJobByID(ctx, j.ID) 91 | require.NoError(t, err) 92 | 93 | assert.Equal(t, int32(1), jLocked2.ErrorCount) 94 | assert.True(t, jLocked2.LastError.Valid) 95 | assert.Equal(t, "return with the error", jLocked2.LastError.String) 96 | assert.Greater(t, jLocked2.RunAt.Unix(), jLocked1.RunAt.Unix()) 97 | assert.WithinDuration(t, jLocked1.RunAt.Add(time.Minute), jLocked2.RunAt, time.Second) 98 | 99 | err = jLocked2.Done(ctx) 100 | require.NoError(t, err) 101 | }) 102 | } 103 | -------------------------------------------------------------------------------- /client.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "context" 5 | "crypto/rand" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "strings" 10 | "time" 11 | 12 | "github.com/oklog/ulid/v2" 13 | "go.opentelemetry.io/otel/attribute" 14 | "go.opentelemetry.io/otel/metric" 15 | "go.opentelemetry.io/otel/metric/noop" 16 | 17 | "github.com/vgarvardt/gue/v5/adapter" 18 | ) 19 | 20 | // ErrMissingType is returned when you attempt to enqueue a job with no Type 21 | // specified. 22 | var ErrMissingType = errors.New("job type must be specified") 23 | 24 | var ( 25 | attrJobType = attribute.Key("job-type") 26 | attrSuccess = attribute.Key("success") 27 | ) 28 | 29 | // Client is a Gue client that can add jobs to the queue and remove jobs from 30 | // the queue. 31 | type Client struct { 32 | pool adapter.ConnPool 33 | logger adapter.Logger 34 | id string 35 | backoff Backoff 36 | meter metric.Meter 37 | 38 | entropy io.Reader 39 | 40 | mEnqueue metric.Int64Counter 41 | mLockJob metric.Int64Counter 42 | } 43 | 44 | // NewClient creates a new Client that uses the pgx pool. 45 | func NewClient(pool adapter.ConnPool, options ...ClientOption) (*Client, error) { 46 | instance := Client{ 47 | pool: pool, 48 | logger: adapter.NoOpLogger{}, 49 | id: RandomStringID(), 50 | backoff: DefaultExponentialBackoff, 51 | meter: noop.NewMeterProvider().Meter("noop"), 52 | entropy: &ulid.LockedMonotonicReader{ 53 | MonotonicReader: ulid.Monotonic(rand.Reader, 0), 54 | }, 55 | } 56 | 57 | for _, option := range options { 58 | option(&instance) 59 | } 60 | 61 | instance.logger = instance.logger.With(adapter.F("client-id", instance.id)) 62 | 63 | return &instance, instance.initMetrics() 64 | } 65 | 66 | // Enqueue adds a job to the queue. 67 | func (c *Client) Enqueue(ctx context.Context, j *Job) error { 68 | return c.execEnqueue(ctx, []*Job{j}, c.pool) 69 | } 70 | 71 | // EnqueueWithID adds a job to the queue with a specific id 72 | func (c *Client) EnqueueWithID(ctx context.Context, j *Job, jobID ulid.ULID) error { 73 | return c.execEnqueueWithID(ctx, []*Job{j}, c.pool, []ulid.ULID{jobID}) 74 | } 75 | 76 | // EnqueueTx adds a job to the queue within the scope of the transaction. 
77 | // This allows you to guarantee that an enqueued job will either be committed or 78 | // rolled back atomically with other changes in the course of this transaction. 79 | // 80 | // It is the caller's responsibility to Commit or Rollback the transaction after 81 | // this function is called. 82 | func (c *Client) EnqueueTx(ctx context.Context, j *Job, tx adapter.Tx) error { 83 | return c.execEnqueue(ctx, []*Job{j}, tx) 84 | } 85 | 86 | // EnqueueTxWithID is the same as EnqueueTx except it adds a job to the queue 87 | // with a specific id. 88 | func (c *Client) EnqueueTxWithID(ctx context.Context, j *Job, jobID ulid.ULID, tx adapter.Tx) error { 89 | return c.execEnqueueWithID(ctx, []*Job{j}, tx, []ulid.ULID{jobID}) 90 | } 91 | 92 | // EnqueueBatch adds a batch of jobs. Operation is atomic, so either all jobs are added, or none. 93 | func (c *Client) EnqueueBatch(ctx context.Context, jobs []*Job) error { 94 | // No need to start a transaction if there are no jobs to enqueue 95 | if len(jobs) == 0 { 96 | return nil 97 | } 98 | 99 | return c.execEnqueue(ctx, jobs, c.pool) 100 | } 101 | 102 | // EnqueueBatchTx adds a batch of jobs within the scope of the transaction. 103 | // This allows you to guarantee that an enqueued batch will either be committed or 104 | // rolled back atomically with other changes in the course of this transaction. 105 | // 106 | // It is the caller's responsibility to Commit or Rollback the transaction after 107 | // this function is called. 108 | func (c *Client) EnqueueBatchTx(ctx context.Context, jobs []*Job, tx adapter.Tx) error { 109 | if len(jobs) == 0 { 110 | return nil 111 | } 112 | 113 | return c.execEnqueue(ctx, jobs, tx) 114 | } 115 | 116 | var errSlicesMustMatch = errors.New("jobs and jobIDs slices must have the same non-zero length, pls report this a bug") 117 | 118 | func (c *Client) execEnqueueWithID(ctx context.Context, jobs []*Job, q adapter.Queryable, jobIDs []ulid.ULID) (err error) { 119 | if len(jobs) != len(jobIDs) || len(jobs) == 0 || len(jobIDs) == 0 { 120 | return errSlicesMustMatch 121 | } 122 | 123 | var ( 124 | args []any 125 | values []string 126 | ) 127 | for i, j := range jobs { 128 | if j.Type == "" { 129 | return ErrMissingType 130 | } 131 | 132 | j.CreatedAt = time.Now().UTC() 133 | 134 | runAt := j.RunAt 135 | if runAt.IsZero() { 136 | j.RunAt = j.CreatedAt 137 | } 138 | 139 | j.ID = jobIDs[i] 140 | idAsString := jobIDs[i].String() 141 | 142 | if j.Args == nil { 143 | j.Args = []byte{} 144 | } 145 | 146 | values = append(values, fmt.Sprintf("($%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d)", i*8+1, i*8+2, i*8+3, i*8+4, i*8+5, i*8+6, i*8+7, i*8+8)) 147 | args = append(args, idAsString, j.Queue, j.Priority, j.RunAt, j.Type, j.Args, j.CreatedAt, j.CreatedAt) 148 | } 149 | 150 | _, err = q.Exec(ctx, `INSERT INTO gue_jobs 151 | (job_id, queue, priority, run_at, job_type, args, created_at, updated_at) 152 | VALUES 153 | `+strings.Join(values, ", "), args...) 
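// Note: the INSERT above is a single multi-row statement (one 8-column placeholder tuple per job), so err
// applies to the whole batch - either every job was inserted or none. The loop below only logs and counts
// each job against that shared result.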
154 | 155 | for _, j := range jobs { 156 | c.logger.Debug( 157 | "Tried to enqueue a job", 158 | adapter.Err(err), 159 | adapter.F("queue", j.Queue), 160 | adapter.F("id", j.ID.String()), 161 | ) 162 | 163 | c.mEnqueue.Add(ctx, 1, metric.WithAttributes(attrJobType.String(j.Type), attrSuccess.Bool(err == nil))) 164 | } 165 | 166 | return err 167 | } 168 | 169 | func (c *Client) execEnqueue(ctx context.Context, jobs []*Job, q adapter.Queryable) error { 170 | jobIDs := make([]ulid.ULID, 0, len(jobs)) 171 | for range jobs { 172 | jobID, err := ulid.New(ulid.Now(), c.entropy) 173 | if err != nil { 174 | return fmt.Errorf("could not generate new Job ULID ID: %w", err) 175 | } 176 | jobIDs = append(jobIDs, jobID) 177 | } 178 | 179 | return c.execEnqueueWithID(ctx, jobs, q, jobIDs) 180 | } 181 | 182 | // LockJob attempts to retrieve a Job from the database in the specified queue. 183 | // If a job is found, it will be locked on the transactional level, so other workers 184 | // will be skipping it. If no job is found, nil will be returned instead of an error. 185 | // 186 | // This function cares about the priority first to lock top priority jobs first even if there are available ones that 187 | // should be executed earlier but with the lower priority. 188 | // 189 | // Because Gue uses transaction-level locks, we have to hold the 190 | // same transaction throughout the process of getting a job, working it, 191 | // deleting it, and releasing the lock. 192 | // 193 | // After the Job has been worked, you must call either Job.Done() or Job.Error() on it 194 | // in order to commit transaction to persist Job changes (remove or update it). 195 | func (c *Client) LockJob(ctx context.Context, queue string) (*Job, error) { 196 | sql := `SELECT job_id, queue, priority, run_at, job_type, args, error_count, last_error, created_at 197 | FROM gue_jobs 198 | WHERE queue = $1 AND run_at <= $2 199 | ORDER BY priority ASC 200 | LIMIT 1 FOR UPDATE SKIP LOCKED` 201 | 202 | return c.execLockJob(ctx, true, sql, queue, time.Now().UTC()) 203 | } 204 | 205 | // LockJobByID attempts to retrieve a specific Job from the database. 206 | // If the job is found, it will be locked on the transactional level, so other workers 207 | // will be skipping it. If the job is not found, an error will be returned 208 | // 209 | // Because Gue uses transaction-level locks, we have to hold the 210 | // same transaction throughout the process of getting the job, working it, 211 | // deleting it, and releasing the lock. 212 | // 213 | // After the Job has been worked, you must call either Job.Done() or Job.Error() on it 214 | // in order to commit transaction to persist Job changes (remove or update it). 215 | func (c *Client) LockJobByID(ctx context.Context, id ulid.ULID) (*Job, error) { 216 | sql := `SELECT job_id, queue, priority, run_at, job_type, args, error_count, last_error, created_at 217 | FROM gue_jobs 218 | WHERE job_id = $1 FOR UPDATE SKIP LOCKED` 219 | 220 | return c.execLockJob(ctx, false, sql, id.String()) 221 | } 222 | 223 | // LockNextScheduledJob attempts to retrieve the earliest scheduled Job from the database in the specified queue. 224 | // If a job is found, it will be locked on the transactional level, so other workers 225 | // will be skipping it. If no job is found, nil will be returned instead of an error. 
226 | // 227 | // This function cares about the scheduled time first to lock earliest to execute jobs first even if there are ones 228 | // with a higher priority scheduled to a later time but already eligible for execution 229 | // 230 | // Because Gue uses transaction-level locks, we have to hold the 231 | // same transaction throughout the process of getting a job, working it, 232 | // deleting it, and releasing the lock. 233 | // 234 | // After the Job has been worked, you must call either Job.Done() or Job.Error() on it 235 | // in order to commit transaction to persist Job changes (remove or update it). 236 | func (c *Client) LockNextScheduledJob(ctx context.Context, queue string) (*Job, error) { 237 | sql := `SELECT job_id, queue, priority, run_at, job_type, args, error_count, last_error, created_at 238 | FROM gue_jobs 239 | WHERE queue = $1 AND run_at <= $2 240 | ORDER BY run_at, priority ASC 241 | LIMIT 1 FOR UPDATE SKIP LOCKED` 242 | 243 | return c.execLockJob(ctx, true, sql, queue, time.Now().UTC()) 244 | } 245 | 246 | func (c *Client) execLockJob(ctx context.Context, handleErrNoRows bool, sql string, args ...any) (*Job, error) { 247 | tx, err := c.pool.Begin(ctx) 248 | if err != nil { 249 | c.mLockJob.Add(ctx, 1, metric.WithAttributes(attrJobType.String(""), attrSuccess.Bool(false))) 250 | return nil, err 251 | } 252 | 253 | j := Job{tx: tx, backoff: c.backoff, logger: c.logger} 254 | 255 | err = tx.QueryRow(ctx, sql, args...).Scan( 256 | &j.ID, 257 | &j.Queue, 258 | &j.Priority, 259 | &j.RunAt, 260 | &j.Type, 261 | &j.Args, 262 | &j.ErrorCount, 263 | &j.LastError, 264 | &j.CreatedAt, 265 | ) 266 | if err == nil { 267 | c.mLockJob.Add(ctx, 1, metric.WithAttributes(attrJobType.String(j.Type), attrSuccess.Bool(true))) 268 | return &j, nil 269 | } 270 | 271 | rbErr := tx.Rollback(ctx) 272 | if handleErrNoRows && errors.Is(err, adapter.ErrNoRows) { 273 | return nil, rbErr 274 | } 275 | 276 | return nil, fmt.Errorf("could not lock a job (rollback result: %v): %w", rbErr, err) 277 | } 278 | 279 | func (c *Client) initMetrics() (err error) { 280 | if c.mEnqueue, err = c.meter.Int64Counter( 281 | "gue_client_enqueue", 282 | metric.WithDescription("Number of jobs being enqueued"), 283 | metric.WithUnit("1"), 284 | ); err != nil { 285 | return fmt.Errorf("could not register mEnqueue metric: %w", err) 286 | } 287 | 288 | if c.mLockJob, err = c.meter.Int64Counter( 289 | "gue_client_lock_job", 290 | metric.WithDescription("Number of jobs being locked (consumed)"), 291 | metric.WithUnit("1"), 292 | ); err != nil { 293 | return fmt.Errorf("could not register mLockJob metric: %w", err) 294 | } 295 | 296 | return nil 297 | } 298 | -------------------------------------------------------------------------------- /client_option.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "go.opentelemetry.io/otel/metric" 5 | 6 | "github.com/vgarvardt/gue/v5/adapter" 7 | ) 8 | 9 | // ClientOption defines a type that allows to set client properties during the build-time. 10 | type ClientOption func(*Client) 11 | 12 | // WithClientLogger sets Logger implementation to client. 13 | func WithClientLogger(logger adapter.Logger) ClientOption { 14 | return func(c *Client) { 15 | c.logger = logger 16 | } 17 | } 18 | 19 | // WithClientID sets client ID for easier identification in logs. 
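// A minimal usage sketch (pool is any adapter.ConnPool; "orders-api-1" is just an illustrative ID):
//
//	c, err := gue.NewClient(pool, gue.WithClientID("orders-api-1"))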
20 | func WithClientID(id string) ClientOption { 21 | return func(c *Client) { 22 | c.id = id 23 | } 24 | } 25 | 26 | // WithClientBackoff sets backoff implementation that will be applied to errored jobs 27 | // within current client session. 28 | func WithClientBackoff(backoff Backoff) ClientOption { 29 | return func(c *Client) { 30 | c.backoff = backoff 31 | } 32 | } 33 | 34 | // WithClientMeter sets metric.Meter instance to the client. 35 | func WithClientMeter(meter metric.Meter) ClientOption { 36 | return func(c *Client) { 37 | c.meter = meter 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /client_option_test.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/mock" 10 | "github.com/stretchr/testify/require" 11 | "go.opentelemetry.io/otel/metric/noop" 12 | 13 | "github.com/vgarvardt/gue/v5/adapter" 14 | ) 15 | 16 | func TestWithClientID(t *testing.T) { 17 | clientWithDefaultID, err := NewClient(nil) 18 | require.NoError(t, err) 19 | assert.NotEmpty(t, clientWithDefaultID.id) 20 | 21 | customID := "some-meaningful-id" 22 | clientWithCustomID, err := NewClient(nil, WithClientID(customID)) 23 | require.NoError(t, err) 24 | assert.Equal(t, customID, clientWithCustomID.id) 25 | } 26 | 27 | func TestWithClientLogger(t *testing.T) { 28 | clientWithDefaultLogger, err := NewClient(nil) 29 | require.NoError(t, err) 30 | assert.IsType(t, adapter.NoOpLogger{}, clientWithDefaultLogger.logger) 31 | 32 | logMessage := "hello" 33 | 34 | l := new(mockLogger) 35 | l.On("Info", logMessage, mock.Anything) 36 | // worker sets id as default logger field 37 | l.On("With", mock.Anything).Return(l) 38 | 39 | clientWithCustomLogger, err := NewClient(nil, WithClientLogger(l)) 40 | require.NoError(t, err) 41 | clientWithCustomLogger.logger.Info(logMessage) 42 | 43 | l.AssertExpectations(t) 44 | } 45 | 46 | func TestWithClientBackoff(t *testing.T) { 47 | customBackoff := func(retries int) time.Duration { 48 | return time.Duration(retries) * time.Second 49 | } 50 | 51 | defaultPtr := reflect.ValueOf(DefaultExponentialBackoff).Pointer() 52 | customPtr := reflect.ValueOf(customBackoff).Pointer() 53 | 54 | clientWithDefaultBackoff, err := NewClient(nil) 55 | require.NoError(t, err) 56 | 57 | clientWithDefaultBackoffPtr := reflect.ValueOf(clientWithDefaultBackoff.backoff).Pointer() 58 | 59 | assert.Equal(t, defaultPtr, clientWithDefaultBackoffPtr) 60 | 61 | clientWithCustomBackoff, err := NewClient(nil, WithClientBackoff(customBackoff)) 62 | require.NoError(t, err) 63 | 64 | assert.Equal(t, customBackoff(123), clientWithCustomBackoff.backoff(123)) 65 | assert.NotEqual(t, defaultPtr, customPtr) 66 | } 67 | 68 | func TestWithClientMeter(t *testing.T) { 69 | customMeter := noop.NewMeterProvider().Meter("custom") 70 | 71 | _, err := NewClient(nil) 72 | require.NoError(t, err) 73 | 74 | clientWithCustomMeter, err := NewClient(nil, WithClientMeter(customMeter)) 75 | require.NoError(t, err) 76 | 77 | assert.Equal(t, customMeter, clientWithCustomMeter.meter) 78 | } 79 | -------------------------------------------------------------------------------- /ctx.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import "context" 4 | 5 | type ctxKey struct{} 6 | 7 | var ( 8 | workerIdxKey = ctxKey{} 9 | ) 10 | 11 | const ( 12 | // 
WorkerIdxUnknown is returned when worker index in the pool is not set for some reason. 13 | WorkerIdxUnknown = -1 14 | ) 15 | 16 | // setWorkerIdx sets the index of the worker in the pool to the worker context. 17 | func setWorkerIdx(ctx context.Context, idx int) context.Context { 18 | return context.WithValue(ctx, workerIdxKey, idx) 19 | } 20 | 21 | // GetWorkerIdx gets the index of the worker in the pool from the worker context. 22 | // Returns WorkerIdxUnknown if the context is not set or the value is not found there. 23 | func GetWorkerIdx(ctx context.Context) int { 24 | if ctx == nil { 25 | return WorkerIdxUnknown 26 | } 27 | 28 | if idx, ok := ctx.Value(workerIdxKey).(int); ok { 29 | return idx 30 | } 31 | 32 | return WorkerIdxUnknown 33 | } 34 | -------------------------------------------------------------------------------- /ctx_test.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestSetWorkerIdx(t *testing.T) { 11 | t.Run("no ctx", func(t *testing.T) { 12 | idx := GetWorkerIdx(nil) 13 | assert.Equal(t, WorkerIdxUnknown, idx) 14 | }) 15 | 16 | t.Run("no idx in the ctx", func(t *testing.T) { 17 | ctx := context.Background() 18 | idx := GetWorkerIdx(ctx) 19 | assert.Equal(t, WorkerIdxUnknown, idx) 20 | }) 21 | 22 | t.Run("idx is set", func(t *testing.T) { 23 | ctx := setWorkerIdx(context.Background(), 99) 24 | idx := GetWorkerIdx(ctx) 25 | assert.Equal(t, 99, idx) 26 | }) 27 | } 28 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package gue implements Golang queues on top of PostgreSQL. 3 | It uses transaction-level locks for concurrent work. 4 | 5 | # PostgreSQL drivers 6 | 7 | Package supports several PostgreSQL drivers using adapter interface internally. 
8 | Currently, adapters for the following drivers have been implemented: 9 | - github.com/jackc/pgx/v4 10 | - github.com/jackc/pgx/v5 11 | - github.com/lib/pq 12 | 13 | # Usage 14 | 15 | Here is a complete example showing worker setup for pgx/v5 and two jobs enqueued, one with a delay: 16 | 17 | package main 18 | 19 | import ( 20 | "context" 21 | "encoding/json" 22 | "fmt" 23 | "log" 24 | "os" 25 | "time" 26 | 27 | "github.com/jackc/pgx/v5/pgxpool" 28 | "golang.org/x/sync/errgroup" 29 | 30 | "github.com/vgarvardt/gue/v5" 31 | "github.com/vgarvardt/gue/v5/adapter/pgxv5" 32 | ) 33 | 34 | type printNameArgs struct { 35 | Name string 36 | } 37 | 38 | func main() { 39 | printName := func(ctx context.Context, j *gue.Job) error { 40 | var args printNameArgs 41 | if err := json.Unmarshal(j.Args, &args); err != nil { 42 | return err 43 | } 44 | fmt.Printf("Hello %s!\n", args.Name) 45 | return nil 46 | } 47 | 48 | pgxCfg, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL")) 49 | if err != nil { 50 | log.Fatal(err) 51 | } 52 | 53 | pgxPool, err := pgxpool.NewWithConfig(context.Background(), pgxCfg) 54 | if err != nil { 55 | log.Fatal(err) 56 | } 57 | defer pgxPool.Close() 58 | 59 | poolAdapter := pgxv5.NewConnPool(pgxPool) 60 | 61 | gc, err := gue.NewClient(poolAdapter) 62 | if err != nil { 63 | log.Fatal(err) 64 | } 65 | 66 | wm := gue.WorkMap{ 67 | "PrintName": printName, 68 | } 69 | 70 | // create a pool with 2 workers 71 | workers, err := gue.NewWorkerPool(gc, wm, 2, gue.WithPoolQueue("name_printer")) 72 | if err != nil { 73 | log.Fatal(err) 74 | } 75 | 76 | ctx, shutdown := context.WithCancel(context.Background()) 77 | 78 | // work jobs in goroutine 79 | g, gctx := errgroup.WithContext(ctx) 80 | g.Go(func() error { 81 | err := workers.Run(gctx) 82 | if err != nil { 83 | // In a real-world application, use a better way to shut down 84 | // the application on an unrecoverable error. E.g. fx.Shutdowner from 85 | // go.uber.org/fx module.
86 | log.Fatal(err) 87 | } 88 | return err 89 | }) 90 | 91 | args, err := json.Marshal(printNameArgs{Name: "vgarvardt"}) 92 | if err != nil { 93 | log.Fatal(err) 94 | } 95 | 96 | j := &gue.Job{ 97 | Type: "PrintName", 98 | Args: args, 99 | } 100 | if err := gc.Enqueue(context.Background(), j); err != nil { 101 | log.Fatal(err) 102 | } 103 | 104 | j = &gue.Job{ 105 | Type: "PrintName", 106 | RunAt: time.Now().UTC().Add(30 * time.Second), // delay 30 seconds 107 | Args: args, 108 | } 109 | if err := gc.Enqueue(context.Background(), j); err != nil { 110 | log.Fatal(err) 111 | } 112 | 113 | time.Sleep(30 * time.Second) // wait for a while 114 | 115 | // send shutdown signal to worker 116 | shutdown() 117 | if err := g.Wait(); err != nil { 118 | log.Fatal(err) 119 | } 120 | } 121 | */ 122 | package gue 123 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | postgres: 4 | image: postgres:11 5 | ports: 6 | - "5432" 7 | environment: 8 | LC_ALL: C.UTF-8 9 | POSTGRES_USER: test 10 | POSTGRES_PASSWORD: test 11 | POSTGRES_DB: test 12 | tmpfs: 13 | - /var/lib/postgresql/data 14 | healthcheck: 15 | test: ["CMD", "pg_isready"] 16 | interval: 3s 17 | timeout: 3s 18 | retries: 5 19 | -------------------------------------------------------------------------------- /enqueue_test.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/oklog/ulid/v2" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | 12 | "github.com/vgarvardt/gue/v5/adapter" 13 | adapterTesting "github.com/vgarvardt/gue/v5/adapter/testing" 14 | ) 15 | 16 | func TestEnqueueOnlyType(t *testing.T) { 17 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 18 | t.Run(name, func(t *testing.T) { 19 | testEnqueueOnlyType(t, openFunc(t)) 20 | }) 21 | } 22 | } 23 | 24 | func testEnqueueOnlyType(t *testing.T, connPool adapter.ConnPool) { 25 | ctx := context.Background() 26 | 27 | c, err := NewClient(connPool) 28 | require.NoError(t, err) 29 | 30 | jobType := "MyJob" 31 | job := Job{Type: jobType} 32 | err = c.Enqueue(ctx, &job) 33 | require.NoError(t, err) 34 | 35 | j, err := c.LockJobByID(ctx, job.ID) 36 | require.NoError(t, err) 37 | require.NotNil(t, j) 38 | require.False(t, j.CreatedAt.IsZero()) 39 | 40 | t.Cleanup(func() { 41 | err := j.Done(ctx) 42 | assert.NoError(t, err) 43 | }) 44 | 45 | // check resulting job 46 | assert.NotEmpty(t, j.ID) 47 | assert.Equal(t, defaultQueueName, j.Queue) 48 | assert.Equal(t, JobPriorityDefault, j.Priority) 49 | assert.False(t, j.RunAt.IsZero()) 50 | assert.Equal(t, jobType, j.Type) 51 | assert.Equal(t, []byte(``), j.Args) 52 | assert.Equal(t, int32(0), j.ErrorCount) 53 | assert.False(t, j.LastError.Valid) 54 | 55 | assert.False(t, j.CreatedAt.IsZero()) 56 | assert.True(t, time.Now().After(j.CreatedAt)) 57 | assert.True( 58 | t, 59 | job.CreatedAt.Round(time.Second).Equal(j.CreatedAt.Round(time.Second)), 60 | job.CreatedAt.Round(time.Second).String(), 61 | j.CreatedAt.Round(time.Second).String(), 62 | ) 63 | } 64 | 65 | func TestEnqueueWithPriority(t *testing.T) { 66 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 67 | t.Run(name, func(t *testing.T) { 68 | testEnqueueWithPriority(t, openFunc(t)) 69 | }) 70 | } 71 | } 72 | 73 | func testEnqueueWithPriority(t
*testing.T, connPool adapter.ConnPool) { 74 | ctx := context.Background() 75 | 76 | c, err := NewClient(connPool) 77 | require.NoError(t, err) 78 | 79 | want := JobPriority(99) 80 | job := Job{Type: "MyJob", Priority: want} 81 | err = c.Enqueue(ctx, &job) 82 | require.NoError(t, err) 83 | 84 | j, err := c.LockJobByID(ctx, job.ID) 85 | require.NoError(t, err) 86 | require.NotNil(t, j) 87 | 88 | t.Cleanup(func() { 89 | err := j.Done(ctx) 90 | assert.NoError(t, err) 91 | }) 92 | 93 | assert.Equal(t, want, j.Priority) 94 | } 95 | 96 | func TestEnqueueWithRunAt(t *testing.T) { 97 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 98 | t.Run(name, func(t *testing.T) { 99 | testEnqueueWithRunAt(t, openFunc(t)) 100 | }) 101 | } 102 | } 103 | 104 | func testEnqueueWithRunAt(t *testing.T, connPool adapter.ConnPool) { 105 | ctx := context.Background() 106 | 107 | c, err := NewClient(connPool) 108 | require.NoError(t, err) 109 | 110 | want := time.Now().Add(2 * time.Minute) 111 | job := Job{Type: "MyJob", RunAt: want} 112 | err = c.Enqueue(ctx, &job) 113 | require.NoError(t, err) 114 | 115 | j, err := c.LockJobByID(ctx, job.ID) 116 | require.NoError(t, err) 117 | require.NotNil(t, j) 118 | 119 | t.Cleanup(func() { 120 | err := j.Done(ctx) 121 | assert.NoError(t, err) 122 | }) 123 | 124 | // truncate to the microsecond as postgres driver does 125 | assert.WithinDuration(t, want, j.RunAt, time.Microsecond) 126 | } 127 | 128 | func TestEnqueueWithArgs(t *testing.T) { 129 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 130 | t.Run(name, func(t *testing.T) { 131 | testEnqueueWithArgs(t, openFunc(t)) 132 | }) 133 | } 134 | } 135 | 136 | func testEnqueueWithArgs(t *testing.T, connPool adapter.ConnPool) { 137 | ctx := context.Background() 138 | 139 | c, err := NewClient(connPool) 140 | require.NoError(t, err) 141 | 142 | want := []byte(`{"arg1":0, "arg2":"a string"}`) 143 | job := Job{Type: "MyJob", Args: want} 144 | err = c.Enqueue(ctx, &job) 145 | require.NoError(t, err) 146 | 147 | j, err := c.LockJobByID(ctx, job.ID) 148 | require.NoError(t, err) 149 | require.NotNil(t, j) 150 | 151 | t.Cleanup(func() { 152 | err := j.Done(ctx) 153 | assert.NoError(t, err) 154 | }) 155 | 156 | assert.Equal(t, want, j.Args) 157 | } 158 | 159 | func TestEnqueueWithQueue(t *testing.T) { 160 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 161 | t.Run(name, func(t *testing.T) { 162 | testEnqueueWithQueue(t, openFunc(t)) 163 | }) 164 | } 165 | } 166 | 167 | func testEnqueueWithQueue(t *testing.T, connPool adapter.ConnPool) { 168 | ctx := context.Background() 169 | 170 | c, err := NewClient(connPool) 171 | require.NoError(t, err) 172 | 173 | want := "special-work-queue" 174 | job := Job{Type: "MyJob", Queue: want} 175 | err = c.Enqueue(ctx, &job) 176 | require.NoError(t, err) 177 | 178 | j, err := c.LockJobByID(ctx, job.ID) 179 | require.NoError(t, err) 180 | require.NotNil(t, j) 181 | 182 | t.Cleanup(func() { 183 | err := j.Done(ctx) 184 | assert.NoError(t, err) 185 | }) 186 | 187 | assert.Equal(t, want, j.Queue) 188 | } 189 | 190 | func TestEnqueueWithEmptyType(t *testing.T) { 191 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 192 | t.Run(name, func(t *testing.T) { 193 | testEnqueueWithEmptyType(t, openFunc(t)) 194 | }) 195 | } 196 | } 197 | 198 | func testEnqueueWithEmptyType(t *testing.T, connPool adapter.ConnPool) { 199 | ctx := context.Background() 200 | 201 | c, err := NewClient(connPool) 202 | require.NoError(t, err) 203 | 204 | err 
= c.Enqueue(ctx, &Job{Type: ""}) 205 | require.Equal(t, ErrMissingType, err) 206 | } 207 | 208 | func TestEnqueueTx(t *testing.T) { 209 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 210 | t.Run(name, func(t *testing.T) { 211 | testEnqueueTx(t, openFunc(t)) 212 | }) 213 | } 214 | } 215 | 216 | func testEnqueueTx(t *testing.T, connPool adapter.ConnPool) { 217 | ctx := context.Background() 218 | 219 | c, err := NewClient(connPool) 220 | require.NoError(t, err) 221 | 222 | tx, err := connPool.Begin(ctx) 223 | require.NoError(t, err) 224 | 225 | job := Job{Type: "MyJob"} 226 | err = c.EnqueueTx(ctx, &job, tx) 227 | require.NoError(t, err) 228 | 229 | j := findOneJob(t, tx) 230 | require.NotNil(t, j) 231 | 232 | err = tx.Rollback(ctx) 233 | require.NoError(t, err) 234 | 235 | j = findOneJob(t, connPool) 236 | require.Nil(t, j) 237 | } 238 | 239 | func TestClient_EnqueueBatchTx(t *testing.T) { 240 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 241 | t.Run(name, func(t *testing.T) { 242 | testEnqueueBatchTx(t, openFunc(t)) 243 | }) 244 | } 245 | } 246 | 247 | func TestEnqueueTxWithID(t *testing.T) { 248 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 249 | t.Run(name, func(t *testing.T) { 250 | testEnqueueTxWithID(t, openFunc(t)) 251 | }) 252 | } 253 | } 254 | 255 | func testEnqueueTxWithID(t *testing.T, connPool adapter.ConnPool) { 256 | ctx := context.Background() 257 | 258 | c, err := NewClient(connPool) 259 | require.NoError(t, err) 260 | 261 | tx, err := connPool.Begin(ctx) 262 | require.NoError(t, err) 263 | 264 | specifiedID := ulid.Make() 265 | 266 | job := Job{Type: "MyJob"} 267 | err = c.EnqueueTxWithID(ctx, &job, specifiedID, tx) 268 | require.NoError(t, err) 269 | 270 | j := findOneJob(t, tx) 271 | require.NotNil(t, j) 272 | require.Equal(t, specifiedID, j.ID) 273 | 274 | err = tx.Rollback(ctx) 275 | require.NoError(t, err) 276 | 277 | j = findOneJob(t, connPool) 278 | require.Nil(t, j) 279 | } 280 | 281 | func testEnqueueBatchTx(t *testing.T, connPool adapter.ConnPool) { 282 | ctx := context.Background() 283 | 284 | c, err := NewClient(connPool) 285 | require.NoError(t, err) 286 | 287 | tx, err := connPool.Begin(ctx) 288 | require.NoError(t, err) 289 | 290 | err = c.EnqueueBatchTx(ctx, []*Job{{Type: "MyJob1"}, {Type: "MyJob2"}}, tx) 291 | require.NoError(t, err) 292 | 293 | j := findOneJob(t, tx) 294 | require.NotNil(t, j) 295 | 296 | err = tx.Rollback(ctx) 297 | require.NoError(t, err) 298 | 299 | j = findOneJob(t, connPool) 300 | require.Nil(t, j) 301 | } 302 | -------------------------------------------------------------------------------- /error.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "time" 7 | ) 8 | 9 | var ( 10 | // ErrJobPanicked is returned when the job failed to be handled because it is panicked. 11 | // Error is normally returned wrapped, so use `errors.Is(err, gue.ErrJobPanicked)` to ensure this is the error you're 12 | // looking for. 13 | ErrJobPanicked = errors.New("job panicked") 14 | 15 | // ErrHookJobDonePanicked is returned when the hook job done panicked while panicked job recovery. 16 | // Error is normally returned wrapped, so use `errors.Is(err, gue.ErrHookJobDonePanicked)` to ensure this is the error you're 17 | // looking for. 
18 | ErrHookJobDonePanicked = errors.New("hook job done panicked in job panic recovery") 19 | ) 20 | 21 | // ErrJobReschedule interface implementation allows errors to reschedule jobs in the individual basis. 22 | type ErrJobReschedule interface { 23 | rescheduleJobAt() time.Time 24 | } 25 | 26 | type errJobRescheduleIn struct { 27 | d time.Duration 28 | s string 29 | } 30 | 31 | // ErrRescheduleJobIn spawns an error that reschedules a job to run after some predefined duration. 32 | func ErrRescheduleJobIn(d time.Duration, reason string) error { 33 | return errJobRescheduleIn{d: d, s: reason} 34 | } 35 | 36 | // Error implements error.Error() 37 | func (e errJobRescheduleIn) Error() string { 38 | return fmt.Sprintf("rescheduling job in %q because %q", e.d.String(), e.s) 39 | } 40 | 41 | func (e errJobRescheduleIn) rescheduleJobAt() time.Time { 42 | return time.Now().Add(e.d) 43 | } 44 | 45 | type errJobRescheduleAt struct { 46 | t time.Time 47 | s string 48 | } 49 | 50 | // ErrRescheduleJobAt spawns an error that reschedules a job to run at some predefined time. 51 | func ErrRescheduleJobAt(t time.Time, reason string) error { 52 | return errJobRescheduleAt{t: t, s: reason} 53 | } 54 | 55 | // Error implements error.Error() 56 | func (e errJobRescheduleAt) Error() string { 57 | return fmt.Sprintf("rescheduling job at %q because %q", e.t.String(), e.s) 58 | } 59 | 60 | func (e errJobRescheduleAt) rescheduleJobAt() time.Time { 61 | return e.t 62 | } 63 | 64 | type errJobDiscard struct { 65 | s string 66 | } 67 | 68 | // ErrDiscardJob spawns an error that unconditionally discards a job. 69 | func ErrDiscardJob(reason string) error { 70 | return errJobDiscard{s: reason} 71 | } 72 | 73 | // Error implements error.Error() 74 | func (e errJobDiscard) Error() string { 75 | return fmt.Sprintf("discarding job because %q", e.s) 76 | } 77 | 78 | func (e errJobDiscard) rescheduleJobAt() time.Time { 79 | return time.Time{} 80 | } 81 | -------------------------------------------------------------------------------- /error_test.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | 12 | "github.com/vgarvardt/gue/v5/adapter" 13 | adapterTesting "github.com/vgarvardt/gue/v5/adapter/testing" 14 | ) 15 | 16 | func TestErrRescheduleJobIn(t *testing.T) { 17 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 18 | t.Run(name, func(t *testing.T) { 19 | testErrRescheduleJobIn(t, openFunc(t)) 20 | }) 21 | } 22 | } 23 | 24 | func testErrRescheduleJobIn(t *testing.T, connPool adapter.ConnPool) { 25 | t.Helper() 26 | 27 | ctx := context.Background() 28 | now := time.Now() 29 | 30 | c, err := NewClient(connPool) 31 | require.NoError(t, err) 32 | 33 | j := Job{RunAt: now, Type: "foo"} 34 | err = c.Enqueue(ctx, &j) 35 | require.NoError(t, err) 36 | require.NotEmpty(t, j.ID) 37 | 38 | jLocked1, err := c.LockJobByID(ctx, j.ID) 39 | require.NoError(t, err) 40 | 41 | errReschedule := ErrRescheduleJobIn(10*time.Second, "reschedule me for later time") 42 | errRescheduleStr := `rescheduling job in "10s" because "reschedule me for later time"` 43 | assert.Equal(t, errRescheduleStr, errReschedule.Error()) 44 | 45 | err = jLocked1.Error(ctx, errReschedule) 46 | require.NoError(t, err) 47 | 48 | jLocked2, err := c.LockJobByID(ctx, j.ID) 49 | require.NoError(t, err) 50 | 51 | assert.Equal(t, int32(1), 
jLocked2.ErrorCount) 52 | assert.True(t, jLocked2.LastError.Valid) 53 | assert.Equal(t, errRescheduleStr, jLocked2.LastError.String) 54 | assert.GreaterOrEqual(t, jLocked2.RunAt.Sub(jLocked1.RunAt), 10*time.Second) 55 | 56 | err = jLocked2.Done(ctx) 57 | require.NoError(t, err) 58 | } 59 | 60 | func TestErrRescheduleJobAt(t *testing.T) { 61 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 62 | t.Run(name, func(t *testing.T) { 63 | testErrRescheduleJobAt(t, openFunc(t)) 64 | }) 65 | } 66 | } 67 | 68 | func testErrRescheduleJobAt(t *testing.T, connPool adapter.ConnPool) { 69 | t.Helper() 70 | 71 | ctx := context.Background() 72 | now := time.Now() 73 | rescheduleAt := now.Add(3 * time.Hour) 74 | 75 | c, err := NewClient(connPool) 76 | require.NoError(t, err) 77 | 78 | j := Job{RunAt: now, Type: "foo"} 79 | err = c.Enqueue(ctx, &j) 80 | require.NoError(t, err) 81 | require.NotEmpty(t, j.ID) 82 | 83 | jLocked1, err := c.LockJobByID(ctx, j.ID) 84 | require.NoError(t, err) 85 | 86 | errReschedule := ErrRescheduleJobAt(rescheduleAt, "reschedule me for later time") 87 | errRescheduleStr := fmt.Sprintf(`rescheduling job at "%s" because "reschedule me for later time"`, rescheduleAt.String()) 88 | assert.Equal(t, errRescheduleStr, errReschedule.Error()) 89 | 90 | err = jLocked1.Error(ctx, errReschedule) 91 | require.NoError(t, err) 92 | 93 | jLocked2, err := c.LockJobByID(ctx, j.ID) 94 | require.NoError(t, err) 95 | 96 | assert.Equal(t, int32(1), jLocked2.ErrorCount) 97 | assert.True(t, jLocked2.LastError.Valid) 98 | assert.Equal(t, errRescheduleStr, jLocked2.LastError.String) 99 | assert.True(t, jLocked2.RunAt.Round(time.Second).Equal(rescheduleAt.Round(time.Second))) 100 | 101 | err = jLocked2.Done(ctx) 102 | require.NoError(t, err) 103 | } 104 | 105 | func TestErrDiscardJob(t *testing.T) { 106 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 107 | t.Run(name, func(t *testing.T) { 108 | testErrDiscardJob(t, openFunc(t)) 109 | }) 110 | } 111 | } 112 | 113 | func testErrDiscardJob(t *testing.T, connPool adapter.ConnPool) { 114 | t.Helper() 115 | 116 | ctx := context.Background() 117 | now := time.Now() 118 | 119 | c, err := NewClient(connPool) 120 | require.NoError(t, err) 121 | 122 | j := Job{RunAt: now, Type: "foo"} 123 | err = c.Enqueue(ctx, &j) 124 | require.NoError(t, err) 125 | require.NotEmpty(t, j.ID) 126 | 127 | jLocked1, err := c.LockJobByID(ctx, j.ID) 128 | require.NoError(t, err) 129 | 130 | errReschedule := ErrDiscardJob("no job - no fear of being fired") 131 | errRescheduleStr := `discarding job because "no job - no fear of being fired"` 132 | assert.Equal(t, errRescheduleStr, errReschedule.Error()) 133 | 134 | err = jLocked1.Error(ctx, errReschedule) 135 | require.NoError(t, err) 136 | 137 | jLocked2, err := c.LockJobByID(ctx, j.ID) 138 | require.Error(t, err) 139 | assert.Nil(t, jLocked2) 140 | } 141 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/vgarvardt/gue/v5 2 | 3 | go 1.23.0 4 | 5 | require ( 6 | github.com/jackc/pgconn v1.14.3 7 | github.com/jackc/pgx/v4 v4.18.3 8 | github.com/jackc/pgx/v5 v5.7.5 9 | github.com/lib/pq v1.10.9 10 | github.com/oklog/ulid/v2 v2.1.1 11 | github.com/rs/zerolog v1.34.0 12 | github.com/stretchr/testify v1.10.0 13 | github.com/vgarvardt/backoff v1.0.0 14 | go.opentelemetry.io/otel v1.36.0 15 | go.opentelemetry.io/otel/metric v1.36.0 16 | 
go.opentelemetry.io/otel/trace v1.36.0 17 | go.uber.org/zap v1.27.0 18 | golang.org/x/sync v0.15.0 19 | ) 20 | 21 | require ( 22 | github.com/davecgh/go-spew v1.1.1 // indirect 23 | github.com/jackc/chunkreader/v2 v2.0.1 // indirect 24 | github.com/jackc/pgio v1.0.0 // indirect 25 | github.com/jackc/pgpassfile v1.0.0 // indirect 26 | github.com/jackc/pgproto3/v2 v2.3.3 // indirect 27 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect 28 | github.com/jackc/pgtype v1.14.0 // indirect 29 | github.com/jackc/puddle v1.3.0 // indirect 30 | github.com/jackc/puddle/v2 v2.2.2 // indirect 31 | github.com/kr/pretty v0.3.1 // indirect 32 | github.com/mattn/go-colorable v0.1.13 // indirect 33 | github.com/mattn/go-isatty v0.0.19 // indirect 34 | github.com/pmezard/go-difflib v1.0.0 // indirect 35 | github.com/rogpeppe/go-internal v1.13.1 // indirect 36 | github.com/stretchr/objx v0.5.2 // indirect 37 | go.uber.org/multierr v1.11.0 // indirect 38 | golang.org/x/crypto v0.37.0 // indirect 39 | golang.org/x/sys v0.32.0 // indirect 40 | golang.org/x/text v0.24.0 // indirect 41 | gopkg.in/yaml.v3 v3.0.1 // indirect 42 | ) 43 | -------------------------------------------------------------------------------- /helpers.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "context" 5 | "crypto/sha256" 6 | "encoding/hex" 7 | "fmt" 8 | "sync" 9 | "time" 10 | ) 11 | 12 | // RandomStringID returns random alphanumeric string that can be used as ID. 13 | func RandomStringID() string { 14 | hash := sha256.Sum256([]byte(time.Now().Format(time.RFC3339Nano))) 15 | return hex.EncodeToString(hash[:])[:6] 16 | } 17 | 18 | // RunLock ensures that there is only one instance of the running callback function "f" (worker). 19 | func RunLock(ctx context.Context, f func(ctx context.Context) error, mu *sync.Mutex, running *bool, id string) error { 20 | mu.Lock() 21 | if *running { 22 | mu.Unlock() 23 | return fmt.Errorf("worker[id=%s] is already running", id) 24 | } 25 | *running = true 26 | mu.Unlock() 27 | 28 | defer func() { 29 | mu.Lock() 30 | *running = false 31 | mu.Unlock() 32 | }() 33 | 34 | return f(ctx) 35 | } 36 | -------------------------------------------------------------------------------- /job.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "fmt" 7 | "sync" 8 | "time" 9 | 10 | "github.com/oklog/ulid/v2" 11 | 12 | "github.com/vgarvardt/gue/v5/adapter" 13 | ) 14 | 15 | // JobPriority is the wrapper type for Job.Priority 16 | type JobPriority int16 17 | 18 | // Some shortcut values for JobPriority that can be any, but chances are high that one of these will be the most used. 19 | const ( 20 | JobPriorityHighest JobPriority = -32768 21 | JobPriorityHigh JobPriority = -16384 22 | JobPriorityDefault JobPriority = 0 23 | JobPriorityLow JobPriority = 16384 24 | JobPriorityLowest JobPriority = 32767 25 | ) 26 | 27 | // Job is a single unit of work for Gue to perform. 28 | type Job struct { 29 | // ID is the unique database ID of the Job. It is ignored on job creation. 30 | ID ulid.ULID 31 | 32 | // Queue is the name of the queue. It defaults to the empty queue "". 33 | Queue string 34 | 35 | // Priority is the priority of the Job. The default priority is 0, and a 36 | // lower number means a higher priority. 
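// For example, a job enqueued with Priority: JobPriorityHigh (-16384) is locked before one left at the default 0.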
37 | // 38 | // The highest priority is JobPriorityHighest, the lowest one is JobPriorityLowest 39 | Priority JobPriority 40 | 41 | // RunAt is the time that this job should be executed. It defaults to now(), 42 | // meaning the job will execute immediately. Set it to a value in the future 43 | // to delay a job's execution. 44 | RunAt time.Time 45 | 46 | // Type maps job to a worker func. 47 | Type string 48 | 49 | // Args for the job. 50 | Args []byte 51 | 52 | // ErrorCount is the number of times this job has attempted to run, but failed with an error. 53 | // It is ignored on job creation. 54 | // This field is initialised only when the Job is being retrieved from the DB and is not 55 | // being updated when the current Job handler errored. 56 | ErrorCount int32 57 | 58 | // LastError is the error message or stack trace from the last time the job failed. It is ignored on job creation. 59 | // This field is initialised only when the Job is being retrieved from the DB and is not 60 | // being updated when the current Job run errored. This field supposed to be used mostly for the debug reasons. 61 | LastError sql.NullString 62 | 63 | // CreatedAt is the job creation time. 64 | // This field is initialised only when the Job is being retrieved from the DB and is not 65 | // being updated when the current Job run errored. This field can be used as a decision parameter in some handlers 66 | // whether it makes sense to retry the job or it can be dropped. 67 | CreatedAt time.Time 68 | 69 | mu sync.Mutex 70 | deleted bool 71 | tx adapter.Tx 72 | backoff Backoff 73 | logger adapter.Logger 74 | } 75 | 76 | // Tx returns DB transaction that this job is locked to. You may use 77 | // it as you please until you call Done(). At that point, this transaction 78 | // will be committed. This function will return nil if the Job's 79 | // transaction was closed with Done(). 80 | func (j *Job) Tx() adapter.Tx { 81 | return j.tx 82 | } 83 | 84 | // Delete marks this job as complete by deleting it from the database. 85 | // 86 | // You must also later call Done() to return this job's database connection to 87 | // the pool. If you got the job from the worker - it will take care of cleaning up the job and resources, 88 | // no need to do this manually in a WorkFunc. 89 | func (j *Job) Delete(ctx context.Context) error { 90 | j.mu.Lock() 91 | defer j.mu.Unlock() 92 | 93 | if j.deleted { 94 | return nil 95 | } 96 | 97 | _, err := j.tx.Exec(ctx, `DELETE FROM gue_jobs WHERE job_id = $1`, j.ID.String()) 98 | if err != nil { 99 | return err 100 | } 101 | 102 | j.deleted = true 103 | return nil 104 | } 105 | 106 | // Done commits transaction that marks job as done. If you got the job from the worker - it will take care of 107 | // cleaning up the job and resources, no need to do this manually in a WorkFunc. 108 | func (j *Job) Done(ctx context.Context) error { 109 | j.mu.Lock() 110 | defer j.mu.Unlock() 111 | 112 | if j.tx == nil { 113 | // already marked as done 114 | return nil 115 | } 116 | 117 | if err := j.tx.Commit(ctx); err != nil { 118 | return err 119 | } 120 | 121 | j.tx = nil 122 | 123 | return nil 124 | } 125 | 126 | // Error marks the job as failed and schedules it to be reworked. An error 127 | // message or backtrace can be provided as msg, which will be saved on the job. 128 | // It will also increase the error count. 129 | // 130 | // This call marks job as done and releases (commits) transaction, 131 | // so calling Done() is not required, although calling it will not cause any issues. 
132 | // If you got the job from the worker - it will take care of cleaning up the job and resources, 133 | // no need to do this manually in a WorkFunc. 134 | func (j *Job) Error(ctx context.Context, jErr error) (err error) { 135 | defer func() { 136 | doneErr := j.Done(ctx) 137 | if doneErr != nil { 138 | err = fmt.Errorf("failed to mark job as done (original error: %v): %w", err, doneErr) 139 | } 140 | }() 141 | 142 | errorCount := j.ErrorCount + 1 143 | now := time.Now().UTC() 144 | newRunAt := j.calculateErrorRunAt(jErr, now, errorCount) 145 | if newRunAt.IsZero() { 146 | j.logger.Info( 147 | "Got empty new run at for the errored job, discarding it", 148 | adapter.F("job-type", j.Type), 149 | adapter.F("job-queue", j.Queue), 150 | adapter.F("job-errors", errorCount), 151 | adapter.Err(jErr), 152 | ) 153 | err = j.Delete(ctx) 154 | return 155 | } 156 | 157 | _, err = j.tx.Exec( 158 | ctx, 159 | `UPDATE gue_jobs SET error_count = $1, run_at = $2, last_error = $3, updated_at = $4 WHERE job_id = $5`, 160 | errorCount, newRunAt, jErr.Error(), now, j.ID.String(), 161 | ) 162 | 163 | return err 164 | } 165 | 166 | func (j *Job) calculateErrorRunAt(err error, now time.Time, errorCount int32) time.Time { 167 | errReschedule, ok := err.(ErrJobReschedule) 168 | if ok { 169 | return errReschedule.rescheduleJobAt() 170 | } 171 | 172 | backoff := j.backoff(int(errorCount)) 173 | if backoff < 0 { 174 | return time.Time{} 175 | } 176 | 177 | return now.Add(backoff) 178 | } 179 | -------------------------------------------------------------------------------- /job_test.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | 10 | "github.com/vgarvardt/gue/v5/adapter" 11 | "github.com/vgarvardt/gue/v5/adapter/libpq" 12 | "github.com/vgarvardt/gue/v5/adapter/pgxv4" 13 | "github.com/vgarvardt/gue/v5/adapter/pgxv5" 14 | adapterTesting "github.com/vgarvardt/gue/v5/adapter/testing" 15 | ) 16 | 17 | func TestJob_Tx(t *testing.T) { 18 | for name, openFunc := range adapterTesting.AllAdaptersOpenTestPool { 19 | t.Run(name, func(t *testing.T) { 20 | testJobTxUnwrapTx(t, name, openFunc(t)) 21 | }) 22 | } 23 | } 24 | 25 | func testJobTxUnwrapTx(t *testing.T, name string, connPool adapter.ConnPool) { 26 | ctx := context.Background() 27 | 28 | c, err := NewClient(connPool) 29 | require.NoError(t, err) 30 | 31 | newJob := &Job{Type: "MyJob", Args: []byte(`{}`)} 32 | err = c.Enqueue(ctx, newJob) 33 | require.NoError(t, err) 34 | require.NotEmpty(t, newJob.ID) 35 | 36 | j, err := c.LockJob(ctx, "") 37 | require.NoError(t, err) 38 | require.NotNil(t, j) 39 | require.NotNil(t, j.tx) 40 | 41 | t.Cleanup(func() { 42 | err := j.Done(ctx) 43 | assert.NoError(t, err) 44 | }) 45 | 46 | switch name { 47 | case "pgx/v4": 48 | _, okPgxV5 := pgxv5.UnwrapTx(j.Tx()) 49 | require.False(t, okPgxV5) 50 | _, okLibPQ := libpq.UnwrapTx(j.Tx()) 51 | require.False(t, okLibPQ) 52 | 53 | tx, okPgxV4 := pgxv4.UnwrapTx(j.Tx()) 54 | require.True(t, okPgxV4) 55 | 56 | _, err := tx.Exec(ctx, `SELECT COUNT(1) FROM gue_jobs`) 57 | require.NoError(t, err) 58 | 59 | case "pgx/v5": 60 | _, okPgxV4 := pgxv4.UnwrapTx(j.Tx()) 61 | require.False(t, okPgxV4) 62 | _, okLibPQ := libpq.UnwrapTx(j.Tx()) 63 | require.False(t, okLibPQ) 64 | 65 | tx, okPgxV5 := pgxv5.UnwrapTx(j.Tx()) 66 | require.True(t, okPgxV5) 67 | 68 | _, err := tx.Exec(ctx, `SELECT COUNT(1) FROM gue_jobs`) 69 | 
require.NoError(t, err) 70 | 71 | case "lib/pq": 72 | _, okPgxV4 := pgxv4.UnwrapTx(j.Tx()) 73 | require.False(t, okPgxV4) 74 | _, okPgxV5 := pgxv5.UnwrapTx(j.Tx()) 75 | require.False(t, okPgxV5) 76 | 77 | tx, okLibPQ := libpq.UnwrapTx(j.Tx()) 78 | require.True(t, okLibPQ) 79 | 80 | _, err := tx.ExecContext(ctx, `SELECT COUNT(1) FROM gue_jobs`) 81 | require.NoError(t, err) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /migrations/job_id_to_ulid.sql: -------------------------------------------------------------------------------- 1 | -- heavily based on https://github.com/geckoboard/pgulid/blob/master/pgulid.sql 2 | CREATE OR REPLACE FUNCTION migrate_job_id_ulid(job_id BIGINT, created_at TIMESTAMPTZ) 3 | RETURNS TEXT 4 | AS $$ 5 | DECLARE 6 | -- Crockford's Base32 7 | encoding BYTEA = '0123456789ABCDEFGHJKMNPQRSTVWXYZ'; 8 | timestamp BYTEA = E'\\000\\000\\000\\000\\000\\000'; 9 | output TEXT = ''; 10 | 11 | unix_time BIGINT; 12 | ulid BYTEA; 13 | BEGIN 14 | -- 6 timestamp bytes 15 | unix_time = (EXTRACT(EPOCH FROM created_at) * 1000)::BIGINT; 16 | timestamp = SET_BYTE(timestamp, 0, (unix_time >> 40)::BIT(8)::INTEGER); 17 | timestamp = SET_BYTE(timestamp, 1, (unix_time >> 32)::BIT(8)::INTEGER); 18 | timestamp = SET_BYTE(timestamp, 2, (unix_time >> 24)::BIT(8)::INTEGER); 19 | timestamp = SET_BYTE(timestamp, 3, (unix_time >> 16)::BIT(8)::INTEGER); 20 | timestamp = SET_BYTE(timestamp, 4, (unix_time >> 8)::BIT(8)::INTEGER); 21 | timestamp = SET_BYTE(timestamp, 5, unix_time::BIT(8)::INTEGER); 22 | 23 | -- 10 entropy bytes 24 | ulid = timestamp || substring(md5(job_id::text), 0, 11)::bytea; 25 | 26 | -- Encode the timestamp 27 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 0) & 224) >> 5)); 28 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 0) & 31))); 29 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 1) & 248) >> 3)); 30 | output = output || CHR(GET_BYTE(encoding, ((GET_BYTE(ulid, 1) & 7) << 2) | ((GET_BYTE(ulid, 2) & 192) >> 6))); 31 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 2) & 62) >> 1)); 32 | output = output || CHR(GET_BYTE(encoding, ((GET_BYTE(ulid, 2) & 1) << 4) | ((GET_BYTE(ulid, 3) & 240) >> 4))); 33 | output = output || CHR(GET_BYTE(encoding, ((GET_BYTE(ulid, 3) & 15) << 1) | ((GET_BYTE(ulid, 4) & 128) >> 7))); 34 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 4) & 124) >> 2)); 35 | output = output || CHR(GET_BYTE(encoding, ((GET_BYTE(ulid, 4) & 3) << 3) | ((GET_BYTE(ulid, 5) & 224) >> 5))); 36 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 5) & 31))); 37 | 38 | -- Encode the entropy 39 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 6) & 248) >> 3)); 40 | output = output || CHR(GET_BYTE(encoding, ((GET_BYTE(ulid, 6) & 7) << 2) | ((GET_BYTE(ulid, 7) & 192) >> 6))); 41 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 7) & 62) >> 1)); 42 | output = output || CHR(GET_BYTE(encoding, ((GET_BYTE(ulid, 7) & 1) << 4) | ((GET_BYTE(ulid, 8) & 240) >> 4))); 43 | output = output || CHR(GET_BYTE(encoding, ((GET_BYTE(ulid, 8) & 15) << 1) | ((GET_BYTE(ulid, 9) & 128) >> 7))); 44 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 9) & 124) >> 2)); 45 | output = output || CHR(GET_BYTE(encoding, ((GET_BYTE(ulid, 9) & 3) << 3) | ((GET_BYTE(ulid, 10) & 224) >> 5))); 46 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 10) & 31))); 47 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 11) & 248) >> 3)); 48 | output = output 
|| CHR(GET_BYTE(encoding, ((GET_BYTE(ulid, 11) & 7) << 2) | ((GET_BYTE(ulid, 12) & 192) >> 6))); 49 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 12) & 62) >> 1)); 50 | output = output || CHR(GET_BYTE(encoding, ((GET_BYTE(ulid, 12) & 1) << 4) | ((GET_BYTE(ulid, 13) & 240) >> 4))); 51 | output = output || CHR(GET_BYTE(encoding, ((GET_BYTE(ulid, 13) & 15) << 1) | ((GET_BYTE(ulid, 14) & 128) >> 7))); 52 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 14) & 124) >> 2)); 53 | output = output || CHR(GET_BYTE(encoding, ((GET_BYTE(ulid, 14) & 3) << 3) | ((GET_BYTE(ulid, 15) & 224) >> 5))); 54 | output = output || CHR(GET_BYTE(encoding, (GET_BYTE(ulid, 15) & 31))); 55 | 56 | RETURN output; 57 | END 58 | $$ 59 | LANGUAGE plpgsql 60 | VOLATILE; 61 | 62 | ALTER TABLE gue_jobs ALTER COLUMN job_id TYPE text USING migrate_job_id_ulid(job_id, created_at); 63 | 64 | DROP FUNCTION IF EXISTS migrate_job_id_ulid; 65 | -------------------------------------------------------------------------------- /migrations/schema.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS gue_jobs 2 | ( 3 | job_id TEXT NOT NULL PRIMARY KEY, 4 | priority SMALLINT NOT NULL, 5 | run_at TIMESTAMPTZ NOT NULL, 6 | job_type TEXT NOT NULL, 7 | args BYTEA NOT NULL, 8 | error_count INTEGER NOT NULL DEFAULT 0, 9 | last_error TEXT, 10 | queue TEXT NOT NULL, 11 | created_at TIMESTAMPTZ NOT NULL, 12 | updated_at TIMESTAMPTZ NOT NULL 13 | ); 14 | 15 | CREATE INDEX IF NOT EXISTS idx_gue_jobs_selector ON gue_jobs (queue, run_at, priority); 16 | -------------------------------------------------------------------------------- /worker.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "errors" 7 | "fmt" 8 | "runtime" 9 | "sync" 10 | "time" 11 | 12 | "go.opentelemetry.io/otel/attribute" 13 | "go.opentelemetry.io/otel/metric" 14 | noopM "go.opentelemetry.io/otel/metric/noop" 15 | "go.opentelemetry.io/otel/trace" 16 | noopT "go.opentelemetry.io/otel/trace/noop" 17 | "golang.org/x/sync/errgroup" 18 | 19 | "github.com/vgarvardt/gue/v5/adapter" 20 | ) 21 | 22 | // PollStrategy determines how the DB is queried for the next job to work on 23 | type PollStrategy string 24 | 25 | const ( 26 | defaultPollInterval = 5 * time.Second 27 | defaultQueueName = "" 28 | 29 | defaultPanicStackBufSize = 1024 30 | 31 | // PriorityPollStrategy cares about the priority first to lock top priority jobs first even if there are available 32 | // ones that should be executed earlier but with lower priority. 33 | PriorityPollStrategy PollStrategy = "OrderByPriority" 34 | // RunAtPollStrategy cares about the scheduled time first to lock earliest to execute jobs first even if there 35 | // are ones with a higher priority scheduled to a later time but already eligible for execution 36 | RunAtPollStrategy PollStrategy = "OrderByRunAtPriority" 37 | ) 38 | 39 | // WorkFunc is the handler function that performs the Job. If an error is returned, the Job 40 | // is either re-enqueued with the given backoff or is discarded based on the worker backoff strategy 41 | // and returned error. 42 | // 43 | // Modifying Job fields and calling any methods that are modifying its state within the handler may lead to undefined 44 | // behaviour. Please never do this. 45 | type WorkFunc func(ctx context.Context, j *Job) error 46 | 47 | // HookFunc is a function that may react to a Job lifecycle events. 
All the callbacks are being executed synchronously, 48 | // so be careful with the long-running locking operations. Hooks do not return an error, therefore they can not and 49 | // must not be used to affect the Job execution flow, e.g. cancel it - this is the WorkFunc responsibility. 50 | // Modifying Job fields and calling any methods that are modifying its state within hooks may lead to undefined 51 | // behaviour. Please never do this. 52 | // 53 | // Depending on the event err parameter may be empty or not - check the event description for its meaning. 54 | type HookFunc func(ctx context.Context, j *Job, err error) 55 | 56 | // WorkMap is a map of Job names to WorkFuncs that are used to perform Jobs of a 57 | // given type. 58 | type WorkMap map[string]WorkFunc 59 | 60 | // pollFunc is a function that queries the DB for the next job to work on 61 | type pollFunc func(context.Context, string) (*Job, error) 62 | 63 | // Worker is a single worker that pulls jobs off the specified queue. If no Job 64 | // is found, the Worker will sleep for interval seconds. 65 | type Worker struct { 66 | wm WorkMap 67 | interval time.Duration 68 | queue string 69 | c *Client 70 | id string 71 | logger adapter.Logger 72 | mu sync.Mutex 73 | running bool 74 | pollStrategy PollStrategy 75 | pollFunc pollFunc 76 | jobTTL time.Duration 77 | 78 | graceful bool 79 | gracefulCtx func() context.Context 80 | 81 | tracer trace.Tracer 82 | meter metric.Meter 83 | 84 | unknownJobTypeWF WorkFunc 85 | 86 | hooksJobLocked []HookFunc 87 | hooksUnknownJobType []HookFunc 88 | hooksJobDone []HookFunc 89 | hooksJobUndone []HookFunc 90 | 91 | mWorked metric.Int64Counter 92 | mDuration metric.Int64Histogram 93 | 94 | panicStackBufSize int 95 | spanWorkOneNoJob bool 96 | } 97 | 98 | // NewWorker returns a Worker that fetches Jobs from the Client and executes 99 | // them using WorkMap. If the type of Job is not registered in the WorkMap, it's 100 | // considered an error and the job is re-enqueued with a backoff. 101 | // 102 | // Worker defaults to a poll interval of 5 seconds, which can be overridden by 103 | // WithWorkerPollInterval option. 104 | // The default queue is the nameless queue "", which can be overridden by 105 | // WithWorkerQueue option. 106 | func NewWorker(c *Client, wm WorkMap, options ...WorkerOption) (*Worker, error) { 107 | w := Worker{ 108 | interval: defaultPollInterval, 109 | queue: defaultQueueName, 110 | c: c, 111 | id: RandomStringID(), 112 | wm: wm, 113 | logger: adapter.NoOpLogger{}, 114 | pollStrategy: PriorityPollStrategy, 115 | tracer: noopT.NewTracerProvider().Tracer("noop"), 116 | meter: noopM.NewMeterProvider().Meter("noop"), 117 | 118 | panicStackBufSize: defaultPanicStackBufSize, 119 | } 120 | 121 | for _, option := range options { 122 | option(&w) 123 | } 124 | 125 | switch w.pollStrategy { 126 | case RunAtPollStrategy: 127 | w.pollFunc = w.c.LockNextScheduledJob 128 | default: 129 | w.pollFunc = w.c.LockJob 130 | } 131 | 132 | w.logger = w.logger.With(adapter.F("worker-id", w.id)) 133 | 134 | return &w, w.initMetrics() 135 | } 136 | 137 | // Run pulls jobs off the Worker's queue at its interval. This function does 138 | // not run in its own goroutine, so it’s possible to wait for completion. Use 139 | // context cancellation to shut it down. 140 | func (w *Worker) Run(ctx context.Context) error { 141 | return RunLock(ctx, w.runLoop, &w.mu, &w.running, w.id) 142 | } 143 | 144 | // runLoop pulls jobs off the Worker's queue at its interval. 
145 | func (w *Worker) runLoop(ctx context.Context) error { 146 | defer w.logger.Info("Worker finished") 147 | 148 | timer := time.NewTimer(w.interval) 149 | defer timer.Stop() 150 | 151 | for { 152 | handlerCtx := ctx 153 | if w.graceful { 154 | if w.gracefulCtx == nil { 155 | handlerCtx = context.Background() 156 | } else { 157 | handlerCtx = w.gracefulCtx() 158 | } 159 | } 160 | 161 | // Try to work a job 162 | if w.WorkOne(handlerCtx) { 163 | // Since we just did work, non-blocking check whether we should exit 164 | select { 165 | case <-ctx.Done(): 166 | return nil 167 | default: 168 | continue 169 | } 170 | } 171 | 172 | // Reset or create the timer; time.After is leaky 173 | // on context cancellation since we can’t stop it. 174 | timer.Reset(w.interval) 175 | 176 | // No work found, block until exit or timer expires 177 | select { 178 | case <-ctx.Done(): 179 | return nil 180 | case <-timer.C: 181 | continue 182 | } 183 | } 184 | } 185 | 186 | // WorkOne tries to consume a single message from the queue. 187 | func (w *Worker) WorkOne(ctx context.Context) (didWork bool) { 188 | ctx, span := w.tracer.Start(ctx, "Worker.WorkOne") 189 | // worker option is set to generate spans even when no job is found - let it be 190 | if w.spanWorkOneNoJob { 191 | defer span.End() 192 | } 193 | 194 | j, err := w.pollFunc(ctx, w.queue) 195 | if err != nil { 196 | span.RecordError(fmt.Errorf("worker failed to lock a job: %w", err)) 197 | w.mWorked.Add(ctx, 1, metric.WithAttributes(attrJobType.String(""), attrSuccess.Bool(false))) 198 | w.logger.Error("Worker failed to lock a job", adapter.Err(err)) 199 | 200 | for _, hook := range w.hooksJobLocked { 201 | hook(ctx, nil, err) 202 | } 203 | return 204 | } 205 | if j == nil { 206 | return // no job was available 207 | } 208 | 209 | // at this point we have a job, so we need to ensure that span will be generated 210 | if !w.spanWorkOneNoJob { 211 | defer span.End() 212 | } 213 | 214 | processingStartedAt := time.Now() 215 | span.SetAttributes( 216 | attribute.String("job-id", j.ID.String()), 217 | attribute.String("job-queue", j.Queue), 218 | attribute.String("job-type", j.Type), 219 | ) 220 | 221 | ll := w.logger.With(adapter.F("job-id", j.ID.String()), adapter.F("job-type", j.Type)) 222 | 223 | defer w.markJobDone(ctx, j, processingStartedAt, span, ll) 224 | defer w.recoverPanic(ctx, j, ll) 225 | 226 | for _, hook := range w.hooksJobLocked { 227 | hook(ctx, j, nil) 228 | } 229 | 230 | didWork = true 231 | 232 | wf, ok := w.wm[j.Type] 233 | if !ok { 234 | if w.unknownJobTypeWF == nil { 235 | w.handleUnknownJobType(ctx, j, span, ll) 236 | return 237 | } 238 | 239 | wf = w.unknownJobTypeWF 240 | } 241 | 242 | handlerCtx := ctx 243 | cancel := context.CancelFunc(func() {}) 244 | if w.jobTTL > 0 { 245 | handlerCtx, cancel = context.WithTimeout(ctx, w.jobTTL) 246 | } 247 | defer cancel() 248 | 249 | if err = wf(handlerCtx, j); err != nil { 250 | w.mWorked.Add(ctx, 1, metric.WithAttributes(attrJobType.String(j.Type), attrSuccess.Bool(false))) 251 | 252 | for _, hook := range w.hooksJobDone { 253 | hook(ctx, j, err) 254 | } 255 | 256 | if jErr := j.Error(ctx, err); jErr != nil { 257 | span.RecordError(fmt.Errorf("failed to mark job as error: %w", jErr)) 258 | ll.Error("Got an error on setting an error to an errored job", adapter.Err(jErr), adapter.F("job-error", err)) 259 | } 260 | 261 | return 262 | } 263 | 264 | for _, hook := range w.hooksJobDone { 265 | hook(ctx, j, nil) 266 | } 267 | 268 | err = j.Delete(ctx) 269 | if err != nil { 270 |
span.RecordError(fmt.Errorf("failed to delete finished job: %w", err)) 271 | ll.Error("Got an error on deleting a job", adapter.Err(err)) 272 | } 273 | 274 | w.mWorked.Add(ctx, 1, metric.WithAttributes(attrJobType.String(j.Type), attrSuccess.Bool(err == nil))) 275 | ll.Debug("Job finished") 276 | return 277 | } 278 | 279 | func (w *Worker) handleUnknownJobType(ctx context.Context, j *Job, span trace.Span, ll adapter.Logger) { 280 | w.mWorked.Add(ctx, 1, metric.WithAttributes(attrJobType.String(j.Type), attrSuccess.Bool(false))) 281 | 282 | span.RecordError(fmt.Errorf("job with unknown type: %q", j.Type)) 283 | ll.Error("Got a job with unknown type") 284 | 285 | errUnknownType := fmt.Errorf("worker[id=%s] unknown job type: %q", w.id, j.Type) 286 | if err := j.Error(ctx, errUnknownType); err != nil { 287 | span.RecordError(fmt.Errorf("failed to mark job as error: %w", err)) 288 | ll.Error("Got an error on setting an error to unknown job", adapter.Err(err)) 289 | } 290 | 291 | for _, hook := range w.hooksUnknownJobType { 292 | hook(ctx, j, errUnknownType) 293 | } 294 | } 295 | 296 | func (w *Worker) initMetrics() (err error) { 297 | if w.mWorked, err = w.meter.Int64Counter( 298 | "gue_worker_jobs_worked", 299 | metric.WithDescription("Number of jobs processed"), 300 | metric.WithUnit("1"), 301 | ); err != nil { 302 | return fmt.Errorf("could not register mWorked metric: %w", err) 303 | } 304 | 305 | if w.mDuration, err = w.meter.Int64Histogram( 306 | "gue_worker_jobs_duration", 307 | metric.WithDescription("Duration of the single locked job to be processed with all the hooks"), 308 | metric.WithUnit("ms"), 309 | ); err != nil { 310 | return fmt.Errorf("could not register mDuration metric: %w", err) 311 | } 312 | 313 | return nil 314 | } 315 | 316 | func (w *Worker) markJobDone(ctx context.Context, j *Job, processingStartedAt time.Time, span trace.Span, ll adapter.Logger) { 317 | if err := j.Done(ctx); err != nil { 318 | span.RecordError(fmt.Errorf("failed to mark job as done: %w", err)) 319 | ll.Error("Failed to mark job as done", adapter.Err(err)) 320 | 321 | // let user handle critical job failure 322 | for _, hook := range w.hooksJobUndone { 323 | hook(ctx, j, err) 324 | } 325 | } 326 | 327 | w.mDuration.Record( 328 | ctx, 329 | time.Since(processingStartedAt).Milliseconds(), 330 | metric.WithAttributes(attrJobType.String(j.Type)), 331 | ) 332 | } 333 | 334 | // recoverPanic tries to handle panics in job execution. 335 | // A stacktrace is stored into Job last_error. 
336 | func (w *Worker) recoverPanic(ctx context.Context, j *Job, logger adapter.Logger) { 337 | r := recover() 338 | if r == nil { 339 | return 340 | } 341 | 342 | defer w.recoverPanicRecovery(ctx, j, logger) 343 | 344 | ctx, span := w.tracer.Start(ctx, "Worker.recoverPanic") 345 | defer span.End() 346 | 347 | stacktrace := buildStackTrace(r, w.panicStackBufSize, logger) 348 | 349 | w.mWorked.Add(ctx, 1, metric.WithAttributes(attrJobType.String(j.Type), attrSuccess.Bool(false))) 350 | span.RecordError(ErrJobPanicked, trace.WithAttributes(attribute.String("stacktrace", stacktrace))) 351 | logger.Error("Job panicked", adapter.F("stacktrace", stacktrace)) 352 | 353 | errPanic := fmt.Errorf("%w:\n%s", ErrJobPanicked, stacktrace) 354 | for _, hook := range w.hooksJobDone { 355 | hook(ctx, j, errPanic) 356 | } 357 | 358 | // record an error on the job with panic message and stacktrace 359 | if err := j.Error(ctx, errPanic); err != nil { 360 | span.RecordError(fmt.Errorf("failed to mark panicked job as error: %w", err)) 361 | logger.Error("Got an error on setting an error to a panicked job", adapter.Err(err)) 362 | } 363 | } 364 | 365 | // recoverPanicRecovery tries to handle panics in hook job done thrown in the process of panicked job recovery. 366 | // A stacktrace is stored into Job last_error. 367 | func (w *Worker) recoverPanicRecovery(ctx context.Context, j *Job, logger adapter.Logger) { 368 | r := recover() 369 | if r == nil { 370 | return 371 | } 372 | 373 | ctx, span := w.tracer.Start(ctx, "Worker.recoverPanicRecovery") 374 | defer span.End() 375 | 376 | stacktrace := buildStackTrace(r, w.panicStackBufSize, logger) 377 | 378 | span.RecordError(ErrHookJobDonePanicked, trace.WithAttributes(attribute.String("stacktrace", stacktrace))) 379 | logger.Error("Job panicked during the panic recovery", adapter.F("stacktrace", stacktrace)) 380 | 381 | errPanic := fmt.Errorf("%w (%w):\n%s", ErrHookJobDonePanicked, ErrJobPanicked, stacktrace) 382 | // record an error on the job with panic message and stacktrace 383 | if err := j.Error(ctx, errPanic); err != nil { 384 | span.RecordError(fmt.Errorf("failed to mark panicked job (hook job done) as error: %w", err)) 385 | logger.Error("Got an error on setting an error to a panicked job (hook job done)", adapter.Err(err)) 386 | } 387 | } 388 | 389 | func buildStackTrace(r any, bufSize int, logger adapter.Logger) string { 390 | stackBuf := make([]byte, bufSize) 391 | n := runtime.Stack(stackBuf, false) 392 | 393 | buf := new(bytes.Buffer) 394 | _, printRErr := fmt.Fprintf(buf, "%v\n", r) 395 | _, printStackErr := fmt.Fprintln(buf, string(stackBuf[:n])) 396 | _, printEllipsisErr := fmt.Fprintln(buf, "[...]") 397 | 398 | if err := errors.Join(printRErr, printStackErr, printEllipsisErr); err != nil { 399 | logger.Error("Could not build panicked job stacktrace", adapter.Err(err), adapter.F("runtime-stack", string(stackBuf[:n]))) 400 | } 401 | 402 | return buf.String() 403 | } 404 | 405 | // WorkerPool is a pool of Workers, each working jobs from the queue 406 | // at the specified interval using the WorkMap. 
407 | type WorkerPool struct { 408 | wm WorkMap 409 | interval time.Duration 410 | queue string 411 | c *Client 412 | workers []*Worker 413 | id string 414 | logger adapter.Logger 415 | mu sync.Mutex 416 | running bool 417 | pollStrategy PollStrategy 418 | jobTTL time.Duration 419 | 420 | graceful bool 421 | gracefulCtx func() context.Context 422 | 423 | tracer trace.Tracer 424 | meter metric.Meter 425 | 426 | unknownJobTypeWF WorkFunc 427 | 428 | hooksJobLocked []HookFunc 429 | hooksUnknownJobType []HookFunc 430 | hooksJobDone []HookFunc 431 | hooksJobUndone []HookFunc 432 | 433 | panicStackBufSize int 434 | spanWorkOneNoJob bool 435 | } 436 | 437 | // NewWorkerPool creates a new WorkerPool with count workers using the Client c. 438 | // 439 | // Each Worker in the pool defaults to a poll interval of 5 seconds, which can be 440 | // overridden by WithPoolPollInterval option. The default queue is the 441 | // nameless queue "", which can be overridden by WithPoolQueue option. 442 | func NewWorkerPool(c *Client, wm WorkMap, poolSize int, options ...WorkerPoolOption) (*WorkerPool, error) { 443 | w := WorkerPool{ 444 | wm: wm, 445 | interval: defaultPollInterval, 446 | queue: defaultQueueName, 447 | c: c, 448 | id: RandomStringID(), 449 | workers: make([]*Worker, poolSize), 450 | logger: adapter.NoOpLogger{}, 451 | pollStrategy: PriorityPollStrategy, 452 | tracer: noopT.NewTracerProvider().Tracer("noop"), 453 | meter: noopM.NewMeterProvider().Meter("noop"), 454 | 455 | panicStackBufSize: defaultPanicStackBufSize, 456 | } 457 | 458 | for _, option := range options { 459 | option(&w) 460 | } 461 | 462 | w.logger = w.logger.With(adapter.F("worker-pool-id", w.id)) 463 | 464 | var err error 465 | for i := range w.workers { 466 | w.workers[i], err = NewWorker( 467 | w.c, 468 | w.wm, 469 | WithWorkerPollInterval(w.interval), 470 | WithWorkerQueue(w.queue), 471 | WithWorkerID(fmt.Sprintf("%s/worker-%d", w.id, i)), 472 | WithWorkerLogger(w.logger), 473 | WithWorkerPollStrategy(w.pollStrategy), 474 | WithWorkerTracer(w.tracer), 475 | WithWorkerMeter(w.meter), 476 | WithWorkerHooksJobLocked(w.hooksJobLocked...), 477 | WithWorkerHooksUnknownJobType(w.hooksUnknownJobType...), 478 | WithWorkerHooksJobDone(w.hooksJobDone...), 479 | WithWorkerHooksJobUndone(w.hooksJobUndone...), 480 | WithWorkerPanicStackBufSize(w.panicStackBufSize), 481 | WithWorkerSpanWorkOneNoJob(w.spanWorkOneNoJob), 482 | WithWorkerJobTTL(w.jobTTL), 483 | WithWorkerUnknownJobWorkFunc(w.unknownJobTypeWF), 484 | ) 485 | 486 | if err != nil { 487 | return nil, fmt.Errorf("could not init worker instance: %w", err) 488 | } 489 | 490 | w.workers[i].graceful = w.graceful 491 | w.workers[i].gracefulCtx = w.gracefulCtx 492 | } 493 | 494 | return &w, nil 495 | } 496 | 497 | // Run runs all the Workers in the WorkerPool in their own goroutines. 498 | // Run blocks until all workers exit. Use context cancellation for 499 | // shutdown. 500 | func (w *WorkerPool) Run(ctx context.Context) error { 501 | return RunLock(ctx, w.runGroup, &w.mu, &w.running, w.id) 502 | } 503 | 504 | // WorkOne tries to consume a single message from the queue. 505 | func (w *WorkerPool) WorkOne(ctx context.Context) (didWork bool) { 506 | return w.workers[0].WorkOne(ctx) 507 | } 508 | 509 | // runGroup starts all the Workers in the WorkerPool in their own goroutines 510 | // managed by errgroup.Group.
511 | func (w *WorkerPool) runGroup(ctx context.Context) error { 512 | defer w.logger.Info("Worker pool finished") 513 | 514 | grp, ctx := errgroup.WithContext(ctx) 515 | for i := range w.workers { 516 | idx := i 517 | worker := w.workers[idx] 518 | grp.Go(func() error { 519 | return worker.Run(setWorkerIdx(ctx, idx)) 520 | }) 521 | } 522 | 523 | return grp.Wait() 524 | } 525 | -------------------------------------------------------------------------------- /worker_option.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "go.opentelemetry.io/otel/metric" 8 | "go.opentelemetry.io/otel/trace" 9 | 10 | "github.com/vgarvardt/gue/v5/adapter" 11 | ) 12 | 13 | // WorkerOption defines a type that allows setting worker properties at build time. 14 | type WorkerOption func(*Worker) 15 | 16 | // WorkerPoolOption defines a type that allows setting worker pool properties at build time. 17 | type WorkerPoolOption func(pool *WorkerPool) 18 | 19 | // WithWorkerPollInterval overrides default poll interval with the given value. 20 | // Poll interval is the "sleep" duration if there were no jobs found in the DB. 21 | func WithWorkerPollInterval(d time.Duration) WorkerOption { 22 | return func(w *Worker) { 23 | w.interval = d 24 | } 25 | } 26 | 27 | // WithWorkerQueue overrides default worker queue name with the given value. 28 | func WithWorkerQueue(queue string) WorkerOption { 29 | return func(w *Worker) { 30 | w.queue = queue 31 | } 32 | } 33 | 34 | // WithWorkerID sets worker ID for easier identification in logs 35 | func WithWorkerID(id string) WorkerOption { 36 | return func(w *Worker) { 37 | w.id = id 38 | } 39 | } 40 | 41 | // WithWorkerLogger sets Logger implementation to worker 42 | func WithWorkerLogger(logger adapter.Logger) WorkerOption { 43 | return func(w *Worker) { 44 | w.logger = logger 45 | } 46 | } 47 | 48 | // WithWorkerTracer sets trace.Tracer instance to the worker. 49 | func WithWorkerTracer(tracer trace.Tracer) WorkerOption { 50 | return func(w *Worker) { 51 | w.tracer = tracer 52 | } 53 | } 54 | 55 | // WithWorkerMeter sets metric.Meter instance to the worker. 56 | func WithWorkerMeter(meter metric.Meter) WorkerOption { 57 | return func(w *Worker) { 58 | w.meter = meter 59 | } 60 | } 61 | 62 | // WithWorkerPanicStackBufSize sets max size for the stacktrace buffer for panicking jobs. 63 | // Default value is 1024 that is enough for most of the cases. Be careful setting the buffer size to big values 64 | // as this may affect overall performance. 65 | func WithWorkerPanicStackBufSize(size int) WorkerOption { 66 | return func(w *Worker) { 67 | w.panicStackBufSize = size 68 | } 69 | } 70 | 71 | // WithWorkerHooksJobLocked sets hooks that are called right after the job was polled from the DB. 72 | // Depending on the polling result, the hook will have either the error or the job set, but not both. 73 | // If the error field is set - no other lifecycle hooks will be called for the job. 74 | func WithWorkerHooksJobLocked(hooks ...HookFunc) WorkerOption { 75 | return func(w *Worker) { 76 | w.hooksJobLocked = hooks 77 | } 78 | } 79 | 80 | // WithWorkerHooksUnknownJobType sets hooks that are called when worker finds a job with unknown type. 81 | // Error field for this event type is always set since this is an error situation. 82 | // If this hook is called - no other lifecycle hooks will be called for the job.
83 | // When the handler for unknown job types is set with WithWorkerUnknownJobWorkFunc - these hooks are never called 84 | // as the job is handled in the regular way using that handler. 85 | func WithWorkerHooksUnknownJobType(hooks ...HookFunc) WorkerOption { 86 | return func(w *Worker) { 87 | w.hooksUnknownJobType = hooks 88 | } 89 | } 90 | 91 | // WithWorkerHooksJobDone sets hooks that are called when the worker has finished working the job, 92 | // right before the successfully executed job is removed or the errored job handler is called to decide 93 | // if the Job will be re-queued or discarded. 94 | // Error field is set for the cases when the job finished with an error. 95 | func WithWorkerHooksJobDone(hooks ...HookFunc) WorkerOption { 96 | return func(w *Worker) { 97 | w.hooksJobDone = hooks 98 | } 99 | } 100 | 101 | // WithWorkerHooksJobUndone sets hooks that are called when worker fails to mark the job as done. 102 | // This is an exceptional situation, most likely caused by a transaction that failed to commit. 103 | // Hook implementation MUST NOT rely on the transaction provided by the job as it may already be marked as failed. 104 | func WithWorkerHooksJobUndone(hooks ...HookFunc) WorkerOption { 105 | return func(w *Worker) { 106 | w.hooksJobUndone = hooks 107 | } 108 | } 109 | 110 | // WithWorkerPollStrategy overrides default poll strategy with given value 111 | func WithWorkerPollStrategy(s PollStrategy) WorkerOption { 112 | return func(w *Worker) { 113 | w.pollStrategy = s 114 | } 115 | } 116 | 117 | // WithWorkerGracefulShutdown enables graceful shutdown mode in the worker. 118 | // When graceful shutdown is enabled - the worker does not propagate the cancelled context to the Job, 119 | // so the worker waits for the currently executed Job to finish and only then shuts down. 120 | // Use this mode carefully, as the Job handler is no longer aware of the worker context state and 121 | // dependencies may already be cancelled/closed, so it is up to the job to ensure everything is 122 | // still working. Values of the original context are also not propagated to the handler context 123 | // when the graceful mode is enabled. 124 | // 125 | // Use "handlerCtx" to set up custom handler context. When set to nil - defaults to context.Background(). 126 | func WithWorkerGracefulShutdown(handlerCtx func() context.Context) WorkerOption { 127 | return func(w *Worker) { 128 | w.graceful = true 129 | w.gracefulCtx = handlerCtx 130 | } 131 | } 132 | 133 | // WithWorkerSpanWorkOneNoJob enables tracing span generation for every attempt to get a job. 134 | // When set to true - generates a span for every DB poll, even when no job was acquired. This may 135 | // generate a lot of empty spans, but may help with some debugging, so use carefully. 136 | func WithWorkerSpanWorkOneNoJob(spanWorkOneNoJob bool) WorkerOption { 137 | return func(w *Worker) { 138 | w.spanWorkOneNoJob = spanWorkOneNoJob 139 | } 140 | } 141 | 142 | // WithWorkerJobTTL sets max time a job can run. Implementation-wise the job runs with the timeout context, 143 | // so it is up to the job implementation to handle context cancellation properly. 144 | func WithWorkerJobTTL(d time.Duration) WorkerOption { 145 | return func(w *Worker) { 146 | w.jobTTL = d 147 | } 148 | } 149 | 150 | // WithWorkerUnknownJobWorkFunc sets the handler for unknown job types. 151 | // When the handler is set - hooks set with WithWorkerHooksUnknownJobType are never called as the job is 152 | // handled in the regular way.
153 | func WithWorkerUnknownJobWorkFunc(wf WorkFunc) WorkerOption { 154 | return func(w *Worker) { 155 | w.unknownJobTypeWF = wf 156 | } 157 | } 158 | 159 | // WithPoolPollInterval overrides default poll interval with the given value. 160 | // Poll interval is the "sleep" duration if there were no jobs found in the DB. 161 | func WithPoolPollInterval(d time.Duration) WorkerPoolOption { 162 | return func(w *WorkerPool) { 163 | w.interval = d 164 | } 165 | } 166 | 167 | // WithPoolQueue overrides default worker queue name with the given value. 168 | func WithPoolQueue(queue string) WorkerPoolOption { 169 | return func(w *WorkerPool) { 170 | w.queue = queue 171 | } 172 | } 173 | 174 | // WithPoolID sets worker pool ID for easier identification in logs 175 | func WithPoolID(id string) WorkerPoolOption { 176 | return func(w *WorkerPool) { 177 | w.id = id 178 | } 179 | } 180 | 181 | // WithPoolLogger sets Logger implementation to worker pool 182 | func WithPoolLogger(logger adapter.Logger) WorkerPoolOption { 183 | return func(w *WorkerPool) { 184 | w.logger = logger 185 | } 186 | } 187 | 188 | // WithPoolPollStrategy overrides default poll strategy with given value 189 | func WithPoolPollStrategy(s PollStrategy) WorkerPoolOption { 190 | return func(w *WorkerPool) { 191 | w.pollStrategy = s 192 | } 193 | } 194 | 195 | // WithPoolTracer sets trace.Tracer instance to every worker in the pool. 196 | func WithPoolTracer(tracer trace.Tracer) WorkerPoolOption { 197 | return func(w *WorkerPool) { 198 | w.tracer = tracer 199 | } 200 | } 201 | 202 | // WithPoolMeter sets metric.Meter instance to every worker in the pool. 203 | func WithPoolMeter(meter metric.Meter) WorkerPoolOption { 204 | return func(w *WorkerPool) { 205 | w.meter = meter 206 | } 207 | } 208 | 209 | // WithPoolHooksJobLocked calls WithWorkerHooksJobLocked for every worker in the pool. 210 | func WithPoolHooksJobLocked(hooks ...HookFunc) WorkerPoolOption { 211 | return func(w *WorkerPool) { 212 | w.hooksJobLocked = hooks 213 | } 214 | } 215 | 216 | // WithPoolHooksUnknownJobType calls WithWorkerHooksUnknownJobType for every worker in the pool. 217 | // When the handler for unknown job types is set with WithPoolUnknownJobWorkFunc - these hooks are never called 218 | // as the job is handled in the regular way using that handler. 219 | func WithPoolHooksUnknownJobType(hooks ...HookFunc) WorkerPoolOption { 220 | return func(w *WorkerPool) { 221 | w.hooksUnknownJobType = hooks 222 | } 223 | } 224 | 225 | // WithPoolHooksJobDone calls WithWorkerHooksJobDone for every worker in the pool. 226 | func WithPoolHooksJobDone(hooks ...HookFunc) WorkerPoolOption { 227 | return func(w *WorkerPool) { 228 | w.hooksJobDone = hooks 229 | } 230 | } 231 | 232 | // WithPoolHooksJobUndone calls WithWorkerHooksJobUndone for every worker in the pool. 233 | func WithPoolHooksJobUndone(hooks ...HookFunc) WorkerPoolOption { 234 | return func(w *WorkerPool) { 235 | w.hooksJobUndone = hooks 236 | } 237 | } 238 | 239 | // WithPoolGracefulShutdown enables graceful shutdown mode for all workers in the pool. 240 | // See WithWorkerGracefulShutdown for details. 241 | func WithPoolGracefulShutdown(handlerCtx func() context.Context) WorkerPoolOption { 242 | return func(w *WorkerPool) { 243 | w.graceful = true 244 | w.gracefulCtx = handlerCtx 245 | } 246 | } 247 | 248 | // WithPoolPanicStackBufSize sets max size for the stacktrace buffer for panicking jobs. 249 | // Default value is 1024 that is enough for most of the cases. 
Be careful setting the buffer size to big values 250 | // as this may affect overall performance. 251 | func WithPoolPanicStackBufSize(size int) WorkerPoolOption { 252 | return func(w *WorkerPool) { 253 | w.panicStackBufSize = size 254 | } 255 | } 256 | 257 | // WithPoolSpanWorkOneNoJob enables tracing span generation for every attempt to get a job. 258 | // When set to true - generates a span for every DB poll, even when no job was acquired. This may 259 | // generate a lot of empty spans, but may help with some debugging, so use carefully. 260 | func WithPoolSpanWorkOneNoJob(spanWorkOneNoJob bool) WorkerPoolOption { 261 | return func(w *WorkerPool) { 262 | w.spanWorkOneNoJob = spanWorkOneNoJob 263 | } 264 | } 265 | 266 | // WithPoolJobTTL sets max time a job can run. Implementation-wise the job runs with the timeout context, 267 | // so it is up to the job implementation to handle context cancellation properly. 268 | func WithPoolJobTTL(d time.Duration) WorkerPoolOption { 269 | return func(w *WorkerPool) { 270 | w.jobTTL = d 271 | } 272 | } 273 | 274 | // WithPoolUnknownJobWorkFunc sets the handler for unknown job types. 275 | // When the handler is set - hooks set with WithPoolHooksUnknownJobType are never called as the job is 276 | // handled in the regular way. 277 | func WithPoolUnknownJobWorkFunc(wf WorkFunc) WorkerPoolOption { 278 | return func(w *WorkerPool) { 279 | w.unknownJobTypeWF = wf 280 | } 281 | } 282 | -------------------------------------------------------------------------------- /worker_option_test.go: -------------------------------------------------------------------------------- 1 | package gue 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/mock" 10 | "github.com/stretchr/testify/require" 11 | "go.opentelemetry.io/otel/metric/noop" 12 | noopT "go.opentelemetry.io/otel/trace/noop" 13 | 14 | "github.com/vgarvardt/gue/v5/adapter" 15 | ) 16 | 17 | type mockLogger struct { 18 | mock.Mock 19 | } 20 | 21 | func (m *mockLogger) Debug(msg string, fields ...adapter.Field) { 22 | m.Called(msg, fields) 23 | } 24 | 25 | func (m *mockLogger) Info(msg string, fields ...adapter.Field) { 26 | m.Called(msg, fields) 27 | } 28 | 29 | func (m *mockLogger) Error(msg string, fields ...adapter.Field) { 30 | m.Called(msg, fields) 31 | } 32 | 33 | func (m *mockLogger) With(fields ...adapter.Field) adapter.Logger { 34 | args := m.Called(fields) 35 | return args.Get(0).(adapter.Logger) 36 | } 37 | 38 | var dummyWM = WorkMap{ 39 | "MyJob": func(ctx context.Context, j *Job) error { 40 | return nil 41 | }, 42 | } 43 | 44 | func TestWithWorkerPollInterval(t *testing.T) { 45 | workerWithDefaultInterval, err := NewWorker(nil, dummyWM) 46 | require.NoError(t, err) 47 | assert.Equal(t, defaultPollInterval, workerWithDefaultInterval.interval) 48 | 49 | customInterval := 12345 * time.Millisecond 50 | workerWithCustomInterval, err := NewWorker(nil, dummyWM, WithWorkerPollInterval(customInterval)) 51 | require.NoError(t, err) 52 | assert.Equal(t, customInterval, workerWithCustomInterval.interval) 53 | } 54 | 55 | func TestWithWorkerQueue(t *testing.T) { 56 | workerWithDefaultQueue, err := NewWorker(nil, dummyWM) 57 | require.NoError(t, err) 58 | assert.Equal(t, defaultQueueName, workerWithDefaultQueue.queue) 59 | 60 | customQueue := "fooBarBaz" 61 | workerWithCustomQueue, err := NewWorker(nil, dummyWM, WithWorkerQueue(customQueue)) 62 | require.NoError(t, err) 63 | assert.Equal(t, customQueue, workerWithCustomQueue.queue) 64
| } 65 | 66 | func TestWithWorkerID(t *testing.T) { 67 | workerWithDefaultID, err := NewWorker(nil, dummyWM) 68 | require.NoError(t, err) 69 | assert.NotEmpty(t, workerWithDefaultID.id) 70 | 71 | customID := "some-meaningful-id" 72 | workerWithCustomID, err := NewWorker(nil, dummyWM, WithWorkerID(customID)) 73 | require.NoError(t, err) 74 | assert.Equal(t, customID, workerWithCustomID.id) 75 | } 76 | 77 | func TestWithWorkerLogger(t *testing.T) { 78 | workerWithDefaultLogger, err := NewWorker(nil, dummyWM) 79 | require.NoError(t, err) 80 | assert.IsType(t, adapter.NoOpLogger{}, workerWithDefaultLogger.logger) 81 | 82 | logMessage := "hello" 83 | 84 | l := new(mockLogger) 85 | l.On("Info", logMessage, mock.Anything) 86 | // worker sets id as default logger field 87 | l.On("With", mock.Anything).Return(l) 88 | 89 | workerWithCustomLogger, err := NewWorker(nil, dummyWM, WithWorkerLogger(l)) 90 | require.NoError(t, err) 91 | workerWithCustomLogger.logger.Info(logMessage) 92 | 93 | l.AssertExpectations(t) 94 | } 95 | 96 | func TestWithWorkerPollStrategy(t *testing.T) { 97 | workerWithWorkerPollStrategy, err := NewWorker(nil, dummyWM, WithWorkerPollStrategy(RunAtPollStrategy)) 98 | require.NoError(t, err) 99 | assert.Equal(t, RunAtPollStrategy, workerWithWorkerPollStrategy.pollStrategy) 100 | } 101 | 102 | func TestWithWorkerGracefulShutdown(t *testing.T) { 103 | workerWithNoGraceful, err := NewWorker(nil, dummyWM) 104 | require.NoError(t, err) 105 | assert.False(t, workerWithNoGraceful.graceful) 106 | assert.Nil(t, workerWithNoGraceful.gracefulCtx) 107 | 108 | workerWithGracefulDefault, err := NewWorker(nil, dummyWM, WithWorkerGracefulShutdown(nil)) 109 | require.NoError(t, err) 110 | assert.True(t, workerWithGracefulDefault.graceful) 111 | assert.Nil(t, workerWithGracefulDefault.gracefulCtx) 112 | 113 | ctx := context.WithValue(context.Background(), "foo", "bar") 114 | workerWithGracefulCtx, err := NewWorker(nil, dummyWM, WithWorkerGracefulShutdown(func() context.Context { 115 | return ctx 116 | })) 117 | require.NoError(t, err) 118 | assert.True(t, workerWithGracefulCtx.graceful) 119 | assert.Same(t, ctx, workerWithGracefulCtx.gracefulCtx()) 120 | } 121 | 122 | func TestWithWorkerPanicStackBufSize(t *testing.T) { 123 | workerWithDefaultSize, err := NewWorker(nil, dummyWM) 124 | require.NoError(t, err) 125 | assert.Equal(t, defaultPanicStackBufSize, workerWithDefaultSize.panicStackBufSize) 126 | 127 | workerWithCustomSize, err := NewWorker(nil, dummyWM, WithWorkerPanicStackBufSize(1234)) 128 | require.NoError(t, err) 129 | assert.Equal(t, 1234, workerWithCustomSize.panicStackBufSize) 130 | } 131 | 132 | func TestWithWorkerUnknownJobWorkFunc(t *testing.T) { 133 | workerWithoutHandler, err := NewWorker(nil, dummyWM) 134 | require.NoError(t, err) 135 | assert.Nil(t, workerWithoutHandler.unknownJobTypeWF) 136 | 137 | var wfCalled int 138 | wf := WorkFunc(func(ctx context.Context, j *Job) error { 139 | wfCalled++ 140 | return nil 141 | }) 142 | 143 | workerWithHandler, err := NewWorker(nil, dummyWM, WithWorkerUnknownJobWorkFunc(wf)) 144 | require.NoError(t, err) 145 | require.NotNil(t, workerWithHandler.unknownJobTypeWF) 146 | 147 | assert.Equal(t, 0, wfCalled) 148 | err = workerWithHandler.unknownJobTypeWF(nil, nil) 149 | assert.Equal(t, 1, wfCalled) 150 | assert.NoError(t, err) 151 | } 152 | 153 | func TestWithPoolPollInterval(t *testing.T) { 154 | workerPoolWithDefaultInterval, err := NewWorkerPool(nil, dummyWM, 2) 155 | require.NoError(t, err) 156 | assert.Equal(t, defaultPollInterval, 
workerPoolWithDefaultInterval.interval) 157 | 158 | customInterval := 12345 * time.Millisecond 159 | workerPoolWithCustomInterval, err := NewWorkerPool(nil, dummyWM, 2, WithPoolPollInterval(customInterval)) 160 | require.NoError(t, err) 161 | assert.Equal(t, customInterval, workerPoolWithCustomInterval.interval) 162 | } 163 | 164 | func TestWithPoolQueue(t *testing.T) { 165 | workerPoolWithDefaultQueue, err := NewWorkerPool(nil, dummyWM, 2) 166 | require.NoError(t, err) 167 | assert.Equal(t, defaultQueueName, workerPoolWithDefaultQueue.queue) 168 | 169 | customQueue := "fooBarBaz" 170 | workerPoolWithCustomQueue, err := NewWorkerPool(nil, dummyWM, 2, WithPoolQueue(customQueue)) 171 | require.NoError(t, err) 172 | assert.Equal(t, customQueue, workerPoolWithCustomQueue.queue) 173 | } 174 | 175 | func TestWithPoolID(t *testing.T) { 176 | workerPoolWithDefaultID, err := NewWorkerPool(nil, dummyWM, 2) 177 | require.NoError(t, err) 178 | assert.NotEmpty(t, workerPoolWithDefaultID.id) 179 | 180 | customID := "some-meaningful-id" 181 | workerPoolWithCustomID, err := NewWorkerPool(nil, dummyWM, 2, WithPoolID(customID)) 182 | require.NoError(t, err) 183 | assert.Equal(t, customID, workerPoolWithCustomID.id) 184 | } 185 | 186 | func TestWithPoolLogger(t *testing.T) { 187 | workerPoolWithDefaultLogger, err := NewWorkerPool(nil, dummyWM, 2) 188 | require.NoError(t, err) 189 | assert.IsType(t, adapter.NoOpLogger{}, workerPoolWithDefaultLogger.logger) 190 | 191 | logMessage := "hello" 192 | 193 | l := new(mockLogger) 194 | l.On("Info", logMessage, mock.Anything) 195 | // worker pool sets id as default logger field 196 | l.On("With", mock.Anything).Return(l) 197 | 198 | workerPoolWithCustomLogger, err := NewWorkerPool(nil, dummyWM, 2, WithPoolLogger(l)) 199 | require.NoError(t, err) 200 | workerPoolWithCustomLogger.logger.Info(logMessage) 201 | 202 | l.AssertExpectations(t) 203 | } 204 | 205 | func TestWithPoolPollStrategy(t *testing.T) { 206 | workerPoolWithPoolPollStrategy, err := NewWorkerPool(nil, dummyWM, 2, WithPoolPollStrategy(RunAtPollStrategy)) 207 | require.NoError(t, err) 208 | assert.Equal(t, RunAtPollStrategy, workerPoolWithPoolPollStrategy.pollStrategy) 209 | } 210 | 211 | func TestWithPoolTracer(t *testing.T) { 212 | customTracer := noopT.NewTracerProvider().Tracer("custom") 213 | 214 | workerPoolWithTracer, err := NewWorkerPool(nil, dummyWM, 2, WithPoolTracer(customTracer)) 215 | require.NoError(t, err) 216 | assert.Equal(t, customTracer, workerPoolWithTracer.tracer) 217 | 218 | for i := range workerPoolWithTracer.workers { 219 | assert.Equal(t, customTracer, workerPoolWithTracer.workers[i].tracer) 220 | } 221 | } 222 | 223 | func TestWithPoolMeter(t *testing.T) { 224 | customMeter := noop.NewMeterProvider().Meter("custom") 225 | 226 | workerPoolWithMeter, err := NewWorkerPool(nil, dummyWM, 2, WithPoolMeter(customMeter)) 227 | require.NoError(t, err) 228 | assert.Equal(t, customMeter, workerPoolWithMeter.meter) 229 | 230 | for i := range workerPoolWithMeter.workers { 231 | assert.Equal(t, customMeter, workerPoolWithMeter.workers[i].meter) 232 | } 233 | } 234 | 235 | type dummyHook struct { 236 | counter int 237 | } 238 | 239 | func (h *dummyHook) handler(context.Context, *Job, error) { 240 | h.counter++ 241 | } 242 | 243 | func TestWithWorkerHooksJobLocked(t *testing.T) { 244 | ctx := context.Background() 245 | hook := new(dummyHook) 246 | 247 | workerWOutHooks, err := NewWorker(nil, dummyWM) 248 | require.NoError(t, err) 249 | for _, h := range workerWOutHooks.hooksJobLocked { 250 | 
h(ctx, nil, nil) 251 | } 252 | require.Equal(t, 0, hook.counter) 253 | 254 | workerWithHooks, err := NewWorker(nil, dummyWM, WithWorkerHooksJobLocked(hook.handler, hook.handler, hook.handler)) 255 | require.NoError(t, err) 256 | for _, h := range workerWithHooks.hooksJobLocked { 257 | h(ctx, nil, nil) 258 | } 259 | require.Equal(t, 3, hook.counter) 260 | } 261 | 262 | func TestWithWorkerHooksUnknownJobType(t *testing.T) { 263 | ctx := context.Background() 264 | hook := new(dummyHook) 265 | 266 | workerWOutHooks, err := NewWorker(nil, dummyWM) 267 | require.NoError(t, err) 268 | for _, h := range workerWOutHooks.hooksUnknownJobType { 269 | h(ctx, nil, nil) 270 | } 271 | require.Equal(t, 0, hook.counter) 272 | 273 | workerWithHooks, err := NewWorker(nil, dummyWM, WithWorkerHooksUnknownJobType(hook.handler, hook.handler, hook.handler)) 274 | require.NoError(t, err) 275 | for _, h := range workerWithHooks.hooksUnknownJobType { 276 | h(ctx, nil, nil) 277 | } 278 | require.Equal(t, 3, hook.counter) 279 | } 280 | 281 | func TestWithWorkerHooksJobDone(t *testing.T) { 282 | ctx := context.Background() 283 | hook := new(dummyHook) 284 | 285 | workerWOutHooks, err := NewWorker(nil, dummyWM) 286 | require.NoError(t, err) 287 | for _, h := range workerWOutHooks.hooksJobDone { 288 | h(ctx, nil, nil) 289 | } 290 | require.Equal(t, 0, hook.counter) 291 | 292 | workerWithHooks, err := NewWorker(nil, dummyWM, WithWorkerHooksJobDone(hook.handler, hook.handler, hook.handler)) 293 | require.NoError(t, err) 294 | for _, h := range workerWithHooks.hooksJobDone { 295 | h(ctx, nil, nil) 296 | } 297 | require.Equal(t, 3, hook.counter) 298 | } 299 | 300 | func TestWithWorkerHooksJobUndone(t *testing.T) { 301 | ctx := context.Background() 302 | hook := new(dummyHook) 303 | 304 | workerWOutHooks, err := NewWorker(nil, dummyWM) 305 | require.NoError(t, err) 306 | for _, h := range workerWOutHooks.hooksJobUndone { 307 | h(ctx, nil, nil) 308 | } 309 | require.Equal(t, 0, hook.counter) 310 | 311 | workerWithHooks, err := NewWorker(nil, dummyWM, WithWorkerHooksJobUndone(hook.handler, hook.handler, hook.handler)) 312 | require.NoError(t, err) 313 | for _, h := range workerWithHooks.hooksJobUndone { 314 | h(ctx, nil, nil) 315 | } 316 | require.Equal(t, 3, hook.counter) 317 | } 318 | 319 | func TestWithWorkerSpanWorkOneNoJob(t *testing.T) { 320 | workerWOutSpanWorkOneNoJob, err := NewWorker(nil, dummyWM) 321 | require.NoError(t, err) 322 | assert.False(t, workerWOutSpanWorkOneNoJob.spanWorkOneNoJob) 323 | 324 | workerWithSpanWorkOneNoJob, err := NewWorker(nil, dummyWM, WithWorkerSpanWorkOneNoJob(true)) 325 | require.NoError(t, err) 326 | assert.True(t, workerWithSpanWorkOneNoJob.spanWorkOneNoJob) 327 | } 328 | 329 | func TestWithWorkerJobTTL(t *testing.T) { 330 | workerWOutJobTTL, err := NewWorker(nil, dummyWM) 331 | require.NoError(t, err) 332 | assert.Equal(t, time.Duration(0), workerWOutJobTTL.jobTTL) 333 | 334 | workerWithJobTTL, err := NewWorker(nil, dummyWM, WithWorkerJobTTL(5*time.Minute)) 335 | require.NoError(t, err) 336 | assert.Equal(t, 5*time.Minute, workerWithJobTTL.jobTTL) 337 | } 338 | 339 | func TestWithPoolHooksJobLocked(t *testing.T) { 340 | ctx := context.Background() 341 | hook := new(dummyHook) 342 | 343 | poolWOutHooks, err := NewWorkerPool(nil, dummyWM, 3) 344 | require.NoError(t, err) 345 | for _, w := range poolWOutHooks.workers { 346 | for _, h := range w.hooksJobLocked { 347 | h(ctx, nil, nil) 348 | } 349 | } 350 | require.Equal(t, 0, hook.counter) 351 | 352 | poolWithHooks, err := 
NewWorkerPool(nil, dummyWM, 3, WithPoolHooksJobLocked(hook.handler, hook.handler, hook.handler)) 353 | require.NoError(t, err) 354 | for _, w := range poolWithHooks.workers { 355 | for _, h := range w.hooksJobLocked { 356 | h(ctx, nil, nil) 357 | } 358 | } 359 | require.Equal(t, 9, hook.counter) 360 | } 361 | 362 | func TestWithPoolHooksUnknownJobType(t *testing.T) { 363 | ctx := context.Background() 364 | hook := new(dummyHook) 365 | 366 | poolWOutHooks, err := NewWorkerPool(nil, dummyWM, 3) 367 | require.NoError(t, err) 368 | for _, w := range poolWOutHooks.workers { 369 | for _, h := range w.hooksUnknownJobType { 370 | h(ctx, nil, nil) 371 | } 372 | } 373 | require.Equal(t, 0, hook.counter) 374 | 375 | poolWithHooks, err := NewWorkerPool(nil, dummyWM, 3, WithPoolHooksUnknownJobType(hook.handler, hook.handler, hook.handler)) 376 | require.NoError(t, err) 377 | for _, w := range poolWithHooks.workers { 378 | for _, h := range w.hooksUnknownJobType { 379 | h(ctx, nil, nil) 380 | } 381 | } 382 | require.Equal(t, 9, hook.counter) 383 | } 384 | 385 | func TestWithPoolHooksJobDone(t *testing.T) { 386 | ctx := context.Background() 387 | hook := new(dummyHook) 388 | 389 | poolWOutHooks, err := NewWorkerPool(nil, dummyWM, 3) 390 | require.NoError(t, err) 391 | for _, w := range poolWOutHooks.workers { 392 | for _, h := range w.hooksJobDone { 393 | h(ctx, nil, nil) 394 | } 395 | } 396 | require.Equal(t, 0, hook.counter) 397 | 398 | poolWithHooks, err := NewWorkerPool(nil, dummyWM, 3, WithPoolHooksJobDone(hook.handler, hook.handler, hook.handler)) 399 | require.NoError(t, err) 400 | for _, w := range poolWithHooks.workers { 401 | for _, h := range w.hooksJobDone { 402 | h(ctx, nil, nil) 403 | } 404 | } 405 | require.Equal(t, 9, hook.counter) 406 | } 407 | 408 | func TestWithPoolHooksJobUndone(t *testing.T) { 409 | ctx := context.Background() 410 | hook := new(dummyHook) 411 | 412 | poolWOutHooks, err := NewWorkerPool(nil, dummyWM, 3) 413 | require.NoError(t, err) 414 | for _, w := range poolWOutHooks.workers { 415 | for _, h := range w.hooksJobUndone { 416 | h(ctx, nil, nil) 417 | } 418 | } 419 | require.Equal(t, 0, hook.counter) 420 | 421 | poolWithHooks, err := NewWorkerPool(nil, dummyWM, 3, WithPoolHooksJobUndone(hook.handler, hook.handler, hook.handler)) 422 | require.NoError(t, err) 423 | for _, w := range poolWithHooks.workers { 424 | for _, h := range w.hooksJobUndone { 425 | h(ctx, nil, nil) 426 | } 427 | } 428 | require.Equal(t, 9, hook.counter) 429 | } 430 | 431 | func TestWithPoolGracefulShutdown(t *testing.T) { 432 | poolWithNoGraceful, err := NewWorkerPool(nil, dummyWM, 5) 433 | require.NoError(t, err) 434 | assert.False(t, poolWithNoGraceful.graceful) 435 | assert.Nil(t, poolWithNoGraceful.gracefulCtx) 436 | for _, w := range poolWithNoGraceful.workers { 437 | assert.False(t, w.graceful) 438 | assert.Nil(t, w.gracefulCtx) 439 | } 440 | 441 | poolWithGracefulDefault, err := NewWorkerPool(nil, dummyWM, 5, WithPoolGracefulShutdown(nil)) 442 | require.NoError(t, err) 443 | assert.True(t, poolWithGracefulDefault.graceful) 444 | assert.Nil(t, poolWithGracefulDefault.gracefulCtx) 445 | for _, w := range poolWithGracefulDefault.workers { 446 | assert.True(t, w.graceful) 447 | assert.Nil(t, w.gracefulCtx) 448 | } 449 | 450 | ctx := context.WithValue(context.Background(), "foo", "bar") 451 | poolWithGracefulCtx, err := NewWorkerPool(nil, dummyWM, 5, WithPoolGracefulShutdown(func() context.Context { 452 | return ctx 453 | })) 454 | require.NoError(t, err) 455 | assert.True(t, 
poolWithGracefulCtx.graceful) 456 | assert.Same(t, ctx, poolWithGracefulCtx.gracefulCtx()) 457 | for _, w := range poolWithGracefulCtx.workers { 458 | assert.True(t, w.graceful) 459 | assert.Same(t, ctx, poolWithGracefulCtx.gracefulCtx()) 460 | } 461 | } 462 | 463 | func TestWithPoolPanicStackBufSize(t *testing.T) { 464 | poolWithDefaultSize, err := NewWorkerPool(nil, dummyWM, 2) 465 | require.NoError(t, err) 466 | assert.Equal(t, defaultPanicStackBufSize, poolWithDefaultSize.panicStackBufSize) 467 | for _, w := range poolWithDefaultSize.workers { 468 | assert.Equal(t, defaultPanicStackBufSize, w.panicStackBufSize) 469 | } 470 | 471 | poolWithCustomSize, err := NewWorkerPool(nil, dummyWM, 3, WithPoolPanicStackBufSize(12345)) 472 | require.NoError(t, err) 473 | assert.Equal(t, 12345, poolWithCustomSize.panicStackBufSize) 474 | for _, w := range poolWithCustomSize.workers { 475 | assert.Equal(t, 12345, w.panicStackBufSize) 476 | } 477 | } 478 | 479 | func TestWithPoolSpanWorkOneNoJob(t *testing.T) { 480 | poolWOutSpanWorkOneNoJob, err := NewWorkerPool(nil, dummyWM, 2) 481 | require.NoError(t, err) 482 | for _, w := range poolWOutSpanWorkOneNoJob.workers { 483 | assert.False(t, w.spanWorkOneNoJob) 484 | } 485 | 486 | poolWithSpanWorkOneNoJob, err := NewWorkerPool(nil, dummyWM, 2, WithPoolSpanWorkOneNoJob(true)) 487 | require.NoError(t, err) 488 | for _, w := range poolWithSpanWorkOneNoJob.workers { 489 | assert.True(t, w.spanWorkOneNoJob) 490 | } 491 | } 492 | 493 | func TestWithPoolJobTTL(t *testing.T) { 494 | poolWOutJobTTL, err := NewWorkerPool(nil, dummyWM, 2) 495 | require.NoError(t, err) 496 | assert.Equal(t, time.Duration(0), poolWOutJobTTL.jobTTL) 497 | for _, w := range poolWOutJobTTL.workers { 498 | assert.Equal(t, time.Duration(0), w.jobTTL) 499 | } 500 | 501 | poolWithJobTTL, err := NewWorkerPool(nil, dummyWM, 2, WithPoolJobTTL(10*time.Minute)) 502 | require.NoError(t, err) 503 | assert.Equal(t, 10*time.Minute, poolWithJobTTL.jobTTL) 504 | for _, w := range poolWithJobTTL.workers { 505 | assert.Equal(t, 10*time.Minute, w.jobTTL) 506 | } 507 | } 508 | 509 | func TestWithPoolUnknownJobWorkFunc(t *testing.T) { 510 | poolWithoutHandler, err := NewWorkerPool(nil, dummyWM, 2) 511 | require.NoError(t, err) 512 | for _, w := range poolWithoutHandler.workers { 513 | assert.Nil(t, w.unknownJobTypeWF) 514 | } 515 | 516 | var wfCalled int 517 | wf := WorkFunc(func(ctx context.Context, j *Job) error { 518 | wfCalled++ 519 | return nil 520 | }) 521 | 522 | poolWithHandler, err := NewWorkerPool(nil, dummyWM, 2, WithPoolUnknownJobWorkFunc(wf)) 523 | require.NoError(t, err) 524 | require.NotNil(t, poolWithHandler.unknownJobTypeWF) 525 | 526 | assert.Equal(t, 0, wfCalled) 527 | for _, w := range poolWithHandler.workers { 528 | require.NotNil(t, w.unknownJobTypeWF) 529 | 530 | err := w.unknownJobTypeWF(nil, nil) 531 | assert.NoError(t, err) 532 | } 533 | assert.Equal(t, 2, wfCalled) 534 | } 535 | --------------------------------------------------------------------------------
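The worker and pool options documented above compose through NewWorkerPool. Below is a minimal usage sketch that wires a WorkMap and a few of the pool options together. It is only an illustration, not part of the repository: the package name "example", the runPool helper, the "emails" queue name and the "welcome-email" job type are all invented, and the *gue.Client is assumed to be constructed elsewhere (client construction is not shown in this section).

package example

import (
	"context"
	"log"
	"time"

	"github.com/vgarvardt/gue/v5"
)

// runPool is a hypothetical helper: it builds a 4-worker pool for the "emails" queue
// and blocks until ctx is cancelled. The *gue.Client is expected to be built elsewhere.
func runPool(ctx context.Context, gc *gue.Client) error {
	wm := gue.WorkMap{
		// "welcome-email" is an illustrative job type, not something defined by the library
		"welcome-email": func(ctx context.Context, j *gue.Job) error {
			log.Printf("working job %s of type %s", j.ID.String(), j.Type)
			return nil // a nil result lets the worker delete the finished job
		},
	}

	pool, err := gue.NewWorkerPool(
		gc, wm, 4,
		gue.WithPoolPollInterval(2*time.Second), // poll more often than the 5-second default
		gue.WithPoolQueue("emails"),             // work a named queue instead of the nameless ""
		gue.WithPoolJobTTL(time.Minute),         // handler context is cancelled after one minute
		gue.WithPoolGracefulShutdown(nil),       // wait for in-flight jobs; nil handler ctx falls back to context.Background()
	)
	if err != nil {
		return err
	}

	// Run blocks until ctx is cancelled and every worker in the pool exits
	return pool.Run(ctx)
}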
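The hook options follow the same pattern. The sketch below, again only a sketch with an invented logging hook and constructor name, shows how a HookFunc might be attached to a single Worker to observe failed jobs and how WithWorkerUnknownJobWorkFunc can take over jobs whose type is missing from the WorkMap.

package example

import (
	"context"
	"fmt"
	"log"

	"github.com/vgarvardt/gue/v5"
)

// newObservedWorker is a hypothetical constructor: it wraps NewWorker with a job-done hook
// and a catch-all handler for unknown job types.
func newObservedWorker(gc *gue.Client, wm gue.WorkMap) (*gue.Worker, error) {
	// jobDone is called after every worked job; err is non-nil when the job failed or panicked
	jobDone := gue.HookFunc(func(ctx context.Context, j *gue.Job, err error) {
		if err != nil {
			log.Printf("job %s (%s) failed: %v", j.ID.String(), j.Type, err)
		}
	})

	// unknownType handles jobs with unregistered types instead of the default error path;
	// returning an error hands the job back to the worker's backoff strategy
	unknownType := gue.WorkFunc(func(ctx context.Context, j *gue.Job) error {
		return fmt.Errorf("no handler registered for job type %q", j.Type)
	})

	return gue.NewWorker(
		gc, wm,
		gue.WithWorkerHooksJobDone(jobDone),
		gue.WithWorkerUnknownJobWorkFunc(unknownType),
	)
}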