├── .github
│   └── workflows
│       └── tests.yaml
├── LICENSE
├── README.md
├── consumer.go
├── consumer_test.go
├── docs
│   ├── consumer_loop.png
│   └── monitoring.png
├── errors.go
├── errors_test.go
├── example_consumer_test.go
├── example_publisher_test.go
├── examples_test.go
├── go.mod
├── go.sum
├── integtest
│   ├── consumer_test.go
│   └── publisher_test.go
├── internal
│   ├── pg
│   │   └── pg.go
│   ├── query
│   │   ├── query_builder.go
│   │   └── query_builder_test.go
│   └── require
│       └── require.go
├── logo.png
├── logo.svg
├── message.go
├── message_test.go
├── publisher.go
├── publisher_test.go
├── validator.go
├── validator_test.go
└── x
    └── schema
        └── queue.go
/.github/workflows/tests.yaml:
--------------------------------------------------------------------------------
1 | name: test
2 |
3 | # Controls when the workflow will run
4 | on:
5 | # Triggers the workflow on push or pull request events but only for the "main" branch
6 | push:
7 | branches: [ "main" ]
8 | pull_request:
9 | branches: [ "main" ]
10 |
11 | # Allows you to run this workflow manually from the Actions tab
12 | workflow_dispatch:
13 |
14 | permissions:
15 | contents: write
16 |
17 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel
18 | jobs:
19 | # This workflow contains a single job called "test"
20 | test:
21 | name: test
22 | strategy:
23 | matrix:
24 | # version must be string, otherwise it will be converted to float
25 | # and the trailing zero will be removed. 1.20 -> 1.2
26 | go-version: [ "1.22", "1.23", "1.24" ]
27 | postgres-version: [ 17, 16, 15, 14, 13, 12 ]
28 | # Ensure that all combinations of Go and Postgres versions will run
29 | continue-on-error: true
30 |
31 | # The type of runner that the job will run on
32 | runs-on: ubuntu-latest
33 |
34 | # Environment variables that are available to all jobs and steps in this workflow
35 | env:
36 | TEST_POSTGRES_DSN: postgres://postgres:postgres@127.0.0.1:5432/postgres?sslmode=disable
37 |
38 | # Services are Docker containers that are run during a job
39 | services:
40 | # Start the postgres database
41 | postgres:
42 | image: postgres:${{ matrix.postgres-version }}
43 | env:
44 | POSTGRES_USER: postgres
45 | POSTGRES_PASSWORD: postgres
46 | POSTGRES_DB: postgres
47 | ports:
48 | - 5432:5432
49 | # Set health checks to wait until postgres has started
50 | options: >-
51 | --health-cmd pg_isready
52 | --health-interval 10s
53 | --health-timeout 5s
54 | --health-retries 5
55 |
56 | # Steps represent a sequence of tasks that will be executed as part of the job
57 | steps:
58 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
59 | - name: checkout
60 | uses: actions/checkout@v4
61 |
62 | # Setting up Go in the runner
63 | - name: setup go
64 | uses: actions/setup-go@v5
65 | with:
66 | go-version: ${{ matrix.go-version }}
67 |
68 | # Disabled because of incompatibility with Matrix Strategy
69 | # # Cache Go build cache and modcache
70 | # - name: cache deps
71 | # uses: actions/cache@v3
72 | # with:
73 | # path: |
74 | # ~/.cache/go-build
75 | # ~/go/pkg/mod
76 | #           key: ${{ runner.os }}-go-${{ matrix.go-version }}-postgres-${{ matrix.postgres-version }}-${{ hashFiles('**/go.sum') }}
77 | # restore-keys: |
78 | # ${{ runner.os }}-go-${{ matrix.go-version }}-
79 |
80 | # Runs Go tests
81 | - name: test
82 | run: go test ./...
83 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PGQ - go queues on top of postgres
2 |
3 | [](https://pkg.go.dev/go.dataddo.com/pgq)
4 | [](https://goreportcard.com/report/go.dataddo.com/pgq)
5 |
6 |
7 |
8 | PGQ is a [Go](http://golang.org) package that provides a queuing mechanism for your Go applications.
9 | It is built on top of the postgres database and enables developers to implement efficient, reliable,
10 | yet simple message queues for service-oriented architectures using the familiar postgres infrastructure.
11 |
12 | ## Features
13 | - __Postgres__-backed: Leverages the power of SQL to store and manage queues.
14 | - __Reliable__: Guarantees message persistence and delivery, even in the face of various failures.
15 | - __Transactional__: Supports transactional message handling, ensuring consistency.
16 | - __Simple usage__: Provides a clean and easy-to-use API for interacting with the queue.
17 | - __Efficient__: Optimized for medium to long job durations.
18 | - __Scheduled Messages__: Schedule messages to be processed at a specific future time.
19 |
20 | ## Why PGQ?
21 | - __Postgres__: The `postgres` just works: it is feature-rich, scalable, and performant.
22 | - __SQL__: You are already familiar with SQL, right? No need to learn anything new.
23 | - __Stack__: You do not have to maintain/manage/administer/patch any additional message broker component.
24 | - __Simplicity__: The `pgq` is a simple and straightforward solution for your messaging needs.
25 | - __Usability__: The `pgq` can be used for many scenarios.
26 |
27 | Of course, you can roll your own solution or use other messaging technologies,
28 | but why would you do that when you can use `postgres`, which is already there and is a proven technology?
29 |
30 | See the benefits of using pgq in the following video:
31 |
32 | [](https://www.youtube.com/watch?v=feJbKEAvBLk)
33 |
34 | ## When to pick PGQ?
35 |
36 | Even though there are other great technologies and tools for complex messaging, including robust routing configuration,
37 | sometimes you do not need all of that, and you can be just fine with simpler tooling.
38 |
39 | Pick pgq if you:
40 | - need to distribute the traffic fairly among your app replicas
41 | - need to protect your services from overload
42 | - want out-of-the-box observability of your queues
43 | - want to use SQL for managing your queues
44 | - already use `postgres` and you want to keep your tech stack simple
45 | - don't want to manage another technology and learn its specifics
46 | - need to schedule messages to be processed at a specific time
47 |
48 |
49 | No need to bring new technology to your existing stack when you can be perfectly satisfied with `postgres`.
50 | Write the consumers and publishers in various languages around one simple idea: __use a postgres table as a queue__.
51 | While using `pgq` you have superb observability of the queue.
52 | You can easily inspect the payloads of messages waiting to be processed, as well as of those currently being processed or already processed.
53 | You can get processing results, durations, and other statistics quite simply.
54 | Because the `pgq` queue table also keeps the records of already processed jobs, you get historical statistics of all messages out of the box and can view them effortlessly with simple SQL queries.
55 |
56 | Pgq is intended to replace specialized message brokers in environments where you already use postgres and want clean, simple, and straightforward communication among your services.
57 |
58 | ## Basic principles
59 | - Every `queue` is a single postgres `table`.
60 | - You maintain the table on your own. You can extend it as you need.
61 | - Publishers add new rows to the queue table.
62 | - Consumers update the pgq mandatory fields of the rows in the queue table.
63 |
64 | ## Installation
65 | To install PGQ, use the go get command:
66 | ```
67 | go get go.dataddo.com/pgq@latest
68 | ```
69 |
70 | ## Setup
71 | Prerequisites:
72 |
73 | For `pgq` to work, there must be a `postgres table` with all the necessary `pgq` fields.
74 | You can create the table on your own with a classic `CREATE TABLE ...`, or you can use the query generator to generate the query for you.
75 | The generated query creates the queue table along with indexes which improve the performance of consumer queries.
76 |
77 | You usually run the setup commands just once during the queue setup.
78 |
79 | ```go
80 | package main
81 |
82 | import (
83 | "fmt"
84 | "go.dataddo.com/pgq/x/schema"
85 | )
86 |
87 | func main() {
88 | queueName := "my_queue"
89 |
90 | 	// the create string contains the "CREATE TABLE queueName ..." statement,
91 | 	// which you may use for table and indexes creation.
92 | create := schema.GenerateCreateTableQuery(queueName)
93 | fmt.Println(create)
94 |
95 | // You may also use the "GenerateDropTableQuery" for dropping all the pgq artifacts (down migration)
96 | }
97 | ```
98 |
99 | ## Usage
100 |
101 | ### Publishing the message
102 |
103 | ```go
104 | package main
105 |
106 | import (
107 | "context"
108 | "database/sql"
109 | "encoding/json"
110 | "fmt"
111 |
112 | _ "github.com/jackc/pgx/v4/stdlib"
113 |
114 | "go.dataddo.com/pgq"
115 | )
116 |
117 | func main() {
118 | postgresDSN := "your_postgres_dsn"
119 | queueName := "your_queue_name"
120 |
121 | // create a new postgres connection
122 | db, err := sql.Open("pgx", postgresDSN)
123 | if err != nil {
124 | panic(err.Error())
125 | }
126 | defer db.Close()
127 |
128 | // create the publisher which may be reused for multiple messages
129 | // you may pass the optional PublisherOptions when creating it
130 | publisher := pgq.NewPublisher(db)
131 |
132 | // publish the message to the queue
133 | // provide the payload which is the JSON object
134 | // and optional metadata which is the map[string]string
135 | msg := &pgq.MessageOutgoing{
136 | Payload: json.RawMessage(`{"foo":"bar"}`),
137 | }
138 | msgId, err := publisher.Publish(context.Background(), queueName, msg)
139 | if err != nil {
140 | panic(err.Error())
141 | }
142 |
143 | fmt.Println("Message published with ID:", msgId)
144 | }
145 |
146 | ```
147 |
148 | After the message is successfully published, you can see the new row with given `msgId` in the queue table.
149 |
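Because the schema includes the `scheduled_for` column, you can also delay the processing of a message until a specific time. A minimal sketch, assuming a `ScheduledFor` field on `pgq.MessageOutgoing` (check the package's message type for the exact field name):

```go
// Publish a message that should not be consumed before a given time.
scheduledFor := time.Now().Add(2 * time.Hour)
msg := &pgq.MessageOutgoing{
	ScheduledFor: &scheduledFor, // assumed field name, see the pgq docs
	Payload:      json.RawMessage(`{"foo":"bar"}`),
}
msgId, err := publisher.Publish(context.Background(), queueName, msg)
```
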
150 | ### Publisher options
151 |
152 | Very often you want some metadata to be part of the message so that you can filter the messages in the queue table by it.
153 | Metadata can be any additional information you consider worth attaching to the message but which does not belong in the payload.
154 | It can be the publisher app name/version, payload schema version, customer identifiers, etc.
155 |
156 | You can simply attach the metadata to a single message:
157 | ```go
158 | metadata := pgq.Metadata{
159 | "publisherHost": "localhost",
160 | "payloadVersion": "v1.0"
161 | }
162 | ```
163 |
164 | or you can configure the `publisher` to attach the metadata to all messages it publishes:
165 | ```go
166 | opts := []pgq.PublisherOption{
167 | 	pgq.WithMetaInjectors(
168 | 		pgq.StaticMetaInjector(
169 | 			pgq.Metadata{
170 | 				"publisherHost":    "localhost",
171 | 				"publisherVersion": "commitRSA",
172 | 			},
173 | 		),
174 | 	),
175 | }
176 |
177 | publisher := pgq.NewPublisher(db, opts...)
178 | metadata := pgq.Metadata{
179 | 	"payloadVersion": "v1.0", // message specific meta field
180 | }
181 | ```
182 |
183 | ### Consuming the messages
184 |
185 | ```go
186 | package main
187 |
188 | import (
189 | "context"
190 | "database/sql"
191 | "encoding/json"
192 | "fmt"
193 | "go.dataddo.com/pgq"
194 | _ "github.com/jackc/pgx/v4/stdlib"
195 | )
196 |
197 | func main() {
198 | postgresDSN := "your_postgres_dsn"
199 | queueName := "your_queue_name"
200 |
201 | 	// create a new postgres connection
202 | db, err := sql.Open("pgx", postgresDSN)
203 | if err != nil {
204 | panic(err.Error())
205 | }
206 | defer db.Close()
207 |
208 | 	// create the consumer, attaching the message handler defined below
209 | h := &handler{}
210 | consumer, err := pgq.NewConsumer(db, queueName, h)
211 | if err != nil {
212 | panic(err.Error())
213 | }
214 |
215 | err = consumer.Run(context.Background())
216 | if err != nil {
217 | panic(err.Error())
218 | }
219 | }
220 |
221 | // we must provide a message handler, which implements a simple interface
222 | type handler struct {}
223 | func (h *handler) HandleMessage(_ context.Context, msg *pgq.MessageIncoming) (processed bool, err error) {
224 | fmt.Println("Message payload:", string(msg.Payload))
225 | return true, nil
226 | }
227 | ```
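
If you prefer a plain function to a struct, the `pgq.MessageHandlerFunc` adapter turns any function with the matching signature into a `MessageHandler`:

```go
handler := pgq.MessageHandlerFunc(
	func(_ context.Context, msg *pgq.MessageIncoming) (processed bool, err error) {
		fmt.Println("Message payload:", string(msg.Payload))
		return pgq.MessageProcessed, nil
	},
)
consumer, err := pgq.NewConsumer(db, queueName, handler)
```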
228 |
229 | ### Consumer options
230 |
231 | You can configure the consumer by passing optional `ConsumerOption`s when creating it.
232 |
233 | | Option                     | Description                                                                                                                                                                                                                        |
234 | |:---------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
235 | | WithLogger                 | Provide your own `*slog.Logger` to keep the pgq logs under control.                                                                                                                                                                |
236 | | WithMaxParallelMessages    | Set how many messages a single consumer processes concurrently.                                                                                                                                                                    |
237 | | WithLockDuration           | Set the effective lock duration (`time.Duration`) according to your needs. If you handle messages quickly, set it in seconds/minutes. For long-running jobs, set it to a value larger than your longest job takes to be processed. |
238 | | WithPollingInterval        | Defines how frequently the consumer asks the postgres table for new messages (`time.Duration`).                                                                                                                                    |
239 | | WithInvalidMessageCallback | Handle invalid messages which may appear in the queue. You may re-publish them to some junk queue etc.                                                                                                                             |
240 | | WithHistoryLimit           | How far back in history to search for messages in the queue. Sometimes you want to ignore messages created days ago even though they are unprocessed.                                                                              |
241 | | WithMetrics                | Attach your own metrics provider (prometheus, ...) here.                                                                                                                                                                           |
242 | | WithMetadataFilter         | Filter consumed messages by metadata. Currently `OpEqual` and `OpNotEqual` are supported; see the example below.                                                                                                                   |
243 |
244 | ```go
245 | consumer, err := pgq.NewConsumer(db, queueName, handler,
246 | 	pgq.WithLogger(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))),
247 | 	pgq.WithLockDuration(10*time.Minute),
248 | 	pgq.WithPollingInterval(2*time.Second),
249 | 	pgq.WithMaxParallelMessages(1),
250 | 	pgq.WithMetrics(noop.Meter{}),
251 | )
252 | ```
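
To consume only messages whose metadata matches (or does not match) a given key/value, attach a metadata filter. A minimal sketch using the `WithMetadataFilter` option with the `OpEqual` operation (the `payloadVersion` key is just an illustrative value):

```go
consumer, err := pgq.NewConsumer(db, queueName, handler,
	pgq.WithMetadataFilter(&pgq.MetadataFilter{
		Key:       "payloadVersion",
		Operation: pgq.OpEqual,
		Value:     "v1.0",
	}),
)
```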
253 |
254 | For more detailed usage examples and API documentation, please refer to the Dataddo pgq GoDoc page.
255 |
256 | ## Message
257 |
258 | The message is the essential structure for communication between services using `pgq`.
259 | The message struct matches the postgres table schema. You can modify the table structure on your own by adding extra columns, but `pgq` depends on the following mandatory fields only:
260 |
261 | | Field | Description |
262 | |:-----------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
263 | | `id` | The unique ID of the message in the db. |
264 | | `payload` | User's custom message content in JSON format. |
265 | | `metadata` | User's custom metadata about the message in JSON format so your `payload` remains cleansed from unnecessary data. This is the good place where to put information like the `publisher` app name, payload schema version, customer related information for easier debugging etc. |
266 | | `created_at` | The timestamp when the record in db was created (message received to the queue). |
267 | | `started_at` | Timestamp indicating when the consumer started to process the message. |
268 | | `scheduled_for` | Timestamp to delay message processing until a specific time. If NULL, the message is processed immediately. |
269 | | `locked_until` | Contains the consumer lock validity timestamp. If this field is set and has not expired yet, no other consumer can process the message. |
270 | | `processed_at` | Timestamp when the message was processed (either success or failure). |
271 | | `error_detail` | The reason why the processing of the message failed provided by the consumer. `NULL` means no error. |
272 | | `consumed_count` | An integer incremented each time a consumer picks the message up. It is used to skip messages which repeatedly crash their consumers (OOM errors etc.) and would otherwise cause infinite processing loops.                                                       |
273 |
274 | ## Handy pgq SQL queries
275 |
276 | ### Queue size
277 | Get the messages waiting in the queue to be fetched by consumer.
278 | ```sql
279 | select * from queue_name where processed_at is null and locked_until is null;
280 | ```
281 | Get the number of messages waiting in the queue. A good candidate for the queue length metric in your monitoring system.
282 | ```sql
283 | select count(*) from queue_name where processed_at is null and locked_until is null;
284 | ```
285 |
286 | ### Messages currently being processed
287 | Get the messages being processed at the moment.
288 | ```sql
289 | select * from queue_name where processed_at is null and locked_until is not null;
290 | ```
291 | Get the number of messages currently being processed. Another good candidate for a metric in your monitoring system.
292 | ```sql
293 | select count(*) from queue_name where processed_at is null and locked_until is not null;
294 | ```
295 |
296 | _Tip: You can use the `pgq` table as a source for your monitoring system and enable alerting for suspicious values. It is usually good to monitor not only the peak size but also empty queues, which may indicate troubles on the publishers' side._
297 |
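A related metric worth alerting on is the age of the oldest unprocessed message; a sketch of such a query:

```sql
select coalesce(extract(epoch from now() - min(created_at)), 0) as oldest_unprocessed_seconds
from queue_name
where processed_at is null;
```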
298 |
299 | ### Processed messages
300 | The messages which have already been successfully processed.
301 | ```sql
302 | select * from queue_name where processed_at is not null and error_detail is null;
303 | ```
304 | The messages which have already been processed, but ended with an error.
305 | ```sql
306 | select * from queue_name where processed_at is not null and error_detail is not null;
307 | ```
308 |
309 | ### Other useful queries
310 | ```sql
311 | -- messages created in last 1 day which have not been processed yet
312 | select * from queue_name where processed_at is null and created_at > NOW() - INTERVAL '1 DAY';
313 |
314 | -- messages causing unexpected failures of consumers (usually OOM)
315 | select * from queue_name where consumed_count > 1;
316 |
317 | -- top 10 slowest processed messages
318 | select id, processed_at - started_at as duration from queue_name where processed_at is not null and started_at is not null order by duration desc limit 10;
319 | ```
320 | ## Under the hood
321 |
322 | Internally, pgq uses the classic `UPDATE` + `SELECT ... FOR UPDATE` postgres statement, which creates a transactional lock on the selected rows and lets the table behave like a queue.
323 | The select statement uses the `SKIP LOCKED` clause,
324 | which lets the consumer fetch messages in the order they were created without getting stuck on locked rows.
325 |
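In simplified form, the statement issued by the consumer looks roughly like this (a sketch of what `generateQuery` in `consumer.go` produces, with the optional history, consume-count, and metadata filters omitted):

```sql
UPDATE my_queue_name
SET locked_until = :locked_until,
    started_at = CURRENT_TIMESTAMP,
    consumed_count = consumed_count + 1
WHERE id IN (
    SELECT id FROM my_queue_name
    WHERE (locked_until IS NULL OR locked_until < CURRENT_TIMESTAMP)
      AND processed_at IS NULL
      AND (scheduled_for IS NULL OR scheduled_for < CURRENT_TIMESTAMP)
    ORDER BY scheduled_for ASC NULLS LAST, consumed_count ASC, created_at ASC
    LIMIT :limit
    FOR UPDATE SKIP LOCKED
) RETURNING id, payload, metadata, consumed_count, locked_until;
```
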
326 | ### Consumer loop
327 |
328 |
329 | Consumers periodically ask the queue table for new messages to be processed.
330 |
331 | - When there is no message to be processed, consumer idles for a `polling interval` duration and then tries again.
332 | - When the consumer finds the message to be processed, it locks it by updating the `locked_until` field with the `lock duration` timestamp.
333 | - If the consumer fails to update the `locked_until` field, it means that another consumer has already locked the message, and the current consumer tries to find the message again.
334 | - If the consumer successfully locks the message, it starts to process it.
335 | - When the consumer finishes the processing, it updates the `processed_at` field with the current timestamp.
336 |
337 |
338 | ## Optimizing performance
339 |
340 | When using pgq in a production environment, you should focus on the following areas to improve performance:
341 | ### Queue table indexes
342 | Having indexes on the fields used by the consumer queries is essential for good performance.
343 | When postgres lacks these indexes, searching the queue can become very slow, which may slow down the whole database instance.
344 |
345 | Each queue table should have at least the following indexes:
346 | ```sql
347 | CREATE INDEX IDX_CREATED_AT ON my_queue_name (created_at);
348 | CREATE INDEX IDX_PROCESSED_AT_CONSUMED_COUNT ON my_queue_name (consumed_count, processed_at) WHERE (processed_at IS NULL);
349 | ```
350 | These indexes are automatically part of the output query of the `GenerateCreateTableQuery` function.
351 | But if you create tables on your own, please make sure you have them.
352 |
353 | ### Queue table partitioning
354 |
355 | Usually you do not need to keep months of the queue table's history in the database.
356 | You may delete such rows with the `DELETE` command in some cron job, but do not forget that DELETE is a very expensive operation in postgres,
357 | and it may affect insertions and updates in the table.
358 |
359 | The better solution is to use __postgres table partitioning__.
360 | The easiest way to set up the partitioning is to use the `pg_partman` postgres extension.
361 |
362 | If the following query returns 0, you need to install the extension first; otherwise you're ready to partition.
363 | ```sql
364 | SELECT count(name) FROM pg_available_extensions where name = 'pg_partman';
365 | ```
366 |
367 | 1. We create the `template table` to be used for the creation of new partitions:
368 |
369 | The template table must have exactly the same structure as the original queue, and it has the `_template` name suffix.
370 | It must also contain the indexes so that the partition-derived tables have them too.
371 | ```sql
372 | CREATE TABLE my_queue_name_template (id UUID NOT NULL DEFAULT gen_random_uuid(), created_at TIMESTAMP(0) WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP NOT NULL, payload JSONB DEFAULT NULL, metadata JSONB DEFAULT NULL, scheduled_for TIMESTAMP(0) WITH TIME ZONE DEFAULT NULL, locked_until TIMESTAMP(0) WITH TIME ZONE DEFAULT NULL, processed_at TIMESTAMP(0) WITH TIME ZONE DEFAULT NULL, error_detail TEXT DEFAULT NULL, started_at TIMESTAMP(0) WITH TIME ZONE DEFAULT NULL, consumed_count INT DEFAULT 0 NOT NULL, PRIMARY KEY(id, created_at));
373 | CREATE INDEX IDX_CREATED_AT_TPL ON my_queue_name_template (created_at);
374 | CREATE INDEX IDX_PROCESSED_AT_TPL ON my_queue_name_template (consumed_count, processed_at) WHERE (processed_at IS NULL);
375 | ```
376 |
377 | 2. We create the partitioned table with the same structure as the template table, but with the partitioning key:
378 | ```sql
379 | -- DROP the table if it already exists
380 | DROP table IF EXISTS my_queue_name;
381 | -- and let it be created like partman does it. This is the default queue to be used when no partitioned one is matched
382 | CREATE TABLE IF NOT EXISTS my_queue_name
383 | (LIKE my_queue_name_template INCLUDING DEFAULTS INCLUDING CONSTRAINTS INCLUDING INDEXES INCLUDING COMMENTS) PARTITION BY RANGE (created_at);
384 | ```
385 | 3. We instruct partman to create the partitions daily, automatically:
386 | ```sql
387 | SELECT partman.create_parent('public.my_queue_name', 'created_at', 'native', 'daily', p_template_table := 'public.my_queue_name_template');
388 | ```
389 |
390 | 4. We configure partman to rotate the tables, setting a 14-day retention period:
391 | ```sql
392 | UPDATE partman.part_config
393 | SET infinite_time_partitions = true,
394 | retention = '14 days',
395 | retention_keep_table = false,
396 | retention_keep_index = false
397 | WHERE parent_table = 'public.my_queue_name';
398 | ```
399 |
400 | ## Contribution
401 |
402 | We are open to contributions to the pgq package, but since we use it in our production environment, we have to be very careful about changes.
403 | We are not looking to add new features, but we welcome bug fixes, performance improvements, and documentation enhancements.
404 |
405 | ### Run integration tests
406 |
407 | The unit tests run without any additional setup, but the integration tests require a running postgres instance; otherwise they are skipped.
408 |
409 | In one shell start the postgres docker container:
410 | ```shell
411 | docker run --rm -it -e POSTGRES_PASSWORD=postgres -p 5432:5432 postgres:15-alpine
412 | ```
413 |
414 | In another shell run the tests:
415 | ```shell
416 | TEST_POSTGRES_DSN=postgres://postgres:postgres@localhost:5432/postgres go test ./...
417 | ```
418 |
--------------------------------------------------------------------------------
/consumer.go:
--------------------------------------------------------------------------------
1 | package pgq
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "database/sql"
7 | "encoding/json"
8 | "fmt"
9 | "io"
10 | "log/slog"
11 | "math"
12 | "strings"
13 | "sync"
14 | "time"
15 | "unicode"
16 |
17 | "github.com/google/uuid"
18 | "github.com/jackc/pgtype"
19 | "github.com/jmoiron/sqlx"
20 | "github.com/pkg/errors"
21 | "go.opentelemetry.io/otel"
22 | "go.opentelemetry.io/otel/attribute"
23 | "go.opentelemetry.io/otel/codes"
24 | "go.opentelemetry.io/otel/metric"
25 | "go.opentelemetry.io/otel/metric/noop"
26 | "go.opentelemetry.io/otel/propagation"
27 | "golang.org/x/sync/semaphore"
28 |
29 | "go.dataddo.com/pgq/internal/pg"
30 | "go.dataddo.com/pgq/internal/query"
31 | )
32 |
33 | type fatalError struct {
34 | Err error
35 | }
36 |
37 | func (e fatalError) Error() string {
38 | return fmt.Sprintf("pgq consumer fatal error: %v", e.Err)
39 | }
40 |
41 | func (e fatalError) Unwrap() error { return e.Err }
42 |
43 | const (
44 | // MessageProcessed signals that message was processed and shouldn't be processed
45 | // again. If processed with an error, the error is expected to be permanent, and a new
46 | // run would result in the same error.
47 | MessageProcessed = true
48 | // MessageNotProcessed signals that message wasn't processed and can be processed
49 | // again. The error interrupting processing is considered temporary.
50 | MessageNotProcessed = false
51 | )
52 |
53 | // MessageHandler handles message received from queue. Returning false means
54 | // message wasn't processed correctly and shouldn't be acknowledged. Error
55 | // contains additional information.
56 | //
57 | // Possible combinations:
58 | //
59 | // // | processed | err | description |
60 | // // | --------- | ---------- | ---------------------------------------------------- |
61 | // // | false | | missing failure info, but the message can be retried |
62 | // // | false | some error | not processed, because of some error, can be retried |
63 | // // | true | | processed, no error. |
64 | // // | true | some error | processed, ended with error. Don't retry! |
65 | type MessageHandler interface {
66 | HandleMessage(context.Context, *MessageIncoming) (processed bool, err error)
67 | }
68 |
69 | // MessageHandlerFunc is MessageHandler implementation by simple function.
70 | type MessageHandlerFunc func(context.Context, *MessageIncoming) (processed bool, err error)
71 |
72 | // HandleMessage calls self. It also implements MessageHandler interface.
73 | func (fn MessageHandlerFunc) HandleMessage(ctx context.Context, msg *MessageIncoming) (processed bool, err error) {
74 | return fn(ctx, msg)
75 | }
76 |
77 | // consumerConfig contains consumer configuration.
78 | type consumerConfig struct {
79 | // LockDuration is the maximal duration for how long the message remains locked for other consumers.
80 | LockDuration time.Duration
81 | // PollingInterval defines how frequently consumer checks the queue for new messages.
82 | PollingInterval time.Duration
83 | // AckTimeout is the timeout for updating the message status when message is processed.
84 | AckTimeout time.Duration
85 | 	// MaxParallelMessages sets how many jobs a single consumer can process simultaneously.
86 | MaxParallelMessages int
87 | 	// Metrics defines the OpenTelemetry metrics meter.
88 | Metrics metric.Meter
89 | // InvalidMessageCallback defines what should happen to messages which are identified as invalid.
90 | // Such messages usually have missing or malformed required fields.
91 | InvalidMessageCallback InvalidMessageCallback
92 | 	// HistoryLimit sets how far back in history to search for unprocessed messages.
93 | 	// If not set, the consumer looks for messages in the whole table.
94 | 	// You may set this value when using a partitioned table to search just the partitions you are interested in.
95 | HistoryLimit time.Duration
96 | 	// MaxConsumeCount is the maximal number of times a message can be consumed before it is ignored.
97 | 	// This is a safety mechanism to prevent infinite failure loops when a message causes an unhandled panic, OOM error etc.
98 | MaxConsumeCount uint
99 |
100 | 	// MessageProcessingReserveDuration is the duration for which the message is reserved for handling the result state.
101 | MessageProcessingReserveDuration time.Duration
102 |
103 | MetadataFilters []MetadataFilter
104 |
105 | 	// FiniteConsumption, when true, makes the consumer run only until no more messages can be consumed.
106 | FiniteConsumption bool
107 |
108 | Logger *slog.Logger
109 | }
110 |
111 | var noopLogger = slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{Level: slog.Level(math.MaxInt)}))
112 |
113 | var defaultConsumerConfig = consumerConfig{
114 | LockDuration: time.Hour,
115 | PollingInterval: 5 * time.Second,
116 | AckTimeout: 1 * time.Second,
117 | MessageProcessingReserveDuration: 1 * time.Second,
118 | MaxParallelMessages: 1,
119 | InvalidMessageCallback: func(context.Context, InvalidMessage, error) {},
120 | Metrics: noop.Meter{},
121 | MaxConsumeCount: 3,
122 | Logger: noopLogger,
123 | }
124 |
125 | // InvalidMessageCallback defines what should happen to messages which are identified as invalid.
126 | // Such messages usually have missing or malformed required fields.
127 | type InvalidMessageCallback func(ctx context.Context, msg InvalidMessage, err error)
128 |
129 | // InvalidMessage is definition of invalid message used as argument for InvalidMessageCallback by Consumer.
130 | type InvalidMessage struct {
131 | ID string
132 | Metadata json.RawMessage
133 | Payload json.RawMessage
134 | }
135 |
136 | // Consumer is the preconfigured subscriber of messages from the input queue.
137 | type Consumer struct {
138 | db *sqlx.DB
139 | queueName string
140 | cfg consumerConfig
141 | handler MessageHandler
142 | metrics *metrics
143 | sem *semaphore.Weighted
144 | }
145 |
146 | // ConsumerOption applies option to consumerConfig.
147 | type ConsumerOption func(c *consumerConfig)
148 |
149 | // WithLockDuration sets the maximal duration for how long the message remains
150 | // locked for other consumers.
151 | func WithLockDuration(d time.Duration) ConsumerOption {
152 | return func(c *consumerConfig) {
153 | c.LockDuration = d
154 | }
155 | }
156 |
157 | // WithPollingInterval sets how frequently consumer checks the queue for new
158 | // messages.
159 | func WithPollingInterval(d time.Duration) ConsumerOption {
160 | return func(c *consumerConfig) {
161 | c.PollingInterval = d
162 | }
163 | }
164 |
165 | // WithAckTimeout sets the timeout for updating the message status when message
166 | // is processed.
167 | func WithAckTimeout(d time.Duration) ConsumerOption {
168 | return func(c *consumerConfig) {
169 | c.AckTimeout = d
170 | }
171 | }
172 |
173 | // WithMaxParallelMessages sets how many jobs a single consumer can process
174 | // simultaneously.
175 | func WithMaxParallelMessages(n int) ConsumerOption {
176 | return func(c *consumerConfig) {
177 | c.MaxParallelMessages = n
178 | }
179 | }
180 |
181 | // WithMetrics sets metrics meter. Default is noop.Meter{}.
182 | func WithMetrics(m metric.Meter) ConsumerOption {
183 | return func(c *consumerConfig) {
184 | c.Metrics = m
185 | }
186 | }
187 |
188 | // WithMessageProcessingReserveDuration sets the duration for which the message is reserved for handling result state.
189 | func WithMessageProcessingReserveDuration(d time.Duration) ConsumerOption {
190 | return func(c *consumerConfig) {
191 | c.MessageProcessingReserveDuration = d
192 | }
193 | }
194 |
195 | // WithInvalidMessageCallback sets callback for invalid messages.
196 | func WithInvalidMessageCallback(fn InvalidMessageCallback) ConsumerOption {
197 | return func(c *consumerConfig) {
198 | c.InvalidMessageCallback = fn
199 | }
200 | }
201 |
202 | // WithHistoryLimit sets how long in history you want to search for unprocessed
203 | // messages (default is no limit). If not set, it will look for message in the
204 | // whole table. You may set this value when using partitioned table to search
205 | // just in partitions you are interested in.
206 | func WithHistoryLimit(d time.Duration) ConsumerOption {
207 | return func(c *consumerConfig) {
208 | c.HistoryLimit = d
209 | }
210 | }
211 |
212 | // WithMaxConsumeCount sets the maximal number of times a message can be consumed before it is ignored.
213 | // A SIGKILL, an uncaught panic, an OOM error etc. could lead to an infinite loop of consumer failures.
214 | // Setting this value to greater than 0 prevents such a loop.
215 | // Setting this value to 0 disables this safety mechanism.
216 | func WithMaxConsumeCount(max uint) ConsumerOption {
217 | return func(c *consumerConfig) {
218 | c.MaxConsumeCount = max
219 | }
220 | }
221 |
222 | // WithLogger sets logger. Default is no logging.
223 | func WithLogger(logger *slog.Logger) ConsumerOption {
224 | return func(c *consumerConfig) {
225 | c.Logger = logger
226 | }
227 | }
228 |
229 | // WithStopOnEmptyQueue sets whether the consumer should stop once no more messages can be consumed.
230 | func WithStopOnEmptyQueue(finiteConsumption bool) ConsumerOption {
231 | return func(c *consumerConfig) {
232 | c.FiniteConsumption = finiteConsumption
233 | }
234 | }
235 |
236 | // MetadataFilter is a filter for metadata. Right now only direct key/value matching is supported.
237 | type (
238 | MetadataOperation string
239 |
240 | MetadataFilter struct {
241 | Key string
242 | Operation MetadataOperation
243 | Value string
244 | }
245 | )
246 |
247 | const (
248 | OpEqual MetadataOperation = "="
249 | OpNotEqual MetadataOperation = "<>"
250 | )
251 |
252 | func WithMetadataFilter(filter *MetadataFilter) ConsumerOption {
253 | 	return func(c *consumerConfig) {
254 | 		// ignore nil filters; append below handles a nil slice
255 | 		if filter == nil {
256 | 			return
257 | 		}
258 |
259 | 		c.MetadataFilters = append(c.MetadataFilters, *filter)
260 | 	}
261 | }
262 |
263 | // NewConsumer creates Consumer with proper settings
264 | func NewConsumer(db *sql.DB, queueName string, handler MessageHandler, opts ...ConsumerOption) (*Consumer, error) {
265 | return NewConsumerExt(sqlx.NewDb(db, "pgx"), queueName, handler, opts...)
266 | }
267 |
268 | // NewConsumerExt creates Consumer with proper settings, using sqlx.DB (until refactored to use pgx directly)
269 | func NewConsumerExt(db *sqlx.DB, queueName string, handler MessageHandler, opts ...ConsumerOption) (*Consumer, error) {
270 | config := defaultConsumerConfig
271 | for _, opt := range opts {
272 | opt(&config)
273 | }
274 | metrics, err := prepareProcessMetric(queueName, config.Metrics)
275 | if err != nil {
276 | return nil, errors.Wrap(err, "registering metrics")
277 | }
278 | sem := semaphore.NewWeighted(int64(config.MaxParallelMessages))
279 | return &Consumer{
280 | db: db,
281 | queueName: queueName,
282 | cfg: config,
283 | handler: handler,
284 | metrics: metrics,
285 | sem: sem,
286 | }, nil
287 | }
288 |
289 | type metrics struct {
290 | jobsCounter metric.Int64Counter
291 | failedProcessingCounter metric.Int64Counter
292 | }
293 |
294 | func prepareProcessMetric(queueName string, meter metric.Meter) (*metrics, error) {
295 | queueName = strings.ReplaceAll(queueName, "/", "_")
296 | // '_total' suffix is added to all counters by default by OpenTelemetry.
297 | jobsCounter, err := meter.Int64Counter(
298 | fmt.Sprintf("pgq_%s_processed_jobs", queueName),
299 | metric.WithDescription("Total number of processed jobs. The label 'resolution' says how the job was handled."),
300 | )
301 | if err != nil {
302 | return nil, errors.WithStack(err)
303 | }
304 | failedProcessingCounter, err := meter.Int64Counter(
305 | fmt.Sprintf("pgq_%s_failed_job_processing", queueName),
306 | metric.WithDescription("Total number of errors during marking a job as processed. Example is a failed job ACK. This metric signals a chance of inconsistencies in the queue."),
307 | )
308 | if err != nil {
309 | return nil, errors.WithStack(err)
310 | }
311 | return &metrics{
312 | jobsCounter: jobsCounter,
313 | failedProcessingCounter: failedProcessingCounter,
314 | }, nil
315 | }
316 |
317 | // Run consumes messages until the context is cancelled.
318 | func (c *Consumer) Run(ctx context.Context) error {
319 | c.cfg.Logger.InfoContext(ctx, "starting consumption...",
320 | "inputQueue", c.queueName,
321 | )
322 | if err := c.verifyTable(ctx); err != nil {
323 | return errors.Wrap(err, "verifying table")
324 | }
325 | query, err := c.generateQuery()
326 | if err != nil {
327 | return errors.Wrap(err, "generating query")
328 | }
329 |
330 | var wg sync.WaitGroup
331 | defer wg.Wait() // wait for handlers to finish
332 | for {
333 | msgs, err := c.consumeMessages(ctx, query)
334 | if err != nil {
335 | if errors.Is(err, io.EOF) {
336 | return io.EOF
337 | }
338 | if errors.As(err, &fatalError{}) {
339 | return errors.Wrapf(err, "consuming from PostgreSQL queue %s", c.queueName)
340 | }
341 | c.cfg.Logger.InfoContext(ctx, "pgq: consume failed, will retry",
342 | "error", err,
343 | )
344 | continue
345 | }
346 | wg.Add(len(msgs))
347 | for _, msg := range msgs {
348 | go func(msg *MessageIncoming) {
349 | defer wg.Done()
350 | defer c.sem.Release(1)
351 | c.handleMessage(ctx, msg)
352 | }(msg)
353 | }
354 | }
355 | }
356 |
357 | func (c *Consumer) verifyTable(ctx context.Context) error {
358 | // --- (1) ----
359 | // Validate the queue mandatory fields
360 | err := ValidateFields(ctx, c.db, c.queueName)
361 | if err != nil {
362 | return errors.Wrap(err, "error validating queue mandatory fields")
363 | }
364 |
365 | // --- (2) ----
366 | // Validate the queue mandatory indexes
367 | err = ValidateIndexes(ctx, c.db, c.queueName)
368 | if err != nil {
369 | return errors.Wrap(err, "error validating queue mandatory indexes")
370 | }
371 |
372 | return nil
373 | }
374 |
375 | func (c *Consumer) generateQuery() (*query.Builder, error) {
376 | qb := query.NewBuilder()
377 |
378 | qb.WriteString(`UPDATE `)
379 | qb.WriteString(pg.QuoteIdentifier(c.queueName))
380 | qb.WriteString(` SET locked_until = :locked_until`)
381 | qb.WriteString(`, started_at = CURRENT_TIMESTAMP`)
382 | qb.WriteString(`, consumed_count = consumed_count+1`)
383 | qb.WriteString(` WHERE id IN (`)
384 | {
385 | qb.WriteString(`SELECT id FROM `)
386 | qb.WriteString(pg.QuoteIdentifier(c.queueName))
387 | qb.WriteString(` WHERE`)
388 | if c.cfg.HistoryLimit > 0 {
389 | qb.WriteString(` created_at >= CURRENT_TIMESTAMP - CAST((:history_limit) AS interval) AND`)
390 | qb.WriteString(` created_at < CURRENT_TIMESTAMP AND`)
391 | }
392 | qb.WriteString(` (locked_until IS NULL OR locked_until < CURRENT_TIMESTAMP)`)
393 | if c.cfg.MaxConsumeCount > 0 {
394 | qb.WriteString(` AND consumed_count < :max_consume_count`)
395 | }
396 |
397 | for i, filter := range c.cfg.MetadataFilters {
398 | if len(filter.Operation) == 0 {
399 | return nil, fatalError{Err: fmt.Errorf("metadata filter operation is empty")}
400 | }
401 |
402 | qb.WriteString(fmt.Sprintf(" AND metadata->>:metadata_key_%d %s :metadata_value_%d", i, filter.Operation, i))
403 | }
404 |
405 | qb.WriteString(` AND processed_at IS NULL`)
406 | qb.WriteString(` AND (scheduled_for IS NULL OR scheduled_for < CURRENT_TIMESTAMP)`)
407 | // prioritize scheduled messages and messages with lower consumed_count
408 | qb.WriteString(` ORDER BY scheduled_for ASC NULLS LAST, consumed_count ASC, created_at ASC`)
409 | qb.WriteString(` LIMIT :limit`)
410 | qb.WriteString(` FOR UPDATE SKIP LOCKED`)
411 | }
412 | qb.WriteString(`) RETURNING id, payload, metadata, consumed_count, locked_until`)
413 |
414 | return qb, nil
415 | }
416 |
417 | func (c *Consumer) handleMessage(ctx context.Context, msg *MessageIncoming) {
418 | ctx, msg.cancelCtx = context.WithDeadline(ctx, msg.Deadline)
419 | defer msg.cancelCtx()
420 |
421 | ctxTimeout, cancel := prepareCtxTimeout()
422 | defer cancel()
423 | // TODO configurable Propagator
424 | propagator := otel.GetTextMapPropagator()
425 | carrier := propagation.MapCarrier(msg.Metadata)
426 | ctx = propagator.Extract(ctx, carrier)
427 |
428 | ctx, span := otel.Tracer("pgq").Start(ctx, "HandleMessage")
429 | defer span.End()
430 | span.SetAttributes(
431 | attribute.String("messageId", msg.id.String()),
432 | attribute.String("queueName", c.queueName),
433 | )
434 |
435 | processed, err := c.handler.HandleMessage(ctx, msg)
436 | if !processed {
437 | reason := "unknown"
438 | if err != nil {
439 | span.RecordError(err)
440 | reason = err.Error()
441 | }
442 | span.SetStatus(codes.Ok, "Message Nacked")
443 | if err := msg.nack(ctxTimeout(c.cfg.AckTimeout), reason); err != nil {
444 | c.cfg.Logger.ErrorContext(ctx, "pgq: nack failed",
445 | "error", err.Error(),
446 | "ackTimeout", c.cfg.AckTimeout,
447 | "reason", reason,
448 | "msg.metadata", msg.Metadata,
449 | )
450 | }
451 | return
452 | }
453 | if err != nil {
454 | span.RecordError(err)
455 | span.SetStatus(codes.Error, "Message Discarded")
456 | discardReason := err.Error()
457 | if err := msg.discard(ctxTimeout(c.cfg.AckTimeout), discardReason); err != nil {
458 | c.cfg.Logger.ErrorContext(ctx, "pgq: discard failed",
459 | "error", err,
460 | "ackTimeout", c.cfg.AckTimeout,
461 | "reason", discardReason,
462 | "msg.metadata", msg.Metadata,
463 | )
464 | }
465 | return
466 | }
467 | span.SetStatus(codes.Ok, "Message Acked")
468 | if err := msg.ack(ctxTimeout(c.cfg.AckTimeout)); err != nil {
469 | c.cfg.Logger.ErrorContext(ctx, "pgq: ack failed",
470 | "error", err,
471 | "ackTimeout", c.cfg.AckTimeout,
472 | "msg.metadata", msg.Metadata,
473 | )
474 | }
475 | }
476 |
477 | func prepareCtxTimeout() (func(td time.Duration) context.Context, context.CancelFunc) {
478 | parent, cancel := context.WithCancel(context.Background())
479 | fn := func(td time.Duration) context.Context {
480 | // ctx will be released by parent cancellation
481 | ctx, _ := context.WithTimeout(parent, td) //nolint:govet
482 | return ctx
483 | }
484 | return fn, cancel
485 | }
486 |
487 | func (c *Consumer) consumeMessages(ctx context.Context, query *query.Builder) ([]*MessageIncoming, error) {
488 | for {
489 | maxMsg, err := acquireMaxFromSemaphore(ctx, c.sem, int64(c.cfg.MaxParallelMessages))
490 | if err != nil {
491 | return nil, fatalError{Err: errors.WithStack(err)}
492 | }
493 | msgs, err := c.tryConsumeMessages(ctx, query, maxMsg)
494 | if err != nil {
495 | c.sem.Release(maxMsg)
496 | if c.cfg.FiniteConsumption && errors.Is(err, sql.ErrNoRows) {
497 | return nil, io.EOF
498 | }
499 | if !errors.Is(err, sql.ErrNoRows) {
500 | return nil, errors.WithStack(err)
501 | }
502 | select {
503 | case <-ctx.Done():
504 | return nil, fatalError{Err: ctx.Err()}
505 | case <-time.After(c.cfg.PollingInterval):
506 | continue
507 | }
508 | }
509 | // release unused resources
510 | c.sem.Release(maxMsg - int64(len(msgs)))
511 | return msgs, nil
512 | }
513 | }
514 |
515 | type pgMessage struct {
516 | ID pgtype.UUID
517 | Payload pgtype.JSONB
518 | Metadata pgtype.JSONB
519 | Attempt pgtype.Int4
520 | LockedUntil pgtype.Timestamptz
521 | }
522 |
523 | func (c *Consumer) tryConsumeMessages(ctx context.Context, query *query.Builder, limit int64) (_ []*MessageIncoming, err error) {
524 | tx, err := c.db.BeginTxx(ctx, nil)
525 | if err != nil {
526 | 		// TODO not necessarily fatal, network could wiggle.
527 | return nil, fatalError{Err: errors.WithStack(err)}
528 | }
529 | defer func() {
530 | txRollbackErr := tx.Rollback()
531 | if errors.Is(txRollbackErr, sql.ErrTxDone) {
532 | return
533 | }
534 | if txRollbackErr != nil {
535 | c.cfg.Logger.ErrorContext(ctx, "pgq: rollback failed",
536 | "error", txRollbackErr.Error(),
537 | "rollbackReason", err,
538 | )
539 | return
540 | }
541 | }()
542 |
543 | lockedUntil := time.Now().Add(c.cfg.LockDuration)
544 | namedParams := map[string]interface{}{
545 | "locked_until": lockedUntil,
546 | "limit": limit,
547 | }
548 |
549 | if query.HasParam("history_limit") {
550 | var scanInterval pgtype.Interval
551 | 		// Set never fails for time.Duration values.
552 | _ = scanInterval.Set(c.cfg.HistoryLimit)
553 |
554 | namedParams["history_limit"] = scanInterval
555 | }
556 |
557 | if query.HasParam("max_consume_count") {
558 | namedParams["max_consume_count"] = c.cfg.MaxConsumeCount
559 | }
560 |
561 | for i, filter := range c.cfg.MetadataFilters {
562 | namedParams[fmt.Sprintf("metadata_key_%d", i)] = filter.Key
563 | namedParams[fmt.Sprintf("metadata_value_%d", i)] = filter.Value
564 | }
565 |
566 | queryString, err := query.Build(namedParams)
567 | if err != nil {
568 | return nil, errors.WithStack(err)
569 | }
570 |
571 | rows, err := sqlx.NamedQueryContext(ctx, tx, queryString, namedParams)
572 | if err != nil {
573 | if isErrorCode(err, undefinedColumnErrCode) {
574 | return nil, fatalError{Err: err}
575 | }
576 | return nil, errors.WithStack(err)
577 | }
578 | defer rows.Close()
579 |
580 | var msgs []*MessageIncoming
581 | for rows.Next() {
582 | msg, err := c.parseRow(ctx, rows)
583 | if err != nil {
584 | return nil, errors.WithStack(err)
585 | }
586 | msgs = append(msgs, msg)
587 | }
588 | if err := rows.Err(); err != nil {
589 | return nil, errors.WithStack(err)
590 | }
591 | if len(msgs) == 0 {
592 | return nil, sql.ErrNoRows
593 | }
594 | if err := tx.Commit(); err != nil {
595 | return nil, errors.Wrap(err, "commit message consumption")
596 | }
597 | return msgs, nil
598 | }
599 |
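// Note on the locking model: the generated query is a single
// "UPDATE ... WHERE id IN (SELECT ... FOR UPDATE SKIP LOCKED) RETURNING ..."
// statement (see the expected queries in consumer_test.go), and the
// transaction commits as soon as the rows are claimed. Concurrent consumers
// therefore never wait on each other; the lock that survives the commit is
// the locked_until timestamp, not a database row lock.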
600 | func (c *Consumer) parseRow(ctx context.Context, rows *sqlx.Rows) (*MessageIncoming, error) {
601 | var pgMsg pgMessage
602 | if err := rows.Scan(
603 | &pgMsg.ID,
604 | &pgMsg.Payload,
605 | &pgMsg.Metadata,
606 | &pgMsg.Attempt,
607 | &pgMsg.LockedUntil,
608 | ); err != nil {
609 | if isErrorCode(err, undefinedTableErrCode, undefinedColumnErrCode) {
610 | return nil, fatalError{Err: err}
611 | }
612 | return nil, errors.Wrap(err, "retrieving message")
613 | }
614 | msg, err := c.finishParsing(pgMsg)
615 | if err != nil {
616 | c.cfg.Logger.ErrorContext(ctx, "reading message", c.logFields(pgMsg, err)...)
617 | c.discardInvalidMsg(ctx, pgMsg.ID, err)
618 | go c.cfg.InvalidMessageCallback(ctx, InvalidMessage{
619 | ID: uuid.UUID(pgMsg.ID.Bytes).String(),
620 | Metadata: pgMsg.Metadata.Bytes,
621 | Payload: pgMsg.Payload.Bytes,
622 | }, err)
623 | return nil, errors.WithStack(err)
624 | }
625 | return msg, nil
626 | }
627 |
628 | func (c *Consumer) logFields(msg pgMessage, err error) []any {
629 | entry := []any{
630 | "msg.id", uuid.UUID(msg.ID.Bytes).String(),
631 | "msg.metadata", json.RawMessage(msg.Metadata.Bytes),
632 | }
633 | if err != nil {
634 | 		entry = append(entry,
635 | 			"error", err,
636 | 			"msg.payload", json.RawMessage(msg.Payload.Bytes),
637 | 		)
638 | }
639 | return entry
640 | }
641 |
642 | func (c *Consumer) discardInvalidMsg(ctx context.Context, id pgtype.UUID, err error) {
643 | ctxTimeout, cancel := prepareCtxTimeout()
644 | defer cancel()
645 | reason := err.Error()
646 | if err := c.discardMessage(c.db, id)(ctxTimeout(c.cfg.AckTimeout), reason); err != nil {
647 | c.cfg.Logger.ErrorContext(ctx, "pgq: discard failed",
648 | "error", err,
649 | "msg.id", id,
650 | "ackTimeout", c.cfg.AckTimeout,
651 | "reason", reason,
652 | )
653 | return
654 | }
655 | }
656 |
657 | func (c *Consumer) finishParsing(pgMsg pgMessage) (*MessageIncoming, error) {
658 | msg := &MessageIncoming{
659 | id: uuid.UUID(pgMsg.ID.Bytes),
660 | once: sync.Once{},
661 | ackFn: c.ackMessage(c.db, pgMsg.ID),
662 | nackFn: c.nackMessage(c.db, pgMsg.ID),
663 | discardFn: c.discardMessage(c.db, pgMsg.ID),
664 | updateLockedUntilFn: c.updateLockedUntil(c.db, pgMsg.ID),
665 | }
666 | var err error
667 | msg.Payload, err = parsePayload(pgMsg)
668 | if err != nil {
669 | return msg, errors.Wrap(err, "parsing payload")
670 | }
671 | msg.Metadata, err = parseMetadata(pgMsg)
672 | if err != nil {
673 | return msg, errors.Wrap(err, "parsing metadata")
674 | }
675 | msg.Attempt = int(pgMsg.Attempt.Int)
676 | msg.maxConsumedCount = c.cfg.MaxConsumeCount
677 | msg.Deadline = pgMsg.LockedUntil.Time.Add(-c.cfg.AckTimeout).Add(-c.cfg.MessageProcessingReserveDuration)
678 | return msg, nil
679 | }
680 |
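// A worked example of the Deadline computation above: with LockDuration = 10m,
// AckTimeout = 5s and MessageProcessingReserveDuration = 5s, a message locked
// until 12:10:00 gets Deadline = 12:09:50, so the handler must stop 10 seconds
// before the lock expires, leaving room for the ack UPDATE to land inside the
// lock window.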
681 | func parsePayload(pgMsg pgMessage) (json.RawMessage, error) {
682 | if pgMsg.Payload.Status != pgtype.Present {
683 | return nil, errors.New("missing message payload")
684 | }
685 | if !isJSONObject(pgMsg.Payload.Bytes) {
686 | 		return nil, errors.New("payload is not a valid JSON object")
687 | }
688 | return pgMsg.Payload.Bytes, nil
689 | }
690 |
691 | func parseMetadata(pgMsg pgMessage) (map[string]string, error) {
692 | if pgMsg.Metadata.Status != pgtype.Present {
693 | return map[string]string{}, nil
694 | }
695 | var m map[string]string
696 | if err := json.Unmarshal(pgMsg.Metadata.Bytes, &m); err != nil {
697 | if !isJSONObject(pgMsg.Metadata.Bytes) {
698 | 			return nil, errors.New("metadata is not a valid JSON object")
699 | }
700 | return nil, errors.Wrap(err, "parsing metadata")
701 | }
702 | return m, nil
703 | }
704 |
705 | func isJSONObject(b json.RawMessage) bool {
706 | if !json.Valid(b) {
707 | return false
708 | }
709 | 	// Trim insignificant leading whitespace before checking the first byte.
710 | b = bytes.TrimLeftFunc(b, unicode.IsSpace)
711 | return bytes.HasPrefix(b, []byte{'{'})
712 | }
713 |
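// For illustration:
//
//	isJSONObject([]byte(` {"a":1}`)) // true: leading whitespace is trimmed
//	isJSONObject([]byte(`[1,2,3]`))  // false: valid JSON, but not an object
//	isJSONObject([]byte(`{"a":`))    // false: not valid JSON at all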
714 | type execer interface {
715 | ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
716 | }
717 |
718 | func (c *Consumer) ackMessage(exec execer, msgID pgtype.UUID) func(ctx context.Context) error {
719 | query := `UPDATE ` + pg.QuoteIdentifier(c.queueName) + ` SET locked_until = NULL, processed_at = CURRENT_TIMESTAMP WHERE id = $1`
720 | return func(ctx context.Context) error {
721 | if _, err := exec.ExecContext(ctx, query, msgID); err != nil {
722 | c.metrics.failedProcessingCounter.Add(ctx, 1,
723 | metric.WithAttributes(
724 | attribute.String("resolution", "ack"),
725 | attribute.String("queue_name", c.queueName),
726 | ),
727 | )
728 | return errors.WithStack(err)
729 | }
730 | c.metrics.jobsCounter.Add(ctx, 1,
731 | metric.WithAttributes(
732 | attribute.String("resolution", "ack"),
733 | attribute.String("queue_name", c.queueName),
734 | ),
735 | )
736 | return nil
737 | }
738 | }
739 |
740 | func (c *Consumer) nackMessage(exec execer, msgID pgtype.UUID) func(ctx context.Context, reason string) error {
741 | query := `UPDATE ` + pg.QuoteIdentifier(c.queueName) + ` SET locked_until = NULL, error_detail = $2 WHERE id = $1`
742 | return func(ctx context.Context, reason string) error {
743 | if _, err := exec.ExecContext(ctx, query, msgID, reason); err != nil {
744 | c.metrics.failedProcessingCounter.Add(ctx, 1,
745 | metric.WithAttributes(
746 | attribute.String("resolution", "nack"),
747 | attribute.String("queue_name", c.queueName),
748 | ),
749 | )
750 | return errors.WithStack(err)
751 | }
752 | c.metrics.jobsCounter.Add(ctx, 1,
753 | metric.WithAttributes(
754 | attribute.String("resolution", "nack"),
755 | attribute.String("queue_name", c.queueName),
756 | ),
757 | )
758 | return nil
759 | }
760 | }
761 |
762 | func (c *Consumer) discardMessage(exec execer, msgID pgtype.UUID) func(ctx context.Context, reason string) error {
763 | query := `UPDATE ` + pg.QuoteIdentifier(c.queueName) + ` SET locked_until = NULL, processed_at = CURRENT_TIMESTAMP, error_detail = $2 WHERE id = $1`
764 | return func(ctx context.Context, reason string) error {
765 | if _, err := exec.ExecContext(ctx, query, msgID, reason); err != nil {
766 | c.metrics.failedProcessingCounter.Add(ctx, 1,
767 | metric.WithAttributes(
768 | attribute.String("resolution", "discard"),
769 | attribute.String("queue_name", c.queueName),
770 | ),
771 | )
772 | return errors.WithStack(err)
773 | }
774 | c.metrics.jobsCounter.Add(ctx, 1,
775 | metric.WithAttributes(
776 | attribute.String("resolution", "discard"),
777 | attribute.String("queue_name", c.queueName),
778 | ),
779 | )
780 | return nil
781 | }
782 | }
783 |
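// Taken together, the three resolutions above differ only in which columns
// they touch: ack and discard set processed_at (terminal states, with discard
// also recording error_detail), while nack clears locked_until but leaves
// processed_at NULL, which is what makes the message visible to the consume
// query again.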
784 | func (c *Consumer) updateLockedUntil(db *sqlx.DB, id pgtype.UUID) func(context.Context, time.Time) error {
785 | query := `UPDATE ` + pg.QuoteIdentifier(c.queueName) + ` SET locked_until = $2 WHERE id = $1`
786 | return func(ctx context.Context, lockedUntil time.Time) error {
787 | _, err := db.ExecContext(ctx, query, id, lockedUntil)
788 | return errors.WithStack(err)
789 | }
790 | }
791 |
792 | // acquireMaxFromSemaphore acquires the maximum available weight. It blocks
793 | // until resources are available or ctx is done. On success, it returns the
794 | // acquired weight. On failure, it returns ctx.Err() and leaves the semaphore unchanged.
795 | //
796 | // If ctx is already done, Acquire may still succeed without blocking.
797 | func acquireMaxFromSemaphore(ctx context.Context, w *semaphore.Weighted, size int64) (int64, error) {
798 | for i := size; i > 1; i-- {
799 | if ok := w.TryAcquire(i); ok {
800 | 			// Same practice as in the underlying library: if we acquired the
801 | 			// semaphore after ctx was canceled, just pretend we didn't notice
802 | 			// the cancellation rather than trying to fix up the queue.
803 | return i, nil
804 | }
805 | }
806 | if err := w.Acquire(ctx, 1); err != nil {
807 | return 0, err
808 | }
809 | return 1, nil
810 | }
811 |
--------------------------------------------------------------------------------
/consumer_test.go:
--------------------------------------------------------------------------------
1 | package pgq
2 |
3 | import (
4 | "context"
5 | "testing"
6 | "time"
7 |
8 | "golang.org/x/sync/semaphore"
9 |
10 | "go.dataddo.com/pgq/internal/require"
11 | )
12 |
13 | func TestConsumer_generateQuery(t *testing.T) {
14 | type args struct {
15 | queueName string
16 | opts []ConsumerOption
17 | }
18 | tests := []struct {
19 | name string
20 | args args
21 | want string
22 | }{
23 | {
24 | name: "simple",
25 | args: args{queueName: "testing_queue"},
26 | want: "UPDATE \"testing_queue\" SET locked_until = :locked_until, started_at = CURRENT_TIMESTAMP, consumed_count = consumed_count+1 WHERE id IN (SELECT id FROM \"testing_queue\" WHERE (locked_until IS NULL OR locked_until < CURRENT_TIMESTAMP) AND consumed_count < :max_consume_count AND processed_at IS NULL AND (scheduled_for IS NULL OR scheduled_for < CURRENT_TIMESTAMP) ORDER BY scheduled_for ASC NULLS LAST, consumed_count ASC, created_at ASC LIMIT :limit FOR UPDATE SKIP LOCKED) RETURNING id, payload, metadata, consumed_count, locked_until",
27 | },
28 | {
29 | name: "scanInterval 12 hours",
30 |
31 | args: args{
32 | queueName: "testing_queue",
33 | opts: []ConsumerOption{
34 | WithHistoryLimit(12 * time.Hour),
35 | },
36 | },
37 | want: "UPDATE \"testing_queue\" SET locked_until = :locked_until, started_at = CURRENT_TIMESTAMP, consumed_count = consumed_count+1 WHERE id IN (SELECT id FROM \"testing_queue\" WHERE created_at >= CURRENT_TIMESTAMP - CAST((:history_limit) AS interval) AND created_at < CURRENT_TIMESTAMP AND (locked_until IS NULL OR locked_until < CURRENT_TIMESTAMP) AND consumed_count < :max_consume_count AND processed_at IS NULL AND (scheduled_for IS NULL OR scheduled_for < CURRENT_TIMESTAMP) ORDER BY scheduled_for ASC NULLS LAST, consumed_count ASC, created_at ASC LIMIT :limit FOR UPDATE SKIP LOCKED) RETURNING id, payload, metadata, consumed_count, locked_until",
38 | },
39 | {
40 | name: "consume messages with metadata filter",
41 | args: args{
42 | queueName: "testing_queue",
43 | opts: []ConsumerOption{
44 | WithMetadataFilter(&MetadataFilter{Key: "foo", Operation: OpEqual, Value: "bar"}),
45 | },
46 | },
47 | want: "UPDATE \"testing_queue\" SET locked_until = :locked_until, started_at = CURRENT_TIMESTAMP, consumed_count = consumed_count+1 WHERE id IN (SELECT id FROM \"testing_queue\" WHERE (locked_until IS NULL OR locked_until < CURRENT_TIMESTAMP) AND consumed_count < :max_consume_count AND metadata->>:metadata_key_0 = :metadata_value_0 AND processed_at IS NULL AND (scheduled_for IS NULL OR scheduled_for < CURRENT_TIMESTAMP) ORDER BY scheduled_for ASC NULLS LAST, consumed_count ASC, created_at ASC LIMIT :limit FOR UPDATE SKIP LOCKED) RETURNING id, payload, metadata, consumed_count, locked_until",
48 | },
49 | {
50 | 			name: "scanInterval 12 hours and max consume count limit disabled",
51 |
52 | args: args{
53 | queueName: "testing_queue",
54 | opts: []ConsumerOption{
55 | WithHistoryLimit(12 * time.Hour),
56 | WithMaxConsumeCount(0),
57 | },
58 | },
59 | want: "UPDATE \"testing_queue\" SET locked_until = :locked_until, started_at = CURRENT_TIMESTAMP, consumed_count = consumed_count+1 WHERE id IN (SELECT id FROM \"testing_queue\" WHERE created_at >= CURRENT_TIMESTAMP - CAST((:history_limit) AS interval) AND created_at < CURRENT_TIMESTAMP AND (locked_until IS NULL OR locked_until < CURRENT_TIMESTAMP) AND processed_at IS NULL AND (scheduled_for IS NULL OR scheduled_for < CURRENT_TIMESTAMP) ORDER BY scheduled_for ASC NULLS LAST, consumed_count ASC, created_at ASC LIMIT :limit FOR UPDATE SKIP LOCKED) RETURNING id, payload, metadata, consumed_count, locked_until",
60 | },
61 | {
62 | name: "with metadata condition",
63 | args: args{queueName: "testing_queue"},
64 | want: "UPDATE \"testing_queue\" SET locked_until = :locked_until, started_at = CURRENT_TIMESTAMP, consumed_count = consumed_count+1 WHERE id IN (SELECT id FROM \"testing_queue\" WHERE (locked_until IS NULL OR locked_until < CURRENT_TIMESTAMP) AND consumed_count < :max_consume_count AND processed_at IS NULL AND (scheduled_for IS NULL OR scheduled_for < CURRENT_TIMESTAMP) ORDER BY scheduled_for ASC NULLS LAST, consumed_count ASC, created_at ASC LIMIT :limit FOR UPDATE SKIP LOCKED) RETURNING id, payload, metadata, consumed_count, locked_until",
65 | },
66 | {
67 | name: "scanInterval 12 hours with metadata condition",
68 | args: args{
69 | queueName: "testing_queue",
70 | opts: []ConsumerOption{
71 | WithHistoryLimit(12 * time.Hour),
72 | },
73 | },
74 | want: "UPDATE \"testing_queue\" SET locked_until = :locked_until, started_at = CURRENT_TIMESTAMP, consumed_count = consumed_count+1 WHERE id IN (SELECT id FROM \"testing_queue\" WHERE created_at >= CURRENT_TIMESTAMP - CAST((:history_limit) AS interval) AND created_at < CURRENT_TIMESTAMP AND (locked_until IS NULL OR locked_until < CURRENT_TIMESTAMP) AND consumed_count < :max_consume_count AND processed_at IS NULL AND (scheduled_for IS NULL OR scheduled_for < CURRENT_TIMESTAMP) ORDER BY scheduled_for ASC NULLS LAST, consumed_count ASC, created_at ASC LIMIT :limit FOR UPDATE SKIP LOCKED) RETURNING id, payload, metadata, consumed_count, locked_until",
75 | },
76 | {
77 | name: "with negative metadata condition",
78 | args: args{queueName: "testing_queue"},
79 | want: "UPDATE \"testing_queue\" SET locked_until = :locked_until, started_at = CURRENT_TIMESTAMP, consumed_count = consumed_count+1 WHERE id IN (SELECT id FROM \"testing_queue\" WHERE (locked_until IS NULL OR locked_until < CURRENT_TIMESTAMP) AND consumed_count < :max_consume_count AND processed_at IS NULL AND (scheduled_for IS NULL OR scheduled_for < CURRENT_TIMESTAMP) ORDER BY scheduled_for ASC NULLS LAST, consumed_count ASC, created_at ASC LIMIT :limit FOR UPDATE SKIP LOCKED) RETURNING id, payload, metadata, consumed_count, locked_until",
80 | },
81 | }
82 | for _, tt := range tests {
83 | t.Run(tt.name, func(t *testing.T) {
84 | c, err := NewConsumer(nil, tt.args.queueName, nil, tt.args.opts...)
85 | require.NoError(t, err)
86 | got, err := c.generateQuery()
87 | require.NoError(t, err)
88 | require.Equal(t, tt.want, got.String())
89 | })
90 | }
91 | }
92 |
93 | func TestAcquireMaxFromSemaphore(t *testing.T) {
94 | ctx, cancel := context.WithTimeout(context.Background(), time.Second)
95 | defer cancel()
96 |
97 | const size int64 = 10
98 |
99 | w := semaphore.NewWeighted(size)
100 |
101 | acquired, err := acquireMaxFromSemaphore(ctx, w, size)
102 | require.NoError(t, err)
103 | require.Equal(t, size, acquired)
104 |
105 | const released1 int64 = 3
106 | w.Release(released1)
107 | acquired, err = acquireMaxFromSemaphore(ctx, w, size)
108 | require.NoError(t, err)
109 | require.Equal(t, released1, acquired)
110 |
111 | const released2 int64 = 1
112 | go func() {
113 | time.Sleep(time.Millisecond)
114 | w.Release(released2)
115 | }()
116 | acquired, err = acquireMaxFromSemaphore(ctx, w, size)
117 | require.NoError(t, err)
118 | require.Equal(t, released2, acquired)
119 |
120 | acquired, err = acquireMaxFromSemaphore(ctx, w, size)
121 | require.ErrorIs(t, err, context.DeadlineExceeded)
122 | require.Equal(t, int64(0), acquired)
123 | }
124 |
--------------------------------------------------------------------------------
/docs/consumer_loop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataddo/pgq/c8b263b44bb7de8b00e44045008e5e81ed0e6a78/docs/consumer_loop.png
--------------------------------------------------------------------------------
/docs/monitoring.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataddo/pgq/c8b263b44bb7de8b00e44045008e5e81ed0e6a78/docs/monitoring.png
--------------------------------------------------------------------------------
/errors.go:
--------------------------------------------------------------------------------
1 | package pgq
2 |
3 | import (
4 | "errors"
5 | "slices"
6 | )
7 |
8 | // Postgres error codes (SQLSTATE values) that this package handles specially.
9 | const (
10 | undefinedTableErrCode = "42P01"
11 | undefinedColumnErrCode = "42703"
12 | )
13 |
14 | type pgError interface {
15 | SQLState() string
16 | }
17 |
18 | // legacyPGError is an interface used by previous versions of github.com/lib/pq.
19 | // It is provided only to support legacy code. New code should use the pgError
20 | // interface.
21 | type legacyPGError interface {
22 | Error() string
23 | Fatal() bool
24 | Get(k byte) (v string)
25 | }
26 |
27 | //var (
28 | // _ pgError = (*pgconn.PgError)(nil)
29 | // _ pgError = (*pq.Error)(nil)
30 | // _ legacyPGError = (*pq.Error)(nil)
31 | //)
32 |
33 | func isErrorCode(err error, codes ...string) bool {
34 | var pgErr pgError
35 | if ok := errors.As(err, &pgErr); !ok {
36 | var legacyErr legacyPGError
37 | if ok := errors.As(err, &legacyErr); !ok {
38 | return false
39 | }
40 | return slices.Contains(codes, legacyErr.Get('C'))
41 | }
42 | return slices.Contains(codes, pgErr.SQLState())
43 | }
44 |
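// Illustrative call-site sketch, not part of the original file, mirroring how
// consumer.go uses isErrorCode to separate fatal schema errors from retryable
// ones:
//
//	if _, err := db.ExecContext(ctx, query); err != nil {
//		if isErrorCode(err, undefinedTableErrCode, undefinedColumnErrCode) {
//			return fatalError{Err: err} // schema is wrong; retrying won't help
//		}
//		return errors.WithStack(err) // possibly transient; the caller may retry
//	}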
--------------------------------------------------------------------------------
/errors_test.go:
--------------------------------------------------------------------------------
1 | package pgq
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "testing"
7 | )
8 |
9 | type legacyPGErrorImplementation struct {
10 | err error
11 | fatal bool
12 | codes map[byte]string
13 | }
14 |
15 | func (l *legacyPGErrorImplementation) Error() string {
16 | return l.err.Error()
17 | }
18 |
19 | func (l *legacyPGErrorImplementation) Fatal() bool {
20 | return l.fatal
21 | }
22 |
23 | func (l *legacyPGErrorImplementation) Get(k byte) (v string) {
24 | return l.codes[k]
25 | }
26 |
27 | type pgErrorImplementation struct {
28 | sqlState string
29 | }
30 |
31 | func (p *pgErrorImplementation) Error() string {
32 | return fmt.Sprintf("pg error: %s", p.sqlState)
33 | }
34 |
35 | func (p *pgErrorImplementation) SQLState() string {
36 | return p.sqlState
37 | }
38 |
39 | var (
40 | _ legacyPGError = (*legacyPGErrorImplementation)(nil)
41 | _ pgError = (*pgErrorImplementation)(nil)
42 | )
43 |
44 | func Test_isErrorCode(t *testing.T) {
45 | type args struct {
46 | err error
47 | codes []string
48 | }
49 | tests := []struct {
50 | name string
51 | args args
52 | want bool
53 | }{
54 | {
55 | name: "test wrapped pg error",
56 | args: args{
57 | codes: []string{"42P01"},
58 | err: fmt.Errorf("foo: %w",
59 | &pgErrorImplementation{
60 | sqlState: "42P01",
61 | },
62 | ),
63 | },
64 | want: true,
65 | },
66 | {
67 | name: "test wrapped legacy error",
68 | args: args{
69 | codes: []string{"42P01"},
70 | err: fmt.Errorf("foo: %w",
71 | &legacyPGErrorImplementation{
72 | err: errors.New("test"),
73 | fatal: true,
74 | codes: map[byte]string{
75 | 'C': "42P01",
76 | },
77 | },
78 | ),
79 | },
80 | want: true,
81 | },
82 | }
83 | for _, tt := range tests {
84 | t.Run(tt.name, func(t *testing.T) {
85 | if got := isErrorCode(tt.args.err, tt.args.codes...); got != tt.want {
86 | t.Errorf("isErrorCode() = %v, want %v", got, tt.want)
87 | }
88 | })
89 | }
90 | }
91 |
--------------------------------------------------------------------------------
/example_consumer_test.go:
--------------------------------------------------------------------------------
1 | package pgq_test
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "encoding/json"
7 | "errors"
8 | "fmt"
9 | "log"
10 | "os"
11 | "os/signal"
12 |
13 | "go.dataddo.com/pgq"
14 | )
15 |
16 | type Handler struct{}
17 |
18 | func (h *Handler) HandleMessage(ctx context.Context, msg *pgq.MessageIncoming) (res bool, err error) {
19 | defer func() {
20 | r := recover()
21 | if r == nil {
22 | return
23 | }
24 | log.Println("Recovered in 'Handler.HandleMessage()'", r)
25 | // nack the message, it will be retried
26 | res = pgq.MessageNotProcessed
27 | if e, ok := r.(error); ok {
28 | err = e
29 | } else {
30 | err = fmt.Errorf("%v", r)
31 | }
32 | }()
33 | if msg.Metadata["heaviness"] == "heavy" {
34 | // nack the message, it will be retried
35 | // Message won't contain error detail in the database.
36 | return pgq.MessageNotProcessed, nil
37 | }
38 | var myPayload struct {
39 | Foo string `json:"foo"`
40 | }
41 | if err := json.Unmarshal(msg.Payload, &myPayload); err != nil {
42 | // discard the message, it will not be retried
43 | // Message will contain error detail in the database.
44 | return pgq.MessageProcessed, fmt.Errorf("invalid payload: %v", err)
45 | }
46 | // doSomethingWithThePayload(ctx, myPayload)
47 | return pgq.MessageProcessed, nil
48 | }
49 |
50 | func ExampleConsumer() {
51 | db, err := sql.Open("postgres", "user=postgres password=postgres host=localhost port=5432 dbname=postgres")
52 | if err != nil {
53 | log.Fatal("Error opening database:", err)
54 | }
55 | defer db.Close()
56 | const queueName = "test_queue"
57 | c, err := pgq.NewConsumer(db, queueName, &Handler{})
58 | if err != nil {
59 | log.Fatal("Error creating consumer:", err)
60 | }
61 | ctx, _ := signal.NotifyContext(context.Background(), os.Interrupt)
62 | if err := c.Run(ctx); err != nil && !errors.Is(err, context.Canceled) {
63 | log.Fatal("Error running consumer:", err)
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
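Judging by the comments in the handler above, the (result, error) pairs map to
queue resolutions as follows: (MessageProcessed, nil) acks the message;
(MessageNotProcessed, nil) nacks it for retry without recording an error
detail; (MessageNotProcessed, err) nacks it for retry with the error recorded;
and (MessageProcessed, err) discards it permanently, storing the error detail
in the database.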
/example_publisher_test.go:
--------------------------------------------------------------------------------
1 | package pgq_test
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "encoding/json"
7 | "log"
8 | "time"
9 |
10 | "go.dataddo.com/pgq"
11 | )
12 |
13 | type PayloadStruct struct {
14 | Foo string `json:"foo"`
15 | }
16 |
17 | func ExamplePublisher() {
18 | db, err := sql.Open("postgres", "user=postgres password=postgres host=localhost port=5432 dbname=postgres")
19 | if err != nil {
20 | log.Fatal("Error opening database:", err)
21 | }
22 | defer db.Close()
23 | const queueName = "test_queue"
24 | p := pgq.NewPublisher(db)
25 | payload, _ := json.Marshal(PayloadStruct{Foo: "bar"})
26 | messages := []*pgq.MessageOutgoing{
27 | {
28 | Metadata: pgq.Metadata{
29 | "version": "1.0",
30 | },
31 | Payload: json.RawMessage(payload),
32 | },
33 | {
34 | Metadata: pgq.Metadata{
35 | "version": "1.0",
36 | },
37 | Payload: json.RawMessage(payload),
38 | },
39 | {
40 | Metadata: pgq.Metadata{
41 | "version": "1.0",
42 | },
43 | Payload: json.RawMessage(payload),
44 | },
45 | }
46 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
47 | defer cancel()
48 | ids, err := p.Publish(ctx, queueName, messages...)
49 | if err != nil {
50 | log.Fatal("Error publishing message:", err)
51 | }
52 | log.Println("Published messages with ids:", ids)
53 | }
54 |
--------------------------------------------------------------------------------
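A variant worth sketching, using the optional ScheduledFor field that the
integration tests exercise: delayed delivery is a one-field change to the
messages above.

	at := time.Now().Add(time.Hour)
	delayed := &pgq.MessageOutgoing{
		Metadata:     pgq.Metadata{"version": "1.0"},
		Payload:      json.RawMessage(payload),
		ScheduledFor: &at, // consumers skip the message until this time passes
	}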
/examples_test.go:
--------------------------------------------------------------------------------
1 | package pgq_test
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "log/slog"
7 | "os"
8 | "time"
9 |
10 | "go.dataddo.com/pgq"
11 | "go.opentelemetry.io/otel/metric/noop"
12 | )
13 |
14 | var db *sql.DB
15 |
16 | func ExampleNewConsumer() {
17 | slogger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))
18 | c, err := pgq.NewConsumer(db, "queue_name", &Handler{},
19 | pgq.WithLockDuration(10*time.Minute),
20 | pgq.WithPollingInterval(500*time.Millisecond),
21 | pgq.WithAckTimeout(5*time.Second),
22 | pgq.WithMessageProcessingReserveDuration(5*time.Second),
23 | pgq.WithMaxParallelMessages(42),
24 | pgq.WithMetrics(noop.Meter{}),
25 | pgq.WithHistoryLimit(24*time.Hour),
26 | pgq.WithLogger(slogger),
27 | pgq.WithInvalidMessageCallback(func(ctx context.Context, msg pgq.InvalidMessage, err error) {
28 | 			// The message Payload and/or Metadata is not a valid JSON object.
29 | // The message will be discarded.
30 | slogger.Warn("invalid message",
31 | "error", err,
32 | "msg.id", msg.ID,
33 | )
34 | }),
35 | )
36 | _, _ = c, err
37 | }
38 |
39 | func ExampleNewPublisher() {
40 | hostname, _ := os.Hostname()
41 | p := pgq.NewPublisher(db,
42 | pgq.WithMetaInjectors(
43 | pgq.StaticMetaInjector(pgq.Metadata{"publisher-id": hostname}),
44 | ),
45 | )
46 | _ = p
47 | }
48 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module go.dataddo.com/pgq
2 |
3 | go 1.22.0
4 |
5 | require (
6 | github.com/google/uuid v1.6.0
7 | github.com/jackc/pgtype v1.14.4
8 | github.com/jmoiron/sqlx v1.4.0
9 | github.com/pkg/errors v0.9.1
10 | go.opentelemetry.io/otel v1.34.0
11 | go.opentelemetry.io/otel/metric v1.34.0
12 | golang.org/x/sync v0.11.0
13 | )
14 |
15 | require (
16 | github.com/go-logr/logr v1.4.2 // indirect
17 | github.com/go-logr/stdr v1.2.2 // indirect
18 | github.com/jackc/pgio v1.0.0 // indirect
19 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect
20 | go.opentelemetry.io/otel/trace v1.34.0 // indirect
21 | )
22 |
23 | // Test dependencies
24 | require github.com/jackc/pgx/v4 v4.18.2
25 |
26 | // Prevent forcing anyone to use a vulnerable version of pgx/v4.
27 | // https://devhub.checkmarx.com/cve-details/CVE-2024-27289/
28 | exclude github.com/jackc/pgx/v4 v4.18.3
29 |
30 | // Dependencies of github.com/jackc/pgx/v4 v4.18.2, which is used only in tests.
31 | require (
32 | github.com/jackc/chunkreader/v2 v2.0.1 // indirect
33 | github.com/jackc/pgconn v1.14.3 // indirect
34 | github.com/jackc/pgpassfile v1.0.0 // indirect
35 | github.com/jackc/pgproto3/v2 v2.3.3 // indirect
36 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
37 | golang.org/x/crypto v0.26.0 // indirect
38 | golang.org/x/text v0.17.0 // indirect
39 | )
40 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
2 | filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
3 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
4 | github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
5 | github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
6 | github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
7 | github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
8 | github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
9 | github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
10 | github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
11 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
12 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
13 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
14 | github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
15 | github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
16 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
17 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
18 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
19 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
20 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
21 | github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
22 | github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
23 | github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
24 | github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
25 | github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
26 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
27 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
28 | github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
29 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
30 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
31 | github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
32 | github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
33 | github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
34 | github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
35 | github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
36 | github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
37 | github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
38 | github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
39 | github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
40 | github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
41 | github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
42 | github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM=
43 | github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
44 | github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
45 | github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
46 | github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
47 | github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
48 | github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
49 | github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
50 | github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
51 | github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
52 | github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
53 | github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
54 | github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
55 | github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
56 | github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
57 | github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
58 | github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
59 | github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
60 | github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
61 | github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
62 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
63 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
64 | github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
65 | github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
66 | github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
67 | github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
68 | github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
69 | github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8=
70 | github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA=
71 | github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
72 | github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
73 | github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
74 | github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
75 | github.com/jackc/pgx/v4 v4.18.2 h1:xVpYkNR5pk5bMCZGfClbO962UIqVABcAGt7ha1s/FeU=
76 | github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
77 | github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
78 | github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
79 | github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
80 | github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
81 | github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
82 | github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
83 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
84 | github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
85 | github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
86 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
87 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
88 | github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
89 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
90 | github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
91 | github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
92 | github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
93 | github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
94 | github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
95 | github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
96 | github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
97 | github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
98 | github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
99 | github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
100 | github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
101 | github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
102 | github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
103 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
104 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
105 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
106 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
107 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
108 | github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
109 | github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
110 | github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
111 | github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
112 | github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
113 | github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
114 | github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
115 | github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
116 | github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
117 | github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
118 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
119 | github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
120 | github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
121 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
122 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
123 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
124 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
125 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
126 | github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
127 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
128 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
129 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
130 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
131 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
132 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
133 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
134 | github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
135 | go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
136 | go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
137 | go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
138 | go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
139 | go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
140 | go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
141 | go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
142 | go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
143 | go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
144 | go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
145 | go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
146 | go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
147 | go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
148 | go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
149 | go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
150 | go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
151 | go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
152 | go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
153 | go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
154 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
155 | golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
156 | golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
157 | golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
158 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
159 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
160 | golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
161 | golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
162 | golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
163 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
164 | golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
165 | golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
166 | golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
167 | golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
168 | golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
169 | golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
170 | golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
171 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
172 | golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
173 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
174 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
175 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
176 | golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
177 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
178 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
179 | golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
180 | golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
181 | golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
182 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
183 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
184 | golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
185 | golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
186 | golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
187 | golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
188 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
189 | golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
190 | golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
191 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
192 | golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
193 | golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
194 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
195 | golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
196 | golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
197 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
198 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
199 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
200 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
201 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
202 | golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
203 | golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
204 | golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
205 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
206 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
207 | golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
208 | golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
209 | golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
210 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
211 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
212 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
213 | golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
214 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
215 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
216 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
217 | golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
218 | golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
219 | golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
220 | golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
221 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
222 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
223 | golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
224 | golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
225 | golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
226 | golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
227 | golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
228 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
229 | golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
230 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
231 | golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
232 | golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
233 | golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
234 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
235 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
236 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
237 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
238 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
239 | gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
240 | gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
241 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
242 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
243 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
244 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
245 | honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
246 |
--------------------------------------------------------------------------------
/integtest/consumer_test.go:
--------------------------------------------------------------------------------
1 | package integtest
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "encoding/json"
7 | "fmt"
8 | "log/slog"
9 | "os"
10 | "testing"
11 | "time"
12 |
13 | _ "github.com/jackc/pgx/v4/stdlib"
14 | "go.opentelemetry.io/otel/metric/noop"
15 |
16 | . "go.dataddo.com/pgq"
17 | "go.dataddo.com/pgq/internal/pg"
18 | "go.dataddo.com/pgq/internal/require"
19 | "go.dataddo.com/pgq/x/schema"
20 | )
21 |
22 | func TestConsumer_Run_graceful_shutdown(t *testing.T) {
23 | if testing.Short() {
24 | t.Skip("skipping integration test")
25 | }
26 | ctx := context.Background()
27 |
28 | db := openDB(t)
29 | queueName := t.Name()
30 | _, _ = db.ExecContext(ctx, schema.GenerateDropTableQuery(queueName))
31 | _, err := db.ExecContext(ctx, schema.GenerateCreateTableQuery(queueName))
32 | t.Cleanup(func() {
33 | db := openDB(t)
34 | _, err := db.ExecContext(ctx, schema.GenerateDropTableQuery(queueName))
35 | require.NoError(t, err)
36 | err = db.Close()
37 | require.NoError(t, err)
38 | })
39 | require.NoError(t, err)
40 | publisher := NewPublisher(db)
41 |
42 | msgIDs, err := publisher.Publish(ctx, queueName,
43 | &MessageOutgoing{Metadata: Metadata{"foo": "bar"}, Payload: json.RawMessage(`{"foo":"bar"}`)},
44 | )
45 | require.NoError(t, err)
46 | require.Equal(t, 1, len(msgIDs))
47 | msgID := msgIDs[0]
48 |
49 | // consumer
50 | handler := &slowHandler{}
51 | consumer, err := NewConsumer(db, queueName, handler,
52 | WithLogger(slog.New(slog.NewTextHandler(&tbWriter{tb: t}, &slog.HandlerOptions{Level: slog.LevelDebug}))),
53 | WithLockDuration(time.Hour),
54 | WithPollingInterval(time.Second),
55 | WithMaxParallelMessages(1),
56 | WithInvalidMessageCallback(func(_ context.Context, _ InvalidMessage, err error) {
57 | require.NoError(t, err)
58 | }),
59 | WithMetrics(noop.Meter{}),
60 | )
61 | require.NoError(t, err)
62 |
63 | consumeCtx, consumeCancel := context.WithTimeout(ctx, 5*time.Second)
64 | defer consumeCancel()
65 | err = consumer.Run(consumeCtx)
66 | require.ErrorIs(t, err, context.DeadlineExceeded)
67 |
68 | err = db.Close()
69 | require.NoError(t, err)
70 |
71 | // evaluate
72 | query := fmt.Sprintf(
73 | `SELECT locked_until, consumed_count FROM %s WHERE id = $1`,
74 | pg.QuoteIdentifier(queueName),
75 | )
76 | db = openDB(t)
77 | t.Cleanup(func() {
78 | err := db.Close()
79 | require.NoError(t, err)
80 | })
81 | row := db.QueryRowContext(ctx, query, msgID)
82 | var (
83 | lockedUntil sql.NullTime
84 | 		consumedCount int
85 | 	)
86 | 	err = row.Scan(&lockedUntil, &consumedCount)
87 | 	require.NoError(t, err)
88 | 	require.Equal(t, false, lockedUntil.Valid)
89 | 	require.Equal(t, 1, consumedCount)
90 | }
91 |
92 | func TestConsumer_Run_FutureMessage(t *testing.T) {
93 | if testing.Short() {
94 | t.Skip("skipping integration test")
95 | }
96 | ctx := context.Background()
97 |
98 | db := openDB(t)
99 | queueName := t.Name()
100 | _, _ = db.ExecContext(ctx, schema.GenerateDropTableQuery(queueName))
101 | _, err := db.ExecContext(ctx, schema.GenerateCreateTableQuery(queueName))
102 | t.Cleanup(func() {
103 | db := openDB(t)
104 | _, err := db.ExecContext(ctx, schema.GenerateDropTableQuery(queueName))
105 | require.NoError(t, err)
106 | err = db.Close()
107 | require.NoError(t, err)
108 | })
109 | require.NoError(t, err)
110 | publisher := NewPublisher(db)
111 |
112 | scheduledFor := time.Now().Add(time.Hour)
113 | msgIDs, err := publisher.Publish(ctx, queueName,
114 | &MessageOutgoing{Payload: json.RawMessage(`{"baz":"queex"}`), ScheduledFor: &scheduledFor},
115 | )
116 |
117 | require.NoError(t, err)
118 | require.Equal(t, 1, len(msgIDs))
119 |
120 | scheduledFor = time.Now().Add(-1 * time.Hour)
121 | simpleMsgIDs, err := publisher.Publish(ctx, queueName,
122 | &MessageOutgoing{Payload: json.RawMessage(`{"foo":"bar"}`), ScheduledFor: &scheduledFor},
123 | )
124 |
125 | require.NoError(t, err)
126 | require.Equal(t, 1, len(simpleMsgIDs))
127 |
128 | // consumer
129 | handler := ®ularHandler{}
130 | consumer, err := NewConsumer(db, queueName, handler,
131 | WithLogger(slog.New(slog.NewTextHandler(&tbWriter{tb: t}, &slog.HandlerOptions{Level: slog.LevelDebug}))),
132 | WithLockDuration(time.Hour),
133 | WithPollingInterval(time.Second),
134 | WithMaxParallelMessages(1),
135 | WithInvalidMessageCallback(func(_ context.Context, _ InvalidMessage, err error) {
136 | require.NoError(t, err)
137 | }),
138 | WithMetrics(noop.Meter{}),
139 | )
140 | require.NoError(t, err)
141 |
142 | consumeCtx, consumeCancel := context.WithTimeout(ctx, 5*time.Second)
143 | defer consumeCancel()
144 | err = consumer.Run(consumeCtx)
145 | require.ErrorIs(t, err, context.DeadlineExceeded)
146 |
147 | err = db.Close()
148 | require.NoError(t, err)
149 |
150 | // evaluate
151 | query := fmt.Sprintf(
152 | `SELECT count(1) FROM %s WHERE processed_at is null`,
153 | pg.QuoteIdentifier(queueName),
154 | )
155 | db = openDB(t)
156 | t.Cleanup(func() {
157 | err := db.Close()
158 | require.NoError(t, err)
159 | })
160 | row := db.QueryRowContext(ctx, query)
161 | var (
162 | msgCount int
163 | )
164 | err = row.Scan(&msgCount)
165 | require.NoError(t, err)
166 | require.Equal(t, 1, msgCount)
167 |
168 | }
169 |
170 | func TestConsumer_Run_MetadataFilter_Equal(t *testing.T) {
171 | if testing.Short() {
172 | t.Skip("skipping integration test")
173 | }
174 | ctx := context.Background()
175 |
176 | db := openDB(t)
177 | queueName := t.Name()
178 | _, _ = db.ExecContext(ctx, schema.GenerateDropTableQuery(queueName))
179 | _, err := db.ExecContext(ctx, schema.GenerateCreateTableQuery(queueName))
180 | t.Cleanup(func() {
181 | db := openDB(t)
182 | _, err := db.ExecContext(ctx, schema.GenerateDropTableQuery(queueName))
183 | require.NoError(t, err)
184 | err = db.Close()
185 | require.NoError(t, err)
186 | })
187 | require.NoError(t, err)
188 | publisher := NewPublisher(db)
189 |
190 | msgIDs, err := publisher.Publish(ctx, queueName,
191 | &MessageOutgoing{Metadata: Metadata{"baz": "quux"}, Payload: json.RawMessage(`{"baz":"queex"}`)},
192 | )
193 |
194 | require.NoError(t, err)
195 | require.Equal(t, 1, len(msgIDs))
196 |
197 | simpleMsgIDs, err := publisher.Publish(ctx, queueName,
198 | &MessageOutgoing{Metadata: Metadata{"foo": "bar"}, Payload: json.RawMessage(`{"foo":"bar"}`)},
199 | )
200 |
201 | require.NoError(t, err)
202 | require.Equal(t, 1, len(simpleMsgIDs))
203 |
204 | // consumer
205 | handler := ®ularHandler{}
206 | consumer, err := NewConsumer(db, queueName, handler,
207 | WithLogger(slog.New(slog.NewTextHandler(&tbWriter{tb: t}, &slog.HandlerOptions{Level: slog.LevelDebug}))),
208 | WithLockDuration(time.Hour),
209 | WithPollingInterval(time.Second),
210 | WithMaxParallelMessages(1),
211 | WithMetadataFilter(&MetadataFilter{Key: "baz", Operation: OpEqual, Value: "quux"}),
212 | WithInvalidMessageCallback(func(_ context.Context, _ InvalidMessage, err error) {
213 | require.NoError(t, err)
214 | }),
215 | WithMetrics(noop.Meter{}),
216 | )
217 | require.NoError(t, err)
218 |
219 | consumeCtx, consumeCancel := context.WithTimeout(ctx, 5*time.Second)
220 | defer consumeCancel()
221 | err = consumer.Run(consumeCtx)
222 | require.ErrorIs(t, err, context.DeadlineExceeded)
223 |
224 | err = db.Close()
225 | require.NoError(t, err)
226 |
227 | // evaluate
228 | query := fmt.Sprintf(
229 | `SELECT count(1) FROM %s WHERE processed_at is null`,
230 | pg.QuoteIdentifier(queueName),
231 | )
232 | db = openDB(t)
233 | t.Cleanup(func() {
234 | err := db.Close()
235 | require.NoError(t, err)
236 | })
237 | row := db.QueryRowContext(ctx, query)
238 | var (
239 | msgCount int
240 | )
241 | err = row.Scan(&msgCount)
242 | require.NoError(t, err)
243 | require.Equal(t, 1, msgCount)
244 |
245 | }
246 |
247 | func TestConsumer_Run_MetadataFilter_NotEqual(t *testing.T) {
248 | if testing.Short() {
249 | t.Skip("skipping integration test")
250 | }
251 | ctx := context.Background()
252 |
253 | db := openDB(t)
254 | queueName := t.Name()
255 | _, _ = db.ExecContext(ctx, schema.GenerateDropTableQuery(queueName))
256 | _, err := db.ExecContext(ctx, schema.GenerateCreateTableQuery(queueName))
257 | t.Cleanup(func() {
258 | db := openDB(t)
259 | _, err := db.ExecContext(ctx, schema.GenerateDropTableQuery(queueName))
260 | require.NoError(t, err)
261 | err = db.Close()
262 | require.NoError(t, err)
263 | })
264 | require.NoError(t, err)
265 | publisher := NewPublisher(db)
266 |
267 | msgIDs, err := publisher.Publish(ctx, queueName,
268 | &MessageOutgoing{Metadata: Metadata{"baz": "quux"}, Payload: json.RawMessage(`{"baz":"queex"}`)},
269 | )
270 |
271 | require.NoError(t, err)
272 | require.Equal(t, 1, len(msgIDs))
273 |
274 | simpleMsgIDs, err := publisher.Publish(ctx, queueName,
275 | &MessageOutgoing{Metadata: Metadata{"foo": "bar"}, Payload: json.RawMessage(`{"foo":"bar"}`)},
276 | )
277 |
278 | require.NoError(t, err)
279 | require.Equal(t, 1, len(simpleMsgIDs))
280 |
281 | // consumer
282 | handler := ®ularHandler{}
283 | consumer, err := NewConsumer(db, queueName, handler,
284 | WithLogger(slog.New(slog.NewTextHandler(&tbWriter{tb: t}, &slog.HandlerOptions{Level: slog.LevelDebug}))),
285 | WithLockDuration(time.Hour),
286 | WithPollingInterval(time.Second),
287 | WithMaxParallelMessages(1),
288 | WithMetadataFilter(&MetadataFilter{Key: "baz", Operation: OpNotEqual, Value: "quux"}),
289 | WithInvalidMessageCallback(func(_ context.Context, _ InvalidMessage, err error) {
290 | require.NoError(t, err)
291 | }),
292 | WithMetrics(noop.Meter{}),
293 | )
294 | require.NoError(t, err)
295 |
296 | consumeCtx, consumeCancel := context.WithTimeout(ctx, 5*time.Second)
297 | defer consumeCancel()
298 | err = consumer.Run(consumeCtx)
299 | require.ErrorIs(t, err, context.DeadlineExceeded)
300 |
301 | err = db.Close()
302 | require.NoError(t, err)
303 |
304 | // evaluate
305 | query := fmt.Sprintf(
306 | `SELECT count(1) FROM %s WHERE processed_at is null and metadata->>'baz' = 'quux'`,
307 | pg.QuoteIdentifier(queueName),
308 | )
309 | db = openDB(t)
310 | t.Cleanup(func() {
311 | err := db.Close()
312 | require.NoError(t, err)
313 | })
314 | row := db.QueryRowContext(ctx, query)
315 | var (
316 | msgCount int
317 | )
318 | err = row.Scan(&msgCount)
319 | require.NoError(t, err)
320 | require.Equal(t, 1, msgCount)
321 |
322 | }
323 |
324 | func openDB(t *testing.T) *sql.DB {
325 | dsn, ok := os.LookupEnv("TEST_POSTGRES_DSN")
326 | if !ok {
327 | t.Skip("Skipping integration test, TEST_POSTGRES_DSN is not set")
328 | }
329 | db, err := sql.Open("pgx", dsn)
330 | require.NoError(t, err)
331 | t.Cleanup(func() {
332 | err := db.Close()
333 | require.NoError(t, err)
334 | })
335 | ensureUUIDExtension(t, db)
336 | return db
337 | }
338 |
339 | func ensureUUIDExtension(t *testing.T, db *sql.DB) {
340 | _, err := db.Exec(`
341 | DO $$
342 | BEGIN
343 | IF current_setting('server_version_num')::int < 130000 THEN
344 | -- If PostgreSQL version is less than 13, enable pgcrypto
345 | CREATE EXTENSION IF NOT EXISTS pgcrypto;
346 | END IF;
347 | END $$;
348 | `)
349 | require.NoError(t, err)
350 | }
351 |
352 | type (
353 | slowHandler struct{} // blocks until its context is done, then nacks with ctx.Err()
354 | regularHandler struct{} // blocks until its context is done, then acks the message
355 | )
356 |
357 | func (r *regularHandler) HandleMessage(ctx context.Context, _ *MessageIncoming) (processed bool, err error) {
358 | <-ctx.Done()
359 | return MessageProcessed, nil
360 | }
361 |
362 | func (s *slowHandler) HandleMessage(ctx context.Context, _ *MessageIncoming) (processed bool, err error) {
363 | <-ctx.Done()
364 | return MessageNotProcessed, ctx.Err()
365 | }
366 |
367 | type tbWriter struct {
368 | tb testing.TB
369 | }
370 |
371 | func (w *tbWriter) Write(p []byte) (n int, err error) {
372 | w.tb.Log(string(p))
373 | return len(p), nil
374 | }
375 |
--------------------------------------------------------------------------------
/integtest/publisher_test.go:
--------------------------------------------------------------------------------
1 | package integtest
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "testing"
8 |
9 | "github.com/jackc/pgtype"
10 |
11 | "go.dataddo.com/pgq"
12 | pgutils "go.dataddo.com/pgq/internal/pg"
13 | "go.dataddo.com/pgq/internal/require"
14 | "go.dataddo.com/pgq/x/schema"
15 | )
16 |
17 | func TestPublisher(t *testing.T) {
18 | if testing.Short() {
19 | t.Skip("skipping integration test")
20 | }
21 | ctx := context.Background()
22 |
23 | type want struct {
24 | metadata pgtype.JSONB
25 | payload pgtype.JSONB
26 | }
27 | tests := []struct {
28 | name string
29 | msg *pgq.MessageOutgoing
30 | publisherOpts []pgq.PublisherOption
31 | want want
32 | wantErr bool
33 | }{
34 | {
35 | name: "Select extra columns",
36 | msg: &pgq.MessageOutgoing{
37 | Metadata: pgq.Metadata{
38 | "test": "test_value",
39 | },
40 | Payload: json.RawMessage(`{"foo":"bar"}`),
41 | },
42 | publisherOpts: []pgq.PublisherOption{
43 | pgq.WithMetaInjectors(
44 | pgq.StaticMetaInjector(pgq.Metadata{"host": "localhost"}),
45 | ),
46 | },
47 | want: want{
48 | metadata: pgtype.JSONB{Bytes: []byte(`{"host": "localhost", "test": "test_value"}`), Status: pgtype.Present},
49 | payload: pgtype.JSONB{Bytes: []byte(`{"foo": "bar"}`), Status: pgtype.Present},
50 | },
51 | wantErr: false,
52 | },
53 | }
54 | for _, tt := range tests {
55 | t.Run(tt.name, func(t *testing.T) {
56 | db := openDB(t)
57 | t.Cleanup(func() {
58 | err := db.Close()
59 | require.NoError(t, err)
60 | })
61 | queueName := t.Name()
62 | _, _ = db.ExecContext(ctx, schema.GenerateDropTableQuery(queueName))
63 | _, err := db.ExecContext(ctx, schema.GenerateCreateTableQuery(queueName))
64 | require.NoError(t, err)
65 | t.Cleanup(func() {
66 | _, err := db.ExecContext(ctx, schema.GenerateDropTableQuery(queueName))
67 | require.NoError(t, err)
68 | })
69 | d := pgq.NewPublisher(db, tt.publisherOpts...)
70 | msgIDs, err := d.Publish(ctx, queueName, tt.msg)
71 | if tt.wantErr {
72 | require.Error(t, err)
73 | return
74 | }
75 | require.NoError(t, err)
76 | require.Equal(t, 1, len(msgIDs))
77 | row := db.QueryRowContext(ctx,
78 | fmt.Sprintf(
79 | "SELECT id, metadata, payload FROM %s WHERE id = $1",
80 | pgutils.QuoteIdentifier(queueName),
81 | ),
82 | msgIDs[0],
83 | )
84 | var (
85 | id pgtype.UUID
86 | metadata pgtype.JSONB
87 | payload pgtype.JSONB
88 | )
89 | err = row.Scan(&id, &metadata, &payload)
90 | require.NoError(t, err)
91 | require.Equal(t, [16]byte(msgIDs[0]), id.Bytes)
92 | require.Equal(t, tt.want.metadata.Status, metadata.Status)
93 | require.Equal(t, string(tt.want.metadata.Bytes), string(metadata.Bytes))
94 | require.Equal(t, tt.want.payload.Status, payload.Status)
95 | require.Equal(t, string(tt.want.payload.Bytes), string(payload.Bytes))
96 | })
97 | }
98 | }
99 |
--------------------------------------------------------------------------------
/internal/pg/pg.go:
--------------------------------------------------------------------------------
1 | package pg
2 |
3 | import (
4 | "strconv"
5 | "strings"
6 | )
7 |
8 | // StmtParams is a helper for generating prepared statement parameters,
9 | // i.e. $1, $2, $3, ...
10 | type StmtParams struct {
11 | counter int
12 | }
13 |
14 | // Next returns next parameter.
15 | func (p *StmtParams) Next() string {
16 | p.counter++
17 | return "$" + strconv.Itoa(p.counter)
18 | }
19 |
20 | // QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be
21 | // used as part of an SQL statement. For example:
22 | //
23 | // tblname := "my_table"
24 | // data := "my_data"
25 | //	quoted := pg.QuoteIdentifier(tblname)
26 | //	_, err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data)
27 | //
28 | // Any double quotes in name will be escaped. The quoted identifier will be
29 | // case-sensitive when used in a query. If the input string contains a zero byte,
30 | // the result will be truncated immediately before it.
31 | //
32 | // It's a copy of the function from github.com/lib/pq.
33 | func QuoteIdentifier(name string) string {
34 | end := strings.IndexRune(name, 0)
35 | if end > -1 {
36 | name = name[:end]
37 | }
38 | return `"` + strings.Replace(name, `"`, `""`, -1) + `"`
39 | }
40 |
--------------------------------------------------------------------------------
/internal/query/query_builder.go:
--------------------------------------------------------------------------------
1 | package query
2 |
3 | import (
4 | "fmt"
5 | "regexp"
6 | "strings"
7 | )
8 |
9 | // Builder is a helper for building SQL queries with named parameters (jmoiron/sqlx and PostgreSQL style, for now)
10 | type Builder struct {
11 | query strings.Builder
12 | params []string
13 | }
14 |
15 | var (
16 | tagRe = regexp.MustCompile(`:{1,2}(\w+)`)
17 | )
18 |
19 | func NewBuilder() *Builder {
20 | return &Builder{}
21 | }
22 |
23 | // WriteString appends the provided string to the query and extracts any parameters from it.
24 | func (qb *Builder) WriteString(part string) {
25 | params := getParams(part)
26 |
27 | qb.params = append(qb.params, params...)
28 |
29 | qb.query.WriteString(part)
30 | }
31 |
32 | // HasParam checks whether the Builder has a parameter of the given name.
33 | func (qb *Builder) HasParam(name string) bool {
34 | for _, paramName := range qb.params {
35 | if paramName == name {
36 | return true
37 | }
38 | }
39 | return false
40 | }
41 |
42 | func (qb *Builder) String() string {
43 | return qb.query.String()
44 | }
45 |
46 | // Build validates that the provided params map contains a value for every named parameter and returns the query string unchanged; binding the values is left to the SQL layer.
47 | func (qb *Builder) Build(params map[string]interface{}) (string, error) {
48 | // Validate that the params map includes all parameter names from qb.params
49 | for _, paramName := range qb.params {
50 | if _, exists := params[paramName]; !exists {
51 | return "", fmt.Errorf("missing parameter: %s", paramName)
52 | }
53 | }
54 |
55 | queryString := qb.String()
56 |
57 | return queryString, nil
58 | }
59 |
60 | // getParams extracts tags formatted as :tagName from the provided line, ignoring type casting patterns like ::interval.
61 | func getParams(line string) []string {
62 |
63 | matches := tagRe.FindAllStringSubmatch(line, -1)
64 | if matches == nil {
65 | return nil
66 | }
67 |
68 | var params []string
69 | for _, match := range matches {
70 | // Skip "::name" matches, which are type casts rather than parameters.
71 | if !strings.HasPrefix(match[0], "::") {
72 | params = append(params, match[1])
73 | }
74 | }
75 | return params
76 | }
77 |
--------------------------------------------------------------------------------
/internal/query/query_builder_test.go:
--------------------------------------------------------------------------------
1 | package query
2 |
3 | import (
4 | "reflect"
5 | "testing"
6 | )
7 |
8 | func TestGetParams(t *testing.T) {
9 | tests := []struct {
10 | name string
11 | line string
12 | expected []string
13 | }{
14 | {
15 | name: "Single Tag",
16 | line: "Here's a tag:tagName",
17 | expected: []string{"tagName"},
18 | },
19 | {
20 | name: "Multiple Tags No Spaces",
21 | line: "Multiple :tag1:tag2:tag3",
22 | expected: []string{"tag1", "tag2", "tag3"},
23 | },
24 | {
25 | name: "Invalid Tag With Space",
26 | line: "This should not be a tag: tagWithSpace",
27 | expected: nil, // ": tagWithSpace" is not valid due to the space
28 | },
29 | {
30 | name: "Tags With Mixed Characters",
31 | line: "Tags with numbers :tag123 and underscores:tag_name",
32 | expected: []string{"tag123", "tag_name"},
33 | },
34 | {
35 | name: "Mixed Valid and Invalid Tags",
36 | line: "Valid:tag1 and invalid: tag2",
37 | expected: []string{"tag1"},
38 | },
39 | }
40 |
41 | for _, tt := range tests {
42 | t.Run(tt.name, func(t *testing.T) {
43 | result := getParams(tt.line)
44 | if !reflect.DeepEqual(result, tt.expected) {
45 | t.Errorf("getParams(%q) got %v, want %v", tt.line, result, tt.expected)
46 | }
47 | })
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/internal/require/require.go:
--------------------------------------------------------------------------------
1 | // Package require is a minimal alternative to github.com/stretchr/testify/require.
2 | package require
3 |
4 | import (
5 | "errors"
6 | "reflect"
7 | "testing"
8 | )
9 |
10 | // NoError fails the test if err is not nil.
11 | func NoError(t testing.TB, err error) {
12 | t.Helper()
13 | if err != nil {
14 | t.Fatalf("expected no error, got %v", err)
15 | }
16 | }
17 |
18 | // Error fails the test if err is nil.
19 | func Error(t testing.TB, err error) {
20 | t.Helper()
21 | if err == nil {
22 | t.Fatal("expected error, got nil")
23 | }
24 | }
25 |
26 | // ErrorIs fails the test if err is nil or does not match target.
27 | func ErrorIs(t testing.TB, err error, target error) {
28 | t.Helper()
29 | if !errors.Is(err, target) {
30 | t.Fatalf("expected error %v, got %v", target, err)
31 | }
32 | }
33 |
34 | // Equal fails the test if expected is not equal to actual.
35 | func Equal(t testing.TB, expected interface{}, actual interface{}) {
36 | t.Helper()
37 | if !reflect.DeepEqual(expected, actual) {
38 | t.Fatalf("expected:\n\t%[1]T(%#[1]v)\ngot:\n\t%[2]T(%#[2]v)", expected, actual)
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataddo/pgq/c8b263b44bb7de8b00e44045008e5e81ed0e6a78/logo.png
--------------------------------------------------------------------------------
/message.go:
--------------------------------------------------------------------------------
1 | package pgq
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "reflect"
8 | "strings"
9 | "sync"
10 | "time"
11 |
12 | "github.com/google/uuid"
13 | "github.com/pkg/errors"
14 | )
15 |
16 | // Metadata is message Metadata definition.
17 | type Metadata map[string]string
18 |
19 | var (
20 | fieldCountPerMessageOutgoing int
21 | dbFieldsPerMessageOutgoing []string
22 | dbFieldsString string
23 | )
24 |
25 | func init() {
26 | var err error
27 |
28 | // Count fields in MessageOutgoing struct. Used for dynamically building the
29 | // queries
30 | t := reflect.TypeOf(MessageOutgoing{})
31 | fieldCountPerMessageOutgoing = t.NumField()
32 |
33 | // Get field names in MessageOutgoing struct. Used for dynamically building the
34 | // queries
35 | dbFieldsPerMessageOutgoing, err = buildColumnListFromTags(MessageOutgoing{})
36 | if err != nil {
37 | panic(err)
38 | }
39 |
40 | dbFieldsString = strings.Join(dbFieldsPerMessageOutgoing, ", ")
41 | }
42 |
43 | // NewMessage creates a new incoming message.
44 | func NewMessage(meta Metadata, payload json.RawMessage, attempt int, maxConsumedCount uint) *MessageIncoming {
45 | return &MessageIncoming{
46 | Metadata: meta,
47 | Payload: payload,
48 | Attempt: attempt,
49 | maxConsumedCount: maxConsumedCount,
50 | }
51 | }
52 |
53 | // MessageOutgoing is a record to be inserted into table queue in Postgres
54 | type MessageOutgoing struct {
55 | // ScheduledFor is the time when the message should be processed. If nil, the
56 | // message gets processed immediately.
57 | ScheduledFor *time.Time `db:"scheduled_for"`
58 | // Payload is the message's Payload.
59 | Payload json.RawMessage `db:"payload"`
60 | // Metadata contains the message Metadata.
61 | Metadata Metadata `db:"metadata"`
62 | }
63 |
64 | // MessageIncoming is a record retrieved from table queue in Postgres
65 | type MessageIncoming struct {
66 | // id is a unique identifier of message
67 | id uuid.UUID
68 | // Metadata contains the message Metadata.
69 | Metadata Metadata
70 | // Payload is the message's Payload.
71 | Payload json.RawMessage
72 | // Attempt number, counts from 1. It is incremented every time the message is
73 | // consumed.
74 | Attempt int
75 | // Deadline is the time when the message will be returned to the queue if not
76 | // finished. It is set by the queue when the message is consumed.
77 | Deadline time.Time
78 | cancelCtx context.CancelFunc
79 |
80 | maxConsumedCount uint
81 | // once ensures that the message will be finished only once. It's easier than
82 | // complicated SQL queries.
83 | once sync.Once
84 | ackFn func(ctx context.Context) error
85 | nackFn func(context.Context, string) error
86 | discardFn func(context.Context, string) error
87 | updateLockedUntilFn func(context.Context, time.Time) error
88 | }
89 |
90 | // LastAttempt returns true if the message is being consumed for the last time
91 | // according to the maxConsumedCount setting. If the Consumer is not configured
92 | // to limit the number of attempts (WithMaxConsumeCount set to zero), it always
93 | // returns false.
94 | func (m *MessageIncoming) LastAttempt() bool {
95 | if m.maxConsumedCount == 0 {
96 | return false
97 | }
98 | return m.Attempt >= int(m.maxConsumedCount)
99 | }
100 |
101 | // SetTimeout sets the message timeout. If the resulting deadline falls after
102 | // the current message deadline, it returns ErrInvalidDeadline. The timeout also
103 | // affects the queue lock on the message. Beware that the default Deadline is calculated as:
104 | //
105 | // LockedUntil - (AckTimeout + MessageProcessingReserveDuration)
106 | func (m *MessageIncoming) SetTimeout(ctx context.Context, timeout time.Duration) (context.Context, error) {
107 | return m.SetDeadline(ctx, time.Now().Add(timeout))
108 | }
109 |
110 | // ErrInvalidDeadline is returned when a new deadline would fall after the current message deadline.
111 | var ErrInvalidDeadline = errors.New("deadline can't be prolonged")
112 |
113 | // SetDeadline sets the message deadline. If the new deadline falls after the
114 | // current message deadline, it returns ErrInvalidDeadline. The deadline also
115 | // affects the queue lock on the message. Beware that the default Deadline is calculated as:
116 | //
117 | // LockedUntil - (AckTimeout + MessageProcessingReserveDuration)
118 | func (m *MessageIncoming) SetDeadline(ctx context.Context, deadline time.Time) (context.Context, error) {
119 | if deadline.After(m.Deadline) {
120 | return nil, ErrInvalidDeadline
121 | }
122 | ctx, cancel := context.WithDeadline(ctx, deadline)
123 | if err := m.updateLockedUntilFn(ctx, deadline); err != nil {
124 | cancel()
125 | return nil, errors.Wrap(err, "setting deadline in the database")
126 | }
127 | m.Deadline = deadline
128 | m.cancelCtx = cancel
129 | return ctx, nil
130 | }
131 |
132 | // errMessageAlreadyFinished is error that is returned if message is being
133 | // finished for second time. Example: when trying to ack after the nack has been
134 | // already called.
135 | var errMessageAlreadyFinished = errors.New("message already finished")
136 |
137 | // ack positively acknowledges the message, and the message is marked as processed.
138 | func (m *MessageIncoming) ack(ctx context.Context) error {
139 | err := errMessageAlreadyFinished
140 | m.once.Do(func() {
141 | err = m.ackFn(ctx)
142 | m.cancelCtx()
143 | })
144 | return err
145 | }
146 |
147 | // nack negatively acknowledges the message. The message is
148 | // returned to the queue after nack and may be processed again.
149 | func (m *MessageIncoming) nack(ctx context.Context, reason string) error {
150 | err := errMessageAlreadyFinished
151 | m.once.Do(func() {
152 | err = m.nackFn(ctx, reason)
153 | m.cancelCtx()
154 | })
155 | return err
156 | }
157 |
158 | // discard removes the message from the queue completely. It's like ack, but it
159 | // also records the reason why the message was discarded.
160 | func (m *MessageIncoming) discard(ctx context.Context, reason string) error {
161 | err := errMessageAlreadyFinished
162 | m.once.Do(func() {
163 | err = m.discardFn(ctx, reason)
164 | m.cancelCtx()
165 | })
166 | return err
167 | }
168 |
169 | // buildColumnListFromTags dynamically constructs a list of column names based
170 | // on the `db` struct tags of any given struct. It returns a slice of strings
171 | // containing the column names.
172 | func buildColumnListFromTags(data interface{}) ([]string, error) {
173 | // Ensure that 'data' is a struct
174 | t := reflect.TypeOf(data)
175 | if t.Kind() != reflect.Struct {
176 | return nil, fmt.Errorf("provided argument is not a struct")
177 | }
178 |
179 | var columns []string
180 | for i := 0; i < t.NumField(); i++ {
181 | field := t.Field(i)
182 | dbTag := field.Tag.Get("db") // Get the value of the `db` tag
183 | if dbTag != "" {
184 | columns = append(columns, dbTag)
185 | }
186 | }
187 |
188 | return columns, nil
189 | }
190 |
--------------------------------------------------------------------------------
/message_test.go:
--------------------------------------------------------------------------------
1 | package pgq
2 |
3 | import (
4 | "context"
5 | "testing"
6 | "time"
7 |
8 | "go.dataddo.com/pgq/internal/require"
9 | )
10 |
11 | func TestMessageIncoming_LastAttempt(t *testing.T) {
12 | type fields struct {
13 | Attempt int
14 | maxConsumedCount uint
15 | }
16 | tests := []struct {
17 | name string
18 | fields fields
19 | want bool
20 | }{
21 | {
22 | name: "maxConsumedCount is 0",
23 | fields: fields{
24 | maxConsumedCount: 0,
25 | },
26 | want: false,
27 | },
28 | {
29 | name: "Attempt is less than maxConsumedCount",
30 | fields: fields{
31 | Attempt: 1,
32 | maxConsumedCount: 2,
33 | },
34 | want: false,
35 | },
36 | {
37 | name: "Attempt is equal to maxConsumedCount",
38 | fields: fields{
39 | Attempt: 2,
40 | maxConsumedCount: 2,
41 | },
42 | want: true,
43 | },
44 | {
45 | name: "Attempt is greater than maxConsumedCount",
46 | fields: fields{
47 | Attempt: 3,
48 | maxConsumedCount: 2,
49 | },
50 | want: true,
51 | },
52 | }
53 | for _, tt := range tests {
54 | t.Run(tt.name, func(t *testing.T) {
55 | m := &MessageIncoming{
56 | Attempt: tt.fields.Attempt,
57 | maxConsumedCount: tt.fields.maxConsumedCount,
58 | }
59 | if got := m.LastAttempt(); got != tt.want {
60 | t.Errorf("LastAttempt() = %v, want %v", got, tt.want)
61 | }
62 | })
63 | }
64 | }
65 |
66 | func TestMessageIncoming_SetDeadline(t *testing.T) {
67 | m := &MessageIncoming{
68 | Deadline: time.Date(9999, 1, 1, 0, 0, 0, 0, time.UTC),
69 | updateLockedUntilFn: func(ctx context.Context, t time.Time) error {
70 | return nil
71 | },
72 | }
73 | ctx := context.Background()
74 | ctx, err := m.SetDeadline(ctx, time.Now().Add(time.Second))
75 | require.NoError(t, err)
76 | ctx, err = m.SetDeadline(ctx, time.Now())
77 | require.NoError(t, err)
78 | _, err = m.SetDeadline(ctx, time.Now())
79 | require.Error(t, err)
80 | }
81 |
--------------------------------------------------------------------------------
/publisher.go:
--------------------------------------------------------------------------------
1 | package pgq
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | stderrors "errors"
7 | "maps"
8 | "strings"
9 |
10 | "github.com/google/uuid"
11 | "github.com/jackc/pgtype"
12 | "github.com/jmoiron/sqlx"
13 | "github.com/pkg/errors"
14 |
15 | "go.dataddo.com/pgq/internal/pg"
16 | )
17 |
18 | type publisher struct {
19 | db *sqlx.DB
20 | cfg publisherConfig
21 | }
22 |
23 | // Publisher publishes messages to Postgres queue.
24 | type Publisher interface {
25 | Publish(ctx context.Context, queue string, msg ...*MessageOutgoing) ([]uuid.UUID, error)
26 | }
27 |
28 | type publisherConfig struct {
29 | metaInjectors []func(context.Context, Metadata)
30 | }
31 |
32 | // PublisherOption configures the publisher. Multiple options can be passed to
33 | // NewPublisher. Options are applied in the order they are given. The last option
34 | // overrides any previous ones. If no options are passed to NewPublisher, the
35 | // default values are used.
36 | type PublisherOption func(*publisherConfig)
37 |
38 | // WithMetaInjectors adds Metadata injectors to the publisher. Injectors are run in the order they are given.
39 | func WithMetaInjectors(injectors ...func(context.Context, Metadata)) PublisherOption {
40 | return func(c *publisherConfig) {
41 | c.metaInjectors = append(c.metaInjectors, injectors...)
42 | }
43 | }
44 |
45 | // StaticMetaInjector returns a Metadata injector that injects given Metadata.
46 | func StaticMetaInjector(m Metadata) func(context.Context, Metadata) {
47 | staticMetadata := maps.Clone(m)
48 | return func(_ context.Context, metadata Metadata) {
49 | maps.Copy(metadata, staticMetadata)
50 | }
51 | }
52 |
53 | // NewPublisher initializes the publisher with given *sql.DB client.
54 | func NewPublisher(db *sql.DB, opts ...PublisherOption) Publisher {
55 | return NewPublisherExt(sqlx.NewDb(db, "pgx"), opts...)
56 | }
57 |
58 | // NewPublisherExt initializes the publisher with given *sqlx.DB client.
59 | func NewPublisherExt(db *sqlx.DB, opts ...PublisherOption) Publisher {
60 | cfg := publisherConfig{}
61 | for _, opt := range opts {
62 | opt(&cfg)
63 | }
64 | return &publisher{db: db, cfg: cfg}
65 | }
66 |
67 | // Publish publishes the message.
68 | func (d *publisher) Publish(ctx context.Context, queue string, msgs ...*MessageOutgoing) (ids []uuid.UUID, err error) {
69 | if len(msgs) < 1 {
70 | return []uuid.UUID{}, nil
71 | }
72 | query := buildInsertQuery(queue, len(msgs))
73 | args := d.buildArgs(ctx, msgs)
74 | // a transaction is used to get a consistent read of the query result.
75 | tx, err := d.db.BeginTx(ctx, nil)
76 | if err != nil {
77 | return nil, errors.Wrap(err, "couldn't start transaction")
78 | }
79 | defer func() {
80 | r := recover()
81 | rErr := tx.Rollback()
82 | if rErr != nil && !errors.Is(rErr, sql.ErrTxDone) {
83 | if err != nil {
84 | // this is tricky, but we want to return both errors
85 | err = stderrors.Join(err, rErr)
86 | } else {
87 | err = rErr
88 | }
89 | }
90 | if r != nil {
91 | panic(r)
92 | }
93 | }()
94 |
95 | rows, err := tx.QueryContext(ctx, query, args...)
96 | if err != nil {
97 | return nil, errors.WithStack(err)
98 | }
99 | defer rows.Close()
100 | ids = make([]uuid.UUID, 0, len(msgs))
101 | for rows.Next() {
102 | var id pgtype.UUID
103 | if err := rows.Scan(&id); err != nil {
104 | return nil, errors.WithStack(err)
105 | }
106 | ids = append(ids, id.Bytes)
107 | }
108 | if err := rows.Err(); err != nil {
109 | return nil, errors.WithStack(err)
110 | }
111 | if err := tx.Commit(); err != nil {
112 | return nil, errors.WithStack(err)
113 | }
114 | return ids, nil
115 | }
116 |
117 | func buildInsertQuery(queue string, msgCount int) string {
118 | var sb strings.Builder
119 | sb.WriteString("INSERT INTO ")
120 | sb.WriteString(pg.QuoteIdentifier(queue))
121 | sb.WriteString(" (")
122 | sb.WriteString(dbFieldsString)
123 | sb.WriteString(") VALUES ")
124 | var params pg.StmtParams
125 | for rowIdx := 0; rowIdx < msgCount; rowIdx++ {
126 | if rowIdx != 0 {
127 | sb.WriteString(",")
128 | }
129 | sb.WriteString("(")
130 | sb.WriteString(params.Next())
131 | sb.WriteString(",")
132 | sb.WriteString(params.Next())
133 | sb.WriteString(",")
134 | sb.WriteString(params.Next())
135 | sb.WriteString(")")
136 | }
137 | sb.WriteString(` RETURNING "id"`)
138 | return sb.String()
139 | }
140 |
141 | func (d *publisher) buildArgs(ctx context.Context, msgs []*MessageOutgoing) []any {
142 | args := make([]any, 0, len(msgs)*fieldCountPerMessageOutgoing)
143 | for _, msg := range msgs {
144 | for _, injector := range d.cfg.metaInjectors {
145 | injector(ctx, msg.Metadata)
146 | }
147 | args = append(args, msg.ScheduledFor, msg.Payload, msg.Metadata)
148 | }
149 | return args
150 | }
151 |
--------------------------------------------------------------------------------
/publisher_test.go:
--------------------------------------------------------------------------------
1 | package pgq
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "testing"
7 | "time"
8 |
9 | "go.dataddo.com/pgq/internal/require"
10 | )
11 |
12 | func Test_buildInsertQuery(t *testing.T) {
13 | type args struct {
14 | queue string
15 | msgCount int
16 | }
17 | tests := []struct {
18 | name string
19 | args args
20 | want string
21 | }{
22 | {
23 | name: "single message",
24 | args: args{
25 | queue: "queue",
26 | msgCount: 1,
27 | },
28 | want: `INSERT INTO "queue" (scheduled_for, payload, metadata) VALUES ($1,$2,$3) RETURNING "id"`,
29 | },
30 | {
31 | name: "multiple messages",
32 | args: args{
33 | queue: "queue",
34 | msgCount: 3,
35 | },
36 | want: `INSERT INTO "queue" (scheduled_for, payload, metadata) VALUES ($1,$2,$3),($4,$5,$6),($7,$8,$9) RETURNING "id"`,
37 | },
38 | }
39 | for _, tt := range tests {
40 | t.Run(tt.name, func(t *testing.T) {
41 | if got := buildInsertQuery(tt.args.queue, tt.args.msgCount); got != tt.want {
42 | t.Errorf("buildInsertQuery() = %v, want %v", got, tt.want)
43 | }
44 | })
45 | }
46 | }
47 |
48 | func TestClient_buildArgs(t *testing.T) {
49 | var ScheduledForTime time.Time
50 |
51 | type args struct {
52 | ctx context.Context
53 | msgs []*MessageOutgoing
54 | }
55 | tests := []struct {
56 | name string
57 | args args
58 | want []any
59 | }{
60 | {
61 | name: "static injector adds metadata",
62 | args: args{
63 | ctx: context.Background(),
64 | msgs: []*MessageOutgoing{
65 | {ScheduledFor: &ScheduledForTime, Metadata: Metadata{}, Payload: nil},
66 | },
67 | },
68 | want: []any{
69 | &ScheduledForTime,
70 | json.RawMessage(nil),
71 | Metadata{
72 | "foo": "bar",
73 | },
74 | },
75 | },
76 | {
77 | name: "static injector adds metadata again",
78 | args: args{
79 | ctx: context.Background(),
80 | msgs: []*MessageOutgoing{
81 | {ScheduledFor: &ScheduledForTime, Metadata: Metadata{}, Payload: nil},
82 | },
83 | },
84 | want: []any{
85 | &ScheduledForTime,
86 | json.RawMessage(nil),
87 | Metadata{
88 | "foo": "bar",
89 | },
90 | },
91 | },
92 | }
93 | for _, tt := range tests {
94 | t.Run(tt.name, func(t *testing.T) {
95 | p := NewPublisher(nil, WithMetaInjectors(
96 | StaticMetaInjector(Metadata{"foo": "bar"}),
97 | ))
98 | got := p.(*publisher).buildArgs(tt.args.ctx, tt.args.msgs)
99 | require.Equal(t, tt.want, got)
100 | })
101 | }
102 | }
103 |
--------------------------------------------------------------------------------
/validator.go:
--------------------------------------------------------------------------------
1 | package pgq
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/jmoiron/sqlx"
7 | "github.com/pkg/errors"
8 | )
9 |
10 | const columnSelect = `SELECT column_name
11 | FROM information_schema.columns
12 | WHERE table_catalog = CURRENT_CATALOG
13 | AND table_schema = CURRENT_SCHEMA
14 | AND table_name = $1
15 | ORDER BY ordinal_position
16 | `
17 |
18 | const indexSelect = `
19 | SELECT
20 | COUNT(DISTINCT a.attname) >= 2 AS index_exists
21 | FROM
22 | pg_class t
23 | JOIN
24 | pg_index ix ON t.oid = ix.indrelid
25 | JOIN
26 | pg_class i ON i.oid = ix.indexrelid
27 | JOIN
28 | pg_attribute a ON a.attrelid = t.oid AND a.attnum = ANY(ix.indkey)
29 | WHERE
30 | t.relkind IN ('r', 'p')
31 | AND t.relname = $1
32 | AND a.attname = ANY($2)
33 | AND ix.indisvalid;
34 | `
35 |
36 | var mandatoryFields = []string{
37 | "id",
38 | "locked_until",
39 | "processed_at",
40 | "scheduled_for",
41 | "consumed_count",
42 | "started_at",
43 | "payload",
44 | "metadata",
45 | }
46 |
47 | // mandatoryIndexes lists the columns that must be covered by valid indexes
48 | // on the queue table.
49 | var mandatoryIndexes = []string{
50 | "created_at",
51 | "processed_at",
52 | }
53 |
54 | // ValidateFields checks if required fields exist
55 | func ValidateFields(ctx context.Context, db *sqlx.DB, queueName string) error {
56 | // --- (1) ----
57 | // Recover the columns that the queue has
58 | columns, err := getColumnData(ctx, db, queueName)
59 | if err != nil {
60 | return err
61 | }
62 |
63 | // --- (2) ----
64 | // Run through each one of the recovered columns and validate if all the mandatory ones are included
65 | var missingColumns []string
66 | for _, mandatoryField := range mandatoryFields {
67 | if _, ok := columns[mandatoryField]; !ok {
68 | missingColumns = append(missingColumns, mandatoryField)
69 | }
70 | delete(columns, mandatoryField)
71 | }
72 |
73 | // If all the mandatory fields have been found then we don't need to return an error. However,
74 | // if at least one mandatory field is missing in the schema, this queue is invalid and the
75 | // returned error lists the missing columns.
76 | if len(missingColumns) > 0 {
77 | return errors.Errorf("some PGQ columns are missing: %v", missingColumns)
78 | }
79 |
80 | // TODO log extra columns in queue table or ignore them?
81 | // extraColumns := make([]string, 0, len(columns))
82 | // for k := range columns {
83 | // extraColumns = append(extraColumns, k)
84 | // }
85 | // _ = extraColumns
86 |
87 | return nil
88 | }
89 |
90 | // ValidateIndexes checks if required indexes exist
91 | func ValidateIndexes(ctx context.Context, db *sqlx.DB, queueName string) error {
92 | found, err := checkIndexData(ctx, db, queueName)
93 | if err != nil {
94 | return err
95 | }
96 |
97 | // Check whether all the mandatory indexes were found; if even one is missing, return an error.
98 | if !found {
99 | return errors.Errorf("some PGQ indexes are missing or invalid")
100 | }
101 | return nil
102 | }
103 |
104 | func getColumnData(ctx context.Context, db *sqlx.DB, queueName string) (map[string]struct{}, error) {
105 | rows, err := db.QueryContext(ctx, columnSelect, queueName)
106 | if err != nil {
107 | return nil, errors.Wrap(err, "querying schema of queue table")
108 | }
109 | defer func() { _ = rows.Close() }()
110 |
111 | columns := make(map[string]struct{})
112 | for rows.Next() {
113 | var s string
114 | if err := rows.Scan(&s); err != nil {
115 | return nil, errors.Wrap(err, "reading schema row of queue table")
116 | }
117 | columns[s] = struct{}{}
118 | }
119 | if err := rows.Err(); err != nil {
120 | return nil, errors.Wrap(err, "reading schema of queue table")
121 | }
122 | return columns, nil
123 | }
124 |
125 | func checkIndexData(ctx context.Context, db *sqlx.DB, queueName string) (bool, error) {
126 | rows, err := db.QueryContext(ctx, indexSelect, queueName, mandatoryIndexes)
127 | if err != nil {
128 | return false, errors.Wrap(err, "querying index schema of queue table")
129 | }
130 | defer func() { _ = rows.Close() }()
131 |
132 | var allMandatoryColumnsAreIndexed bool
133 | for rows.Next() {
134 | if err := rows.Scan(&allMandatoryColumnsAreIndexed); err != nil {
135 | return false, errors.Wrap(err, "reading index schema row of queue table")
136 | }
137 | }
138 | if err := rows.Err(); err != nil {
139 | return false, errors.Wrap(err, "reading index schema of queue table")
140 | }
141 | if err := rows.Close(); err != nil {
142 | return false, errors.Wrap(err, "closing index schema query of queue table")
143 | }
144 | return allMandatoryColumnsAreIndexed, nil
145 | }
146 |
--------------------------------------------------------------------------------
/validator_test.go:
--------------------------------------------------------------------------------
1 | package pgq
2 |
3 | import (
4 | "context"
5 | "crypto/rand"
6 | "encoding/base64"
7 | "fmt"
8 | "os"
9 | "testing"
10 |
11 | "go.dataddo.com/pgq/internal/pg"
12 | "go.dataddo.com/pgq/internal/require"
13 |
14 | _ "github.com/jackc/pgx/v4/stdlib"
15 | "github.com/jmoiron/sqlx"
16 | )
17 |
18 | func TestValidator_ValidateFieldsCorrectSchema(t *testing.T) {
19 | // --- (1) ----
20 | // Arrange
21 | ctx := context.Background()
22 | db := openDB(t)
23 | queueName := fmt.Sprintf("TestQueue_%s", generateRandomString(10))
24 | t.Cleanup(func() {
25 | _, err := db.ExecContext(ctx, generateDropTableQuery(queueName))
26 | require.NoError(t, err)
27 | })
28 |
29 | // Create the new queue
30 | _, err := db.ExecContext(ctx, generateCreateTableQuery(queueName))
31 | require.NoError(t, err)
32 |
33 | // --- (2) ----
34 | // Act: Validate queue
35 | err = ValidateFields(ctx, db, queueName)
36 |
37 | // Assert
38 | require.NoError(t, err)
39 | }
40 |
41 | func TestValidator_ValidateFieldsCorrectSchemaPartitionedTable(t *testing.T) {
42 | // --- (1) ----
43 | // Arrange
44 | ctx := context.Background()
45 | db := openDB(t)
46 | queueName := fmt.Sprintf("TestQueue_%s", generateRandomString(10))
47 | t.Cleanup(func() {
48 | _, err := db.ExecContext(ctx, generateDropTableQuery(queueName))
49 | require.NoError(t, err)
50 | })
51 |
52 | // Create the new queue
53 | _, err := db.ExecContext(ctx, generateCreateTablePartitionedQuery(queueName))
54 | require.NoError(t, err)
55 |
56 | // --- (2) ----
57 | // Act: Validate queue
58 | err = ValidateFields(ctx, db, queueName)
59 |
60 | // Assert
61 | require.NoError(t, err)
62 | }
63 |
64 | func TestValidator_ValidateFieldsIncorrectSchema(t *testing.T) {
65 | // --- (1) ----
66 | // Arrange
67 | ctx := context.Background()
68 | db := openDB(t)
69 | queueName := fmt.Sprintf("TestQueue_%s", generateRandomString(10))
70 | t.Cleanup(func() {
71 | _, err := db.ExecContext(ctx, generateDropTableQuery(queueName))
72 | require.NoError(t, err)
73 | })
74 | // Create the new incorrect queue
75 | _, err := db.ExecContext(ctx, generateInvalidQueueQuery(queueName))
76 | require.NoError(t, err)
77 |
78 | // --- (2) ----
79 | // Act: Validate queue
80 | err = ValidateFields(ctx, db, queueName)
81 |
82 | // Assert
83 | require.Error(t, err)
84 | }
85 |
86 | func TestValidator_ValidateIndexesCorrectSchema(t *testing.T) {
87 | // --- (1) ----
88 | // Arrange
89 | ctx := context.Background()
90 | db := openDB(t)
91 | queueName := fmt.Sprintf("TestQueue_%s", generateRandomString(10))
92 | t.Cleanup(func() {
93 | _, err := db.ExecContext(ctx, generateDropTableQuery(queueName))
94 | require.NoError(t, err)
95 | })
96 |
97 | // Create the new queue
98 | _, err := db.ExecContext(ctx, generateCreateTableQuery(queueName))
99 | require.NoError(t, err)
100 |
101 | // --- (2) ----
102 | // Act: Validate queue
103 | err = ValidateIndexes(ctx, db, queueName)
104 |
105 | // Assert
106 | require.NoError(t, err)
107 | }
108 |
109 | func TestValidator_ValidateIndexesCorrectSchema_CompositeIndexes(t *testing.T) {
110 | // --- (1) ----
111 | // Arrange
112 | ctx := context.Background()
113 | db := openDB(t)
114 | queueName := fmt.Sprintf("TestQueue_%s", generateRandomString(10))
115 | t.Cleanup(func() {
116 | _, err := db.ExecContext(ctx, generateDropTableQuery(queueName))
117 | require.NoError(t, err)
118 | })
119 | // Create the new queue
120 | _, err := db.ExecContext(ctx, generateCreateTableQueryCompositeIndex(queueName))
121 | require.NoError(t, err)
122 |
123 | // --- (2) ----
124 | // Act: Validate queue
125 | err = ValidateIndexes(ctx, db, queueName)
126 |
127 | // Assert
128 | require.NoError(t, err)
129 | }
130 |
131 | func TestValidator_ValidateIndexesIncorrectSchema(t *testing.T) {
132 | // --- (1) ----
133 | // Arrange
134 | ctx := context.Background()
135 | db := openDB(t)
136 | queueName := fmt.Sprintf("TestQueue_%s", generateRandomString(10))
137 | t.Cleanup(func() {
138 | _, err := db.ExecContext(ctx, generateDropTableQuery(queueName))
139 | require.NoError(t, err)
140 | })
141 | // Create the new incorrect queue
142 | _, err := db.ExecContext(ctx, generateInvalidQueueQuery(queueName))
143 | require.NoError(t, err)
144 |
145 | // --- (2) ----
146 | // Act: Validate queue
147 | err = ValidateIndexes(ctx, db, queueName)
148 |
149 | // Assert
150 | require.Error(t, err)
151 | }
152 |
153 | // TODO: This was copied over from the consumer_test.go file. We could move these
154 | // shared helpers into a common testing package.
155 | func openDB(t *testing.T) *sqlx.DB {
156 | dsn, ok := os.LookupEnv("TEST_POSTGRES_DSN")
157 | if !ok {
158 | t.Skip("Skipping integration test, TEST_POSTGRES_DSN is not set")
159 | }
160 | db, err := sqlx.Open("pgx", dsn)
161 | require.NoError(t, err)
162 | t.Cleanup(func() {
163 | err := db.Close()
164 | require.NoError(t, err)
165 | })
166 | ensureUUIDExtension(t, db)
167 | return db
168 | }
169 |
170 | func ensureUUIDExtension(t *testing.T, db *sqlx.DB) {
171 | _, err := db.Exec(`
172 | DO $$
173 | BEGIN
174 | IF current_setting('server_version_num')::int < 130000 THEN
175 | -- If PostgreSQL version is less than 13, enable pgcrypto
176 | CREATE EXTENSION IF NOT EXISTS pgcrypto;
177 | END IF;
178 | END $$;
179 | `)
180 | require.NoError(t, err)
181 | }
182 |
183 | func generateRandomString(length int) string {
184 | b := make([]byte, length)
185 | _, err := rand.Read(b)
186 | if err != nil {
187 | panic(err)
188 | }
189 | return base64.StdEncoding.EncodeToString(b)
190 | }
191 |
192 | func generateInvalidQueueQuery(queueName string) string {
193 | quotedTableName := pg.QuoteIdentifier(queueName)
194 | return fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %[1]s
195 | (
196 | id UUID DEFAULT gen_random_uuid() NOT NULL PRIMARY KEY,
197 | created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP NOT NULL,
198 | started_at TIMESTAMPTZ NULL,
199 | description TEXT NULL,
200 | name TEXT NULL
201 | );
202 | `, quotedTableName)
203 | }
204 |
205 | func generateCreateTableQuery(queueName string) string {
206 | quotedTableName := pg.QuoteIdentifier(queueName)
207 | return fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %[1]s
208 | (
209 | id UUID DEFAULT gen_random_uuid() NOT NULL PRIMARY KEY,
210 | created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP NOT NULL,
211 | started_at TIMESTAMPTZ NULL,
212 | locked_until TIMESTAMPTZ NULL,
213 | processed_at TIMESTAMPTZ NULL,
214 | consumed_count INTEGER DEFAULT 0 NOT NULL,
215 | error_detail TEXT NULL,
216 | payload JSONB NOT NULL,
217 | metadata JSONB NOT NULL
218 | );
219 | CREATE INDEX IF NOT EXISTS "%[2]s_created_at_idx" ON %[1]s (created_at);
220 | CREATE INDEX IF NOT EXISTS "%[2]s_processed_at_null_idx" ON %[1]s (processed_at) WHERE (processed_at IS NULL);
221 | `, quotedTableName, quotedTableName[1:len(quotedTableName)-1])
222 | }
223 |
224 | func generateCreateTableQueryCompositeIndex(queueName string) string {
225 | quotedTableName := pg.QuoteIdentifier(queueName)
226 | return fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %[1]s
227 | (
228 | id UUID DEFAULT gen_random_uuid() NOT NULL PRIMARY KEY,
229 | created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP NOT NULL,
230 | started_at TIMESTAMPTZ NULL,
231 | locked_until TIMESTAMPTZ NULL,
232 | processed_at TIMESTAMPTZ NULL,
233 | consumed_count INTEGER DEFAULT 0 NOT NULL,
234 | error_detail TEXT NULL,
235 | payload JSONB NOT NULL,
236 | metadata JSONB NOT NULL
237 | );
238 | CREATE INDEX IF NOT EXISTS "%[2]s_created_at_idx" ON %[1]s (created_at);
239 | CREATE INDEX IF NOT EXISTS "%[2]s_processed_at_null_idx" ON %[1]s (consumed_count, processed_at) WHERE (processed_at IS NULL);
240 | `, quotedTableName, quotedTableName[1:len(quotedTableName)-1])
241 | }
242 |
243 | func generateCreateTablePartitionedQuery(queueName string) string {
244 | quotedTableName := pg.QuoteIdentifier(queueName)
245 | return fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %[1]s
246 | (
247 | id UUID DEFAULT gen_random_uuid() NOT NULL,
248 | created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP NOT NULL,
249 | started_at TIMESTAMPTZ NULL,
250 | locked_until TIMESTAMPTZ NULL,
251 | processed_at TIMESTAMPTZ NULL,
252 | consumed_count INTEGER DEFAULT 0 NOT NULL,
253 | error_detail TEXT NULL,
254 | payload JSONB NOT NULL,
255 | metadata JSONB NOT NULL
256 | ) PARTITION BY RANGE (created_at);
257 | CREATE TABLE "%[2]s_y2024m02" PARTITION OF %[1]s FOR VALUES FROM ('2024-01-01') TO ('2024-02-01');
258 | CREATE INDEX IF NOT EXISTS "%[2]s_created_at_idx" ON %[1]s (created_at);
259 | CREATE INDEX IF NOT EXISTS "%[2]s_processed_at_null_idx" ON %[1]s (processed_at) WHERE (processed_at IS NULL);
260 | `, quotedTableName, quotedTableName[1:len(quotedTableName)-1])
261 | }
262 |
263 | func generateDropTableQuery(queueName string) string {
264 | quotedTableName := pg.QuoteIdentifier(queueName)
265 | return `DROP TABLE IF EXISTS ` + quotedTableName
266 | }
267 |
--------------------------------------------------------------------------------
/x/schema/queue.go:
--------------------------------------------------------------------------------
1 | // Package schema provides general functions and constants for the Postgres table schema and pgq setup.
2 | package schema
3 |
4 | import (
5 | "fmt"
6 |
7 | "go.dataddo.com/pgq/internal/pg"
8 | )
9 |
10 | // GenerateCreateTableQuery returns the query for creating the queue table
11 | func GenerateCreateTableQuery(queueName string) string {
12 | quotedTableName := pg.QuoteIdentifier(queueName)
13 | return fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %[1]s
14 | (
15 | id UUID DEFAULT gen_random_uuid() NOT NULL PRIMARY KEY,
16 | created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP NOT NULL,
17 | started_at TIMESTAMPTZ NULL,
18 | locked_until TIMESTAMPTZ NULL,
19 | scheduled_for TIMESTAMPTZ NULL,
20 | processed_at TIMESTAMPTZ NULL,
21 | consumed_count INTEGER DEFAULT 0 NOT NULL,
22 | error_detail TEXT NULL,
23 | payload JSONB NOT NULL,
24 | metadata JSONB NOT NULL
25 | );
26 | CREATE INDEX IF NOT EXISTS "%[2]s_created_at_idx" ON %[1]s (created_at);
27 | CREATE INDEX IF NOT EXISTS "%[2]s_processed_at_null_idx" ON %[1]s (processed_at) WHERE (processed_at IS NULL);
28 | CREATE INDEX IF NOT EXISTS "%[2]s_scheduled_for_idx" ON %[1]s (scheduled_for ASC NULLS LAST) WHERE (processed_at IS NULL);
29 | CREATE INDEX IF NOT EXISTS "%[2]s_metadata_idx" ON %[1]s USING GIN(metadata) WHERE processed_at IS NULL;
30 | `, quotedTableName, quotedTableName[1:len(quotedTableName)-1])
31 | }
32 |
33 | // GenerateDropTableQuery returns a postgres query for dropping the queue table
34 | func GenerateDropTableQuery(queueName string) string {
35 | quotedTableName := pg.QuoteIdentifier(queueName)
36 | return `DROP TABLE IF EXISTS ` + quotedTableName
37 | }
38 |
--------------------------------------------------------------------------------