├── .github
│   └── workflows
│       └── test.yml
├── .gitignore
├── LICENSE
├── README.md
├── disk
│   └── disk.go
├── docs
│   ├── Queue-Speed-With-Prioritize.png
│   ├── Queue-Speed-Without-Prioritize.png
│   ├── Time-to-Send-and-Receive-VS-Bucket-Count.png
│   ├── bench-report-no-repro.csv
│   ├── bench-report-repro.csv
│   └── gpq.png
├── ftime
│   ├── ftime.go
│   └── helpers.go
├── go.mod
├── go.sum
├── gpq.go
├── gpq_base_test.go
├── gpq_e2e_test.go
├── gpq_helpers_test.go
├── gpq_parallel_test.go
├── graphs
│   └── graph.go
├── helpers.go
├── queues
│   ├── cpq.go
│   ├── gheap
│   │   └── gheap.go
│   └── pq.go
└── schema
    └── schema.go
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Build Test
2 |
3 | on:
4 | push:
5 | branches:
6 | - "master"
7 | tags:
8 | - "v*"
9 | pull_request:
10 | branches:
11 | - "master"
12 |
13 | jobs:
14 | build:
15 | runs-on: ubuntu-latest
16 | steps:
17 | - name: Checkout
18 | uses: actions/checkout@v4
19 |
20 | - name: Setup Go 1.22
21 | uses: actions/setup-go@v5
22 | with:
23 | go-version: "1.22"
24 |
25 |       - name: Test
26 |         run: |
27 |           go test -v ./...
28 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .history/*
2 | profile.mutex
3 | profile.pprof
4 | bench/
5 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Justin Timperio
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | *(GPQ logo: docs/gpq.png)*
2 |
3 |
4 |
5 |
6 | GPQ is an extremely fast and flexible priority queue, capable of millions of transactions per second. GPQ supports a complex "Double Priority Queue" that distributes priorities across N buckets, with each bucket holding a second, internal priority queue. This allows items to escalate or time out based on parameters you specify at submission, combined with how frequently you ask GPQ to prioritize the queue.
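To make the bucket model concrete, below is a toy sketch of the idea. This is purely illustrative and is not GPQ's implementation: the names are invented for the example, the inner queues are plain FIFOs, and the real library adds concurrency safety, real heaps, and disk persistence.

```go
package main

import (
	"fmt"
	"time"
)

// item carries the per-item knobs described above.
type item struct {
	data       string
	submitted  time.Time
	escalation time.Duration
	timeout    time.Duration
}

// doubleQueue holds N priority buckets; bucket 0 is the highest
// priority, and each bucket orders its own items (a simple FIFO here).
type doubleQueue struct {
	buckets [][]item
}

func (q *doubleQueue) enqueue(priority int, it item) {
	q.buckets[priority] = append(q.buckets[priority], it)
}

// dequeue scans from the highest-priority bucket down.
func (q *doubleQueue) dequeue() (item, bool) {
	for i := range q.buckets {
		if len(q.buckets[i]) > 0 {
			it := q.buckets[i][0]
			q.buckets[i] = q.buckets[i][1:]
			return it, true
		}
	}
	return item{}, false
}

// prioritize applies the timeout and escalation rules; in GPQ this
// work happens only when you ask for it, via Prioritize().
func (q *doubleQueue) prioritize(now time.Time) {
	for i := range q.buckets {
		kept := q.buckets[i][:0]
		for _, it := range q.buckets[i] {
			age := now.Sub(it.submitted)
			switch {
			case age > it.timeout:
				// drop items that have waited too long to matter
			case i > 0 && age > it.escalation:
				// bump long-waiting items up one bucket
				q.buckets[i-1] = append(q.buckets[i-1], it)
			default:
				kept = append(kept, it)
			}
		}
		q.buckets[i] = kept
	}
}

func main() {
	q := &doubleQueue{buckets: make([][]item, 3)}
	q.enqueue(2, item{
		data:       "waited too long at low priority",
		submitted:  time.Now().Add(-2 * time.Second),
		escalation: time.Second,
		timeout:    time.Minute,
	})
	q.prioritize(time.Now())
	it, _ := q.dequeue()
	fmt.Println(it.data) // escalated from bucket 2 to bucket 1
}
```

In GPQ itself, the same knobs are set per item through `schema.EnqueueOptions`, and the escalation/timeout pass runs when you call `Prioritize()`.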
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 | ## Notice
19 | While GPQ is largely stable, bugs are more than likely present at this early stage, and you should carefully consider whether your application can tolerate any downtime or lost messages that may result from adopting this project in a production workflow. If you run into any bugs, please submit an issue, or better yet a PR! Check out the guide to contributing below.
20 |
21 |
22 | ## Table of Contents
23 | - [Notice](#notice)
24 | - [Table of Contents](#table-of-contents)
25 | - [Background](#background)
26 | - [Should I Use GPQ?](#should-i-use-gpq)
27 | - [Sister Projects](#sister-projects)
28 | - [Benchmarks](#benchmarks)
29 | - [Usage](#usage)
30 | - [Prerequisites](#prerequisites)
31 | - [API Reference](#api-reference)
32 | - [Submitting Items to the Queue](#submitting-items-to-the-queue)
33 | - [Contributing](#contributing)
34 | - [We Develop with Github](#we-develop-with-github)
35 | - [All Code Changes Happen Through Pull Requests](#all-code-changes-happen-through-pull-requests)
36 | - [Any contributions you make will be under the MIT Software License](#any-contributions-you-make-will-be-under-the-mit-software-license)
37 | - [Report bugs using Github's Issues](#report-bugs-using-githubs-issues)
38 | - [Write bug reports with detail, background, and sample code](#write-bug-reports-with-detail-background-and-sample-code)
39 | - [License](#license)
40 |
41 | ## Background
42 | GPQ was written as an experiment when I was playing with [Fibonacci Heaps](https://en.wikipedia.org/wiki/Fibonacci_heap) and wanted to find something faster. I was disappointed by the state of research and libraries being used by most common applications, so GPQ is meant to be a highly flexible framework that can support a multitude of workloads.
43 |
44 | ### Should I Use GPQ?
45 | GPQ is a concurrency safe, embeddable priority queue that can be used in a variety of applications. GPQ might be the right choice if:
46 | - Your data requires strict ordering guarantees
47 | - You need to prioritize items that are in the queue too long
48 | - You need to timeout items
49 | - You have multiple writers and readers that need to access the queue concurrently
50 | - You run critical workloads and need to store the queue on disk in case of a crash
51 |
52 | ### Sister Projects
53 | - [fibheap (Fibonacci Heaps)](https://github.com/JustinTimperio/fibheap)
54 | - [rpq (Rust Priority Queue)](https://github.com/JustinTimperio/rpq)
55 | - [gpq-server (GPQ Server)](https://github.com/JustinTimperio/gpq-server)
56 | - [pq-bench (Priority Queue Benchmarks)](https://github.com/JustinTimperio/pq-bench)
57 |
58 |
59 | ## Benchmarks
60 | Since most operations complete in constant time `O(1)` or logarithmic time `O(log n)`, with the exception of the prioritize function, which runs in linear time `O(n)`, all GPQ operations are extremely fast. A single GPQ can handle a few million transactions a second and can be tuned for your workload. I have included some basic benchmarks in C++, Rust, Zig, Python, and Go that measure GPQ against the standard implementations of those languages, available at [pq-bench](https://github.com/JustinTimperio/pq-bench). A minimal benchmark sketch you can run yourself follows the charts below.
61 |
62 | *(Benchmark charts: docs/Queue-Speed-With-Prioritize.png, docs/Queue-Speed-Without-Prioritize.png, and docs/Time-to-Send-and-Receive-VS-Bucket-Count.png)*
63 |
64 |
65 |
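If you want a rough number on your own hardware, a micro-benchmark along these lines is a reasonable starting point. This is a sketch, not the harness used for the charts above, and it assumes that the zero values of `schema.GPQOptions` (beyond `MaxPriority`) leave the disk cache disabled and that `schema.NewItem` takes the priority as a `uint`:

```go
package gpq_test

import (
	"testing"

	"github.com/JustinTimperio/gpq"
	"github.com/JustinTimperio/gpq/schema"
)

// BenchmarkEnqueueDequeue measures raw in-memory throughput
// across 10 buckets with no escalation or timeouts.
func BenchmarkEnqueueDequeue(b *testing.B) {
	_, queue, err := gpq.NewGPQ[int](schema.GPQOptions{MaxPriority: 10})
	if err != nil {
		b.Fatal(err)
	}
	defer queue.Close()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		item := schema.NewItem(uint(i%10), i, schema.EnqueueOptions{})
		if err := queue.Enqueue(item); err != nil {
			b.Fatal(err)
		}
		if _, err := queue.Dequeue(); err != nil {
			b.Fatal(err)
		}
	}
}
```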
66 |
67 | ## Usage
68 | At its core, GPQ is an embeddable priority queue meant to sit inside critical workloads that require complex queueing and strict delivery-order guarantees. The best way to use it is to import it directly.
69 |
70 | ```go
71 | import "github.com/JustinTimperio/gpq"
72 | ```
73 |
74 | ### Prerequisites
75 | You will need Go >= `1.22`; you can then fetch the module with `go get github.com/JustinTimperio/gpq`. GPQ itself depends on [hashmap](https://github.com/cornelk/hashmap), [btree](https://github.com/tidwall/btree), and [BadgerDB](https://github.com/dgraph-io/badger).
76 |
77 | ### API Reference
78 | - `NewGPQ[d any](options schema.GPQOptions) (uint, *GPQ[d], error)` - Creates a new GPQ with the specified options and returns the number of restored items, the GPQ, and an error if one occurred.
79 | - `ItemsInQueue() uint` - Returns the number of items in the queue.
80 | - `ItemsInDB() uint` - Returns the number of items in the database.
81 | - `ActiveBuckets() uint` - Returns the number of active buckets.
82 | - `Enqueue(item schema.Item[d]) error` - Enqueues an item into the queue.
83 | - `EnqueueBatch(items []schema.Item[d]) error` - Enqueues a batch of items into the queue.
84 | - `Dequeue() (*schema.Item[d], error)` - Dequeues an item from the queue.
85 | - `DequeueBatch(batchSize uint) ([]*schema.Item[d], error)` - Dequeues a batch of items from the queue.
86 | - `Prioritize() error` - Prioritizes the queue based on the values in each item.
87 | - `Close()` - Closes the queue and saves the queue to disk.
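For the batch calls specifically, here is a short, hedged sketch. It assumes the zero values of `schema.GPQOptions` (other than `MaxPriority`) and `schema.EnqueueOptions` are usable defaults with the disk cache disabled:

```go
package main

import (
	"fmt"
	"log"

	"github.com/JustinTimperio/gpq"
	"github.com/JustinTimperio/gpq/schema"
)

func main() {
	_, queue, err := gpq.NewGPQ[string](schema.GPQOptions{MaxPriority: 10})
	if err != nil {
		log.Fatalln(err)
	}
	defer queue.Close()

	// Build one batch of 100 items spread across the 10 buckets
	batch := make([]schema.Item[string], 0, 100)
	for i := 0; i < 100; i++ {
		batch = append(batch, schema.NewItem(uint(i%10), fmt.Sprintf("job-%d", i), schema.EnqueueOptions{}))
	}
	if err := queue.EnqueueBatch(batch); err != nil {
		log.Fatalln(err)
	}

	// Pull half of them back out in a single call
	out, err := queue.DequeueBatch(50)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("dequeued", len(out), "items,", queue.ItemsInQueue(), "remaining")
}
```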
88 |
89 | ### Submitting Items to the Queue
90 | Once you have an initialized queue you can easily submit items like the following:
91 | ```go
92 | package main
93 |
94 | import (
95 | 	"log"
96 | 	"time"
97 |
98 | 	"github.com/JustinTimperio/gpq"
99 | 	"github.com/JustinTimperio/gpq/schema"
100 | )
101 |
102 | const maxBuckets, total uint = 10, 1_000_000
103 |
104 | func main() {
105 | 	defaultMessageOptions := schema.EnqueueOptions{
106 | 		ShouldEscalate: true,
107 | 		EscalationRate: time.Second,
108 | 		CanTimeout:     true,
109 | 		Timeout:        time.Second * 5,
110 | 	}
111 |
112 | 	opts := schema.GPQOptions{
113 | 		MaxPriority: maxBuckets,
114 |
115 | 		DiskCacheEnabled:      true,
116 | 		DiskCachePath:         "/tmp/gpq",
117 | 		DiskCacheCompression:  true,
118 | 		DiskEncryptionEnabled: true,
119 | 		DiskEncryptionKey:     []byte("12345678901234567890123456789012"),
120 | 		DiskWriteDelay:        time.Second * 5,
121 |
122 | 		LazyDiskCacheEnabled:     true,
123 | 		LazyDiskCacheChannelSize: 1_000_000,
124 | 		LazyDiskBatchSize:        10_000,
125 | 	}
126 |
127 | 	_, queue, err := gpq.NewGPQ[uint](opts)
128 | 	if err != nil {
129 | 		log.Fatalln(err)
130 | 	}
131 |
132 | 	for i := uint(0); i < total; i++ {
133 | 		item := schema.NewItem(i%maxBuckets, i, defaultMessageOptions)
134 |
135 | 		if err := queue.Enqueue(item); err != nil {
136 | 			log.Fatalln(err)
137 | 		}
138 | 	}
139 |
140 | 	for i := uint(0); i < total; i++ {
141 | 		if _, err := queue.Dequeue(); err != nil {
142 | 			log.Fatalln(err)
143 | 		}
144 | 	}
145 |
146 | 	queue.Close()
147 | }
148 | ```
149 |
150 | You have a few options when submitting an item, such as whether it should escalate over time if not sent, or inversely, time out if it has been enqueued too long to still be relevant.
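Escalation and timeouts only take effect when the queue is actually prioritized, so a long-running service will typically call `Prioritize()` on an interval. A minimal sketch of that pattern (the interval and options here are arbitrary):

```go
package main

import (
	"log"
	"time"

	"github.com/JustinTimperio/gpq"
	"github.com/JustinTimperio/gpq/schema"
)

func main() {
	_, queue, err := gpq.NewGPQ[string](schema.GPQOptions{MaxPriority: 10})
	if err != nil {
		log.Fatalln(err)
	}
	defer queue.Close()

	// Re-run the escalation/timeout pass every 100ms until done is closed
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	done := make(chan struct{})

	go func() {
		for {
			select {
			case <-done:
				return
			case <-ticker.C:
				if err := queue.Prioritize(); err != nil {
					log.Println("prioritize:", err)
				}
			}
		}
	}()

	// ... enqueue and dequeue work here ...

	time.Sleep(time.Second)
	close(done)
}
```

The more frequently you run `Prioritize()`, the more tightly the escalation and timeout parameters are honored, at the cost of the `O(n)` pass described in the benchmarks above.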
151 |
152 | ## Contributing
153 | GPQ is actively looking for maintainers, so feel free to help out by:
154 |
155 | - Reporting a bug
156 | - Discussing the current state of the code
157 | - Submitting a fix
158 | - Proposing new features
159 |
160 | ### We Develop with GitHub
161 | We use GitHub to host code, track issues and feature requests, and accept pull requests.
162 |
163 | ### All Code Changes Happen Through Pull Requests
164 | 1. Fork the repo and create your branch from `master`.
165 | 2. If you've added code that should be tested, add tests.
166 | 3. If you've changed APIs, update the documentation.
167 | 4. Ensure the test suite passes.
168 | 5. Make sure your code lints.
169 | 6. Issue that pull request!
170 |
171 | ### Any contributions you make will be under the MIT Software License
172 | In short, when you submit code changes, your submissions are understood to be under the same [MIT License](http://choosealicense.com/licenses/mit/) that covers the project. Feel free to contact the maintainers if that's a concern.
173 |
174 | ### Report bugs using GitHub's [Issues](https://github.com/JustinTimperio/gpq/issues)
175 | We use GitHub issues to track public bugs. Report a bug by opening a new issue; it's that easy!
176 |
177 | ### Write bug reports with detail, background, and sample code
178 | **Great Bug Reports** tend to have:
179 |
180 | - A quick summary and/or background
181 | - Steps to reproduce
182 | - Be specific!
183 | - Give sample code if you can.
184 | - What you expected would happen
185 | - What actually happens
186 | - Notes (possibly including why you think this might be happening, or stuff you tried that didn't work)
187 |
188 | ## License
189 | All code here was originally written by me, Justin Timperio, under an MIT license, with the exception of some code forked directly from the Go maintainers under a BSD license.
--------------------------------------------------------------------------------
/disk/disk.go:
--------------------------------------------------------------------------------
1 | package disk
2 |
3 | import (
4 | "errors"
5 |
6 | "github.com/JustinTimperio/gpq/schema"
7 |
8 | "github.com/dgraph-io/badger/v4"
9 | bOptions "github.com/dgraph-io/badger/v4/options"
10 | )
11 |
12 | type Disk[T any] struct {
13 | diskCache *badger.DB
14 | }
15 |
16 | func NewDiskCache[T any](bLogger badger.Logger, options schema.GPQOptions) (*Disk[T], error) {
17 |
18 | if options.DiskCachePath == "" {
19 | return nil, errors.New("Error creating disk cache: path is empty")
20 | }
21 |
22 | opts := badger.DefaultOptions(options.DiskCachePath)
23 | opts.Logger = bLogger
24 | if options.DiskCacheCompression {
25 | opts.Compression = bOptions.ZSTD
26 | }
27 | if options.DiskEncryptionEnabled {
28 | 		// badger options use value semantics, so the result must be assigned back;
29 | 		opts = opts.WithEncryptionKey(options.DiskEncryptionKey).WithIndexCacheSize(100 << 20) // encryption also requires an index cache
29 | }
30 | db, err := badger.Open(opts)
31 | if err != nil {
32 | return nil, errors.New("Error opening disk cache: " + err.Error())
33 | }
34 |
35 | return &Disk[T]{diskCache: db}, nil
36 | }
37 |
38 | func (d *Disk[T]) Close() error {
39 | d.diskCache.Sync()
40 | return d.diskCache.Close()
41 | }
42 |
43 | func (d *Disk[T]) ItemsInDB() uint {
44 | var count uint
45 | _ = d.diskCache.View(func(txn *badger.Txn) error {
46 | opts := badger.DefaultIteratorOptions
47 | it := txn.NewIterator(opts)
48 | defer it.Close()
49 |
50 | for it.Rewind(); it.Valid(); it.Next() {
51 | count++
52 | }
53 |
54 | return nil
55 | })
56 | return count
57 | }
58 |
59 | func (d *Disk[T]) ProcessBatch(batch []*schema.Item[T]) error {
60 | txn := d.diskCache.NewTransaction(true)
61 | defer txn.Discard()
62 |
63 | for i := 0; i < len(batch); i++ {
64 | entry := batch[i]
65 | b, err := entry.ToBytes()
66 | if err != nil {
67 | return err
68 | }
69 | err = txn.Set(entry.DiskUUID, b)
70 | 		if err == badger.ErrTxnTooBig {
71 | 			// The transaction is full: commit it and retry the write in a fresh one
72 | 			if err := txn.Commit(); err != nil {
73 | 				return err
74 | 			}
75 | 			txn = d.diskCache.NewTransaction(true)
76 | 			if err := txn.Set(entry.DiskUUID, b); err != nil { return err }
77 | } else if err != nil {
78 | return err
79 | }
80 | }
81 |
82 | // Commit the final transaction, if it has any pending writes
83 | if err := txn.Commit(); err != nil {
84 | return err
85 | }
86 | return nil
87 | }
88 |
89 | func (d *Disk[T]) DeleteBatch(batch []*schema.DeleteMessage) error {
90 | txn := d.diskCache.NewTransaction(true)
91 | defer txn.Discard()
92 |
93 | for i := 0; i < len(batch); i++ {
94 | entry := batch[i]
95 | err := txn.Delete(entry.DiskUUID)
96 | 		if err == badger.ErrTxnTooBig {
97 | 			if err := txn.Commit(); err != nil {
98 | 				return err
99 | 			}
100 | 			txn = d.diskCache.NewTransaction(true) // the transaction was full: retry in a fresh one
101 | 			if err := txn.Delete(entry.DiskUUID); err != nil { return err }
102 | } else if err != nil {
103 | return err
104 | }
105 | }
106 |
107 | // Commit the final transaction, if it has any pending writes
108 | if err := txn.Commit(); err != nil {
109 | return err
110 | }
111 | return nil
112 | }
113 |
114 | func (d *Disk[T]) WriteSingle(key []byte, value schema.Item[T]) error {
115 | b, err := value.ToBytes()
116 | if err != nil {
117 | return err
118 | }
119 | err = d.diskCache.Update(func(txn *badger.Txn) error {
120 | return txn.Set(key, b)
121 | })
122 |
123 | return err
124 | }
125 |
126 | func (d *Disk[T]) DeleteSingle(key []byte) error {
127 | err := d.diskCache.Update(func(txn *badger.Txn) error {
128 | return txn.Delete(key)
129 | })
130 |
131 | return err
132 | }
133 |
134 | func (d *Disk[T]) RestoreFromDisk() ([]*schema.Item[T], error) {
135 | var items []*schema.Item[T]
136 |
137 | // Re-add items to the GPQ from the disk cache
138 | err := d.diskCache.View(func(txn *badger.Txn) error {
139 | opts := badger.DefaultIteratorOptions
140 | it := txn.NewIterator(opts)
141 | defer it.Close()
142 |
143 | 		for it.Rewind(); it.Valid(); it.Next() {
144 | 			var value []byte
145 | 			item := it.Item()
146 |
147 | 			// Copy the value out of the iterator item so it stays
148 | 			// valid after the iterator advances
149 | 			err := item.Value(func(val []byte) error {
150 | 				value = append([]byte{}, val...)
151 | 				return nil
152 | 			})
153 | 			if err != nil {
154 | 				return err
155 | 			}
156 |
157 | 			if len(value) == 0 {
158 | 				return errors.New("Error reading from disk cache: value is empty")
159 | 			}
160 |
161 | 			// Rebuild the item and flag it as restored
162 | 			obj := new(schema.Item[T])
163 | 			obj.FromBytes(value)
164 | 			obj.WasRestored = true
165 | 			items = append(items, obj)
166 |
167 | 		}
168 |
169 | return nil
170 | })
171 | if err != nil {
172 | return nil, errors.New("Error reading from disk cache: " + err.Error())
173 | }
174 |
175 | 	return items, nil
176 | }
177 |
--------------------------------------------------------------------------------
/docs/Queue-Speed-With-Prioritize.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JustinTimperio/gpq/23a8ae90c534c81d980ee75e0e54b03140fd32b2/docs/Queue-Speed-With-Prioritize.png
--------------------------------------------------------------------------------
/docs/Queue-Speed-Without-Prioritize.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JustinTimperio/gpq/23a8ae90c534c81d980ee75e0e54b03140fd32b2/docs/Queue-Speed-Without-Prioritize.png
--------------------------------------------------------------------------------
/docs/Time-to-Send-and-Receive-VS-Bucket-Count.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JustinTimperio/gpq/23a8ae90c534c81d980ee75e0e54b03140fd32b2/docs/Time-to-Send-and-Receive-VS-Bucket-Count.png
--------------------------------------------------------------------------------
/docs/bench-report-no-repro.csv:
--------------------------------------------------------------------------------
1 | Total Items,Buckets,Removed,Escalated,Time Elapsed,Time to Send,Time to Receive
2 | 1000000,5,0,0,0.480352,0.124661,0.355690
3 | 1000000,10,0,0,0.456221,0.123449,0.332772
4 | 1000000,15,0,0,0.450382,0.123358,0.327024
5 | 1000000,20,0,0,0.401607,0.121166,0.280440
6 | 1000000,25,0,0,0.391769,0.120379,0.271390
7 | 1000000,30,0,0,0.378708,0.127517,0.251192
8 | 1000000,35,0,0,0.414640,0.124497,0.290143
9 | 1000000,40,0,0,0.361109,0.122513,0.238597
10 | 1000000,45,0,0,0.370787,0.130278,0.240508
11 | 1000000,50,0,0,0.386700,0.120075,0.266625
12 | 1000000,55,0,0,0.377624,0.123339,0.254285
13 | 1000000,60,0,0,0.363803,0.124509,0.239294
14 | 1000000,65,0,0,0.373651,0.130343,0.243308
15 | 1000000,70,0,0,0.358304,0.129457,0.228846
16 | 1000000,75,0,0,0.356953,0.129481,0.227471
17 | 1000000,80,0,0,0.372278,0.136249,0.236029
18 | 1000000,85,0,0,0.354502,0.131480,0.223022
19 | 1000000,90,0,0,0.355176,0.128582,0.226594
20 | 1000000,95,0,0,0.368184,0.134018,0.234166
21 | 1000000,100,0,0,0.367679,0.127886,0.239793
22 | 2000000,5,0,0,1.080151,0.270605,0.809546
23 | 2000000,10,0,0,0.992683,0.242134,0.750549
24 | 2000000,15,0,0,0.954768,0.247791,0.706977
25 | 2000000,20,0,0,0.977516,0.244314,0.733203
26 | 2000000,25,0,0,0.913640,0.241858,0.671782
27 | 2000000,30,0,0,0.895809,0.249782,0.646027
28 | 2000000,35,0,0,0.811538,0.245463,0.566074
29 | 2000000,40,0,0,0.820050,0.250800,0.569249
30 | 2000000,45,0,0,0.771446,0.248302,0.523144
31 | 2000000,50,0,0,0.815834,0.242626,0.573208
32 | 2000000,55,0,0,0.775734,0.252302,0.523432
33 | 2000000,60,0,0,0.759699,0.251128,0.508572
34 | 2000000,65,0,0,0.789265,0.260698,0.528567
35 | 2000000,70,0,0,0.755390,0.262109,0.493281
36 | 2000000,75,0,0,0.759171,0.256032,0.503139
37 | 2000000,80,0,0,0.798577,0.261225,0.537352
38 | 2000000,85,0,0,0.741509,0.261255,0.480254
39 | 2000000,90,0,0,0.734015,0.263220,0.470795
40 | 2000000,95,0,0,0.734381,0.259012,0.475370
41 | 2000000,100,0,0,0.785392,0.252216,0.533176
42 | 3000000,5,0,0,1.554099,0.356306,1.197792
43 | 3000000,10,0,0,1.542341,0.358731,1.183610
44 | 3000000,15,0,0,1.526235,0.363726,1.162509
45 | 3000000,20,0,0,1.493434,0.362188,1.131246
46 | 3000000,25,0,0,1.501360,0.363820,1.137541
47 | 3000000,30,0,0,1.438228,0.374326,1.063902
48 | 3000000,35,0,0,1.359310,0.373921,0.985390
49 | 3000000,40,0,0,1.279624,0.371083,0.908540
50 | 3000000,45,0,0,1.280505,0.375016,0.905489
51 | 3000000,50,0,0,1.308245,0.366860,0.941384
52 | 3000000,55,0,0,1.222253,0.384495,0.837757
53 | 3000000,60,0,0,1.265391,0.379666,0.885725
54 | 3000000,65,0,0,1.258930,0.391462,0.867469
55 | 3000000,70,0,0,1.239573,0.389759,0.849813
56 | 3000000,75,0,0,1.211239,0.381575,0.829664
57 | 3000000,80,0,0,1.215992,0.395794,0.820198
58 | 3000000,85,0,0,1.164770,0.384545,0.780225
59 | 3000000,90,0,0,1.182612,0.390766,0.791847
60 | 3000000,95,0,0,1.175426,0.404903,0.770524
61 | 3000000,100,0,0,1.228954,0.381821,0.847133
62 | 4000000,5,0,0,2.132397,0.480412,1.651985
63 | 4000000,10,0,0,2.128339,0.476332,1.652007
64 | 4000000,15,0,0,2.126882,0.498357,1.628524
65 | 4000000,20,0,0,2.127416,0.488123,1.639293
66 | 4000000,25,0,0,2.074483,0.493737,1.580746
67 | 4000000,30,0,0,2.022730,0.489689,1.533041
68 | 4000000,35,0,0,1.863314,0.501425,1.361889
69 | 4000000,40,0,0,1.860621,0.501323,1.359299
70 | 4000000,45,0,0,1.748524,0.495773,1.252750
71 | 4000000,50,0,0,1.861381,0.489861,1.371520
72 | 4000000,55,0,0,1.715253,0.497953,1.217300
73 | 4000000,60,0,0,1.764435,0.498660,1.265775
74 | 4000000,65,0,0,1.767213,0.516821,1.250392
75 | 4000000,70,0,0,1.738856,0.513258,1.225598
76 | 4000000,75,0,0,1.798200,0.523680,1.274520
77 | 4000000,80,0,0,1.734383,0.517710,1.216673
78 | 4000000,85,0,0,1.624824,0.529243,1.095581
79 | 4000000,90,0,0,1.628978,0.516347,1.112631
80 | 4000000,95,0,0,1.610213,0.536970,1.073243
81 | 4000000,100,0,0,1.679644,0.501370,1.178274
82 | 5000000,5,0,0,2.738972,0.602219,2.136753
83 | 5000000,10,0,0,2.716369,0.602615,2.113755
84 | 5000000,15,0,0,2.720195,0.603787,2.116407
85 | 5000000,20,0,0,2.698476,0.609594,2.088882
86 | 5000000,25,0,0,2.743277,0.608929,2.134348
87 | 5000000,30,0,0,2.641273,0.617008,2.024265
88 | 5000000,35,0,0,2.470825,0.629333,1.841492
89 | 5000000,40,0,0,2.417312,0.628503,1.788810
90 | 5000000,45,0,0,2.285351,0.620693,1.664658
91 | 5000000,50,0,0,2.405780,0.607240,1.798540
92 | 5000000,55,0,0,2.228662,0.621444,1.607218
93 | 5000000,60,0,0,2.303424,0.623592,1.679832
94 | 5000000,65,0,0,2.334081,0.651338,1.682743
95 | 5000000,70,0,0,2.319969,0.646172,1.673797
96 | 5000000,75,0,0,2.314917,0.646436,1.668481
97 | 5000000,80,0,0,2.225577,0.644466,1.581111
98 | 5000000,85,0,0,2.191136,0.653857,1.537280
99 | 5000000,90,0,0,2.155392,0.655350,1.500042
100 | 5000000,95,0,0,2.038051,0.660276,1.377775
101 | 5000000,100,0,0,2.196845,0.630068,1.566777
102 | 6000000,5,0,0,3.327895,0.762550,2.565346
103 | 6000000,10,0,0,3.298723,0.734865,2.563858
104 | 6000000,15,0,0,3.354630,0.732273,2.622357
105 | 6000000,20,0,0,3.286660,0.739868,2.546792
106 | 6000000,25,0,0,3.372318,0.732436,2.639882
107 | 6000000,30,0,0,3.259724,0.740645,2.519079
108 | 6000000,35,0,0,3.054829,0.743409,2.311419
109 | 6000000,40,0,0,2.973856,0.738804,2.235052
110 | 6000000,45,0,0,2.823585,0.742369,2.081216
111 | 6000000,50,0,0,2.859516,0.734220,2.125296
112 | 6000000,55,0,0,2.755520,0.761987,1.993533
113 | 6000000,60,0,0,2.983694,0.767813,2.215880
114 | 6000000,65,0,0,2.925565,0.789701,2.135864
115 | 6000000,70,0,0,2.909070,0.782410,2.126660
116 | 6000000,75,0,0,2.914689,0.771284,2.143406
117 | 6000000,80,0,0,2.796472,0.772394,2.024078
118 | 6000000,85,0,0,2.689242,0.793284,1.895957
119 | 6000000,90,0,0,2.664311,0.795375,1.868936
120 | 6000000,95,0,0,2.529320,0.790332,1.738988
121 | 6000000,100,0,0,2.675950,0.757688,1.918262
122 | 7000000,5,0,0,3.996704,0.924851,3.071853
123 | 7000000,10,0,0,3.898082,0.844846,3.053236
124 | 7000000,15,0,0,3.967545,0.881168,3.086377
125 | 7000000,20,0,0,3.932736,0.866162,3.066574
126 | 7000000,25,0,0,4.035503,0.855663,3.179841
127 | 7000000,30,0,0,3.853679,0.858375,2.995304
128 | 7000000,35,0,0,3.700756,0.892373,2.808384
129 | 7000000,40,0,0,3.593856,0.857586,2.736270
130 | 7000000,45,0,0,3.363648,0.878067,2.485581
131 | 7000000,50,0,0,3.437134,0.850130,2.587004
132 | 7000000,55,0,0,3.334999,0.877300,2.457699
133 | 7000000,60,0,0,3.505047,0.907076,2.597971
134 | 7000000,65,0,0,3.484218,0.915663,2.568555
135 | 7000000,70,0,0,3.564678,0.910495,2.654184
136 | 7000000,75,0,0,3.470754,0.900429,2.570326
137 | 7000000,80,0,0,3.385602,0.902801,2.482802
138 | 7000000,85,0,0,3.108434,0.925589,2.182845
139 | 7000000,90,0,0,3.219396,0.924434,2.294962
140 | 7000000,95,0,0,3.124498,0.910731,2.213767
141 | 7000000,100,0,0,3.177028,0.885023,2.292005
142 | 8000000,5,0,0,4.766666,1.096688,3.669978
143 | 8000000,10,0,0,4.513911,0.982807,3.531104
144 | 8000000,15,0,0,4.560598,0.971304,3.589294
145 | 8000000,20,0,0,4.612083,0.984183,3.627900
146 | 8000000,25,0,0,4.660397,0.970289,3.690108
147 | 8000000,30,0,0,4.621589,0.995142,3.626447
148 | 8000000,35,0,0,4.320914,0.986778,3.334136
149 | 8000000,40,0,0,4.246617,0.985245,3.261372
150 | 8000000,45,0,0,4.091949,0.993188,3.098761
151 | 8000000,50,0,0,4.125746,0.973301,3.152445
152 | 8000000,55,0,0,3.946131,1.012447,2.933684
153 | 8000000,60,0,0,4.003785,0.998628,3.005157
154 | 8000000,65,0,0,4.093362,1.044400,3.048962
155 | 8000000,70,0,0,4.121861,1.032051,3.089810
156 | 8000000,75,0,0,4.069124,1.030064,3.039060
157 | 8000000,80,0,0,4.064636,1.064685,2.999951
158 | 8000000,85,0,0,3.734827,1.033935,2.700892
159 | 8000000,90,0,0,3.785738,1.036601,2.749137
160 | 8000000,95,0,0,3.677863,1.073646,2.604217
161 | 8000000,100,0,0,3.874679,1.020020,2.854659
162 | 9000000,5,0,0,5.235279,1.212156,4.023123
163 | 9000000,10,0,0,5.083309,1.092832,3.990477
164 | 9000000,15,0,0,5.104618,1.091969,4.012649
165 | 9000000,20,0,0,5.237394,1.097292,4.140102
166 | 9000000,25,0,0,5.403803,1.096931,4.306872
167 | 9000000,30,0,0,5.105784,1.122448,3.983336
168 | 9000000,35,0,0,4.815739,1.119876,3.695863
169 | 9000000,40,0,0,4.855958,1.126481,3.729477
170 | 9000000,45,0,0,4.629845,1.125421,3.504425
171 | 9000000,50,0,0,4.619241,1.078884,3.540357
172 | 9000000,55,0,0,4.460644,1.125915,3.334729
173 | 9000000,60,0,0,4.554042,1.105754,3.448288
174 | 9000000,65,0,0,4.710019,1.191131,3.518888
175 | 9000000,70,0,0,4.715202,1.163294,3.551908
176 | 9000000,75,0,0,4.704559,1.150023,3.554536
177 | 9000000,80,0,0,4.710057,1.219878,3.490179
178 | 9000000,85,0,0,4.513430,1.195610,3.317819
179 | 9000000,90,0,0,4.398654,1.203555,3.195100
180 | 9000000,95,0,0,4.153370,1.206831,2.946539
181 | 9000000,100,0,0,4.138215,1.138724,2.999492
182 | 10000000,5,0,0,5.791414,1.291686,4.499728
183 | 10000000,10,0,0,5.724184,1.220067,4.504117
184 | 10000000,15,0,0,5.765862,1.215561,4.550301
185 | 10000000,20,0,0,5.850229,1.234341,4.615889
186 | 10000000,25,0,0,5.904642,1.235793,4.668849
187 | 10000000,30,0,0,5.771626,1.240559,4.531067
188 | 10000000,35,0,0,5.592988,1.265301,4.327687
189 | 10000000,40,0,0,5.453986,1.259612,4.194374
190 | 10000000,45,0,0,5.226291,1.245989,3.980302
191 | 10000000,50,0,0,5.254268,1.228019,4.026249
192 | 10000000,55,0,0,5.031442,1.276297,3.755144
193 | 10000000,60,0,0,5.079279,1.243382,3.835896
194 | 10000000,65,0,0,5.308688,1.297778,4.010910
195 | 10000000,70,0,0,5.352717,1.295163,4.057554
196 | 10000000,75,0,0,5.394536,1.307947,4.086589
197 | 10000000,80,0,0,5.174465,1.290116,3.884350
198 | 10000000,85,0,0,5.036778,1.329402,3.707376
199 | 10000000,90,0,0,4.893278,1.301761,3.591517
200 | 10000000,95,0,0,4.616589,1.314193,3.302397
201 | 10000000,100,0,0,5.059062,1.274316,3.784747
202 |
--------------------------------------------------------------------------------
/docs/bench-report-repro.csv:
--------------------------------------------------------------------------------
1 | Total Items,Buckets,Removed,Escalated,Time Elapsed,Time to Send,Time to Receive
2 | 1000000,5,0,0,0.614072,0.252223,0.361849
3 | 1000000,10,0,0,0.487301,0.166346,0.320954
4 | 1000000,15,0,0,0.465424,0.161509,0.303915
5 | 1000000,20,0,0,0.429060,0.160433,0.268626
6 | 1000000,25,0,0,0.422653,0.151694,0.270959
7 | 1000000,30,0,0,0.412588,0.153395,0.259193
8 | 1000000,35,0,0,0.411344,0.146609,0.264735
9 | 1000000,40,0,0,0.372737,0.144614,0.228123
10 | 1000000,45,0,0,0.381298,0.149845,0.231453
11 | 1000000,50,0,0,0.421840,0.157114,0.264726
12 | 1000000,55,0,0,0.367223,0.134796,0.232427
13 | 1000000,60,0,0,0.373999,0.136299,0.237700
14 | 1000000,65,0,0,0.390020,0.145616,0.244404
15 | 1000000,70,0,0,0.364162,0.136035,0.228127
16 | 1000000,75,0,0,0.368686,0.143142,0.225544
17 | 1000000,80,0,0,0.377908,0.146060,0.231848
18 | 1000000,85,0,0,0.383652,0.148589,0.235063
19 | 1000000,90,0,0,0.376831,0.145537,0.231294
20 | 1000000,95,0,0,0.379045,0.150784,0.228262
21 | 1000000,100,0,0,0.381100,0.154579,0.226521
22 | 2000000,5,0,8780,1.172073,0.389456,0.782617
23 | 2000000,10,0,0,1.010092,0.260968,0.749124
24 | 2000000,15,0,0,0.968993,0.257443,0.711549
25 | 2000000,20,0,0,0.954110,0.265092,0.689018
26 | 2000000,25,0,0,0.971114,0.265140,0.705974
27 | 2000000,30,0,0,0.895227,0.261474,0.633753
28 | 2000000,35,0,0,0.851089,0.282734,0.568355
29 | 2000000,40,0,0,0.833119,0.276862,0.556257
30 | 2000000,45,0,0,0.789324,0.271361,0.517963
31 | 2000000,50,0,0,0.863200,0.296498,0.566703
32 | 2000000,55,0,0,0.808548,0.285560,0.522989
33 | 2000000,60,0,0,0.823831,0.283021,0.540809
34 | 2000000,65,0,0,0.825488,0.302481,0.523006
35 | 2000000,70,0,0,0.840942,0.318196,0.522745
36 | 2000000,75,0,0,0.816378,0.308826,0.507552
37 | 2000000,80,0,0,0.799257,0.281457,0.517800
38 | 2000000,85,0,0,0.755920,0.262167,0.493753
39 | 2000000,90,0,0,0.742541,0.272914,0.469628
40 | 2000000,95,0,0,0.753119,0.275580,0.477539
41 | 2000000,100,0,0,0.809352,0.269598,0.539754
42 | 3000000,5,0,525147,1.897074,0.512351,1.384723
43 | 3000000,10,0,432326,1.782897,0.448884,1.334013
44 | 3000000,15,0,585831,1.836801,0.471124,1.365677
45 | 3000000,20,0,700310,1.894317,0.472125,1.422191
46 | 3000000,25,0,761735,1.947429,0.510474,1.436955
47 | 3000000,30,0,523822,1.749668,0.441676,1.307992
48 | 3000000,35,0,292013,1.520458,0.380246,1.140212
49 | 3000000,40,0,194007,1.451599,0.384171,1.067428
50 | 3000000,45,0,90511,1.322008,0.398315,0.923692
51 | 3000000,50,0,97903,1.333236,0.365696,0.967540
52 | 3000000,55,0,134423,1.356082,0.396705,0.959377
53 | 3000000,60,0,174357,1.398783,0.408589,0.990194
54 | 3000000,65,0,165502,1.356271,0.391172,0.965099
55 | 3000000,70,0,207399,1.387111,0.403218,0.983893
56 | 3000000,75,0,158505,1.338503,0.402033,0.936470
57 | 3000000,80,0,117314,1.275055,0.391559,0.883496
58 | 3000000,85,0,80291,1.230854,0.405083,0.825771
59 | 3000000,90,0,67175,1.220653,0.414606,0.806047
60 | 3000000,95,0,60570,1.241768,0.417398,0.824370
61 | 3000000,100,0,59876,1.285382,0.397571,0.887811
62 | 4000000,5,0,2052180,2.730309,0.622339,2.107969
63 | 4000000,10,0,1386031,2.485446,0.505215,1.980231
64 | 4000000,15,0,1665887,2.540234,0.500777,2.039457
65 | 4000000,20,0,1862791,2.602652,0.510908,2.091744
66 | 4000000,25,0,1963693,2.630047,0.509664,2.120383
67 | 4000000,30,0,1584274,2.424148,0.509078,1.915070
68 | 4000000,35,0,1094349,2.209760,0.503182,1.706578
69 | 4000000,40,0,923556,2.137053,0.522911,1.614142
70 | 4000000,45,0,555130,1.956664,0.509600,1.447064
71 | 4000000,50,0,550534,1.973188,0.520544,1.452644
72 | 4000000,55,0,627424,1.944976,0.506746,1.438230
73 | 4000000,60,0,806981,2.040783,0.524901,1.515882
74 | 4000000,65,0,1011482,2.061003,0.535786,1.525217
75 | 4000000,70,0,1170764,2.151871,0.532845,1.619026
76 | 4000000,75,0,1233862,2.208219,0.540444,1.667775
77 | 4000000,80,0,1268491,2.120460,0.537724,1.582736
78 | 4000000,85,0,867423,1.932836,0.525573,1.407263
79 | 4000000,90,0,723756,1.886115,0.538142,1.347973
80 | 4000000,95,0,516079,1.787550,0.571406,1.216145
81 | 4000000,100,0,324528,1.791376,0.529586,1.261790
82 | 5000000,5,0,4013480,3.484860,0.744488,2.740372
83 | 5000000,10,0,3723893,3.463461,0.625551,2.837910
84 | 5000000,15,0,4392646,3.611652,0.653881,2.957771
85 | 5000000,20,0,4432753,3.624666,0.635287,2.989379
86 | 5000000,25,0,5158887,3.839290,0.632956,3.206334
87 | 5000000,30,0,4327634,3.506346,0.639295,2.867051
88 | 5000000,35,0,3280483,3.081920,0.628154,2.453766
89 | 5000000,40,0,3261591,3.128934,0.649785,2.479149
90 | 5000000,45,0,2203708,2.704719,0.651279,2.053440
91 | 5000000,50,0,1829039,2.565912,0.641004,1.924908
92 | 5000000,55,0,1919433,2.596977,0.632209,1.964768
93 | 5000000,60,0,2568018,2.815486,0.662384,2.153103
94 | 5000000,65,0,3082863,2.940506,0.676709,2.263797
95 | 5000000,70,0,3296707,3.072556,0.648120,2.424436
96 | 5000000,75,0,3646389,3.071129,0.672348,2.398780
97 | 5000000,80,0,3241118,2.970847,0.675929,2.294918
98 | 5000000,85,0,2377000,2.677772,0.658825,2.018947
99 | 5000000,90,0,2398246,2.733234,0.684645,2.048589
100 | 5000000,95,0,1738847,2.509009,0.703938,1.805071
101 | 5000000,100,0,1130174,2.332412,0.656768,1.675644
102 | 6000000,5,0,7874560,4.660791,0.862922,3.797869
103 | 6000000,10,0,6538673,4.350979,0.756016,3.594964
104 | 6000000,15,0,7671927,4.683158,0.753397,3.929761
105 | 6000000,20,0,7866072,4.793124,0.755119,4.038005
106 | 6000000,25,0,8244137,4.895965,0.752644,4.143321
107 | 6000000,30,0,7854925,4.768136,0.763280,4.004856
108 | 6000000,35,0,6347304,4.200039,0.758310,3.441730
109 | 6000000,40,0,5246034,3.884311,0.753668,3.130643
110 | 6000000,45,0,4674411,3.618269,0.787706,2.830563
111 | 6000000,50,0,4375865,3.581855,0.764940,2.816915
112 | 6000000,55,0,4593503,3.577487,0.768592,2.808895
113 | 6000000,60,0,5368031,3.872888,0.778848,3.094041
114 | 6000000,65,0,5902275,3.999542,0.779623,3.219919
115 | 6000000,70,0,6479259,4.157115,0.826905,3.330210
116 | 6000000,75,0,6707107,4.237924,0.798023,3.439901
117 | 6000000,80,0,6068588,3.949625,0.777362,3.172263
118 | 6000000,85,0,5511508,3.704121,0.813528,2.890594
119 | 6000000,90,0,4921397,3.567229,0.805057,2.762172
120 | 6000000,95,0,4071941,3.284678,0.823504,2.461175
121 | 6000000,100,0,3307903,3.140478,0.757465,2.383013
122 | 7000000,5,0,10942236,5.697644,1.619642,4.078001
123 | 7000000,10,0,10270453,5.512758,0.885017,4.627741
124 | 7000000,15,51163,11869644,6.008075,0.911135,5.096941
125 | 7000000,20,0,11636610,6.075285,0.873067,5.202218
126 | 7000000,25,477954,12231702,6.407728,0.890165,5.517562
127 | 7000000,30,96719,12021027,6.172390,0.882817,5.289573
128 | 7000000,35,0,10750107,5.529346,0.930081,4.599266
129 | 7000000,40,0,9268594,5.031654,0.874133,4.157521
130 | 7000000,45,0,8346097,4.697675,0.914095,3.783580
131 | 7000000,50,0,7876007,4.542001,0.855008,3.686992
132 | 7000000,55,0,8023334,4.528474,0.913997,3.614478
133 | 7000000,60,0,8419225,4.699484,0.875165,3.824319
134 | 7000000,65,0,10571310,5.321135,0.971543,4.349592
135 | 7000000,70,0,10196521,5.205932,0.949261,4.256671
136 | 7000000,75,3087,11225914,5.737270,0.896307,4.840963
137 | 7000000,80,0,10444634,5.338575,0.948241,4.390334
138 | 7000000,85,0,9412025,4.922358,0.950560,3.971799
139 | 7000000,90,0,8181797,4.566340,0.949068,3.617272
140 | 7000000,95,0,6492478,4.068910,0.927911,3.140998
141 | 7000000,100,0,6460742,4.072999,0.926043,3.146956
142 | 8000000,5,1547679,13974990,6.611841,1.782974,4.828866
143 | 8000000,10,1349023,13819521,6.522988,1.848314,4.674674
144 | 8000000,15,1250438,14768979,7.003629,2.103466,4.900163
145 | 8000000,20,1944856,13511558,6.956602,2.188037,4.768566
146 | 8000000,25,3385314,13241958,7.170449,2.433094,4.737355
147 | 8000000,30,2981814,13113729,6.890043,2.399469,4.490574
148 | 8000000,35,484684,14308929,6.553872,2.096342,4.457529
149 | 8000000,40,490274,13323517,6.151847,1.925466,4.226381
150 | 8000000,45,120202,12008897,5.750389,1.760990,3.989398
151 | 8000000,50,66021,11455779,5.452624,1.621572,3.831053
152 | 8000000,55,0,11529740,5.543609,1.761989,3.781620
153 | 8000000,60,0,12567514,5.892941,1.912218,3.980723
154 | 8000000,65,169118,13824651,6.379142,2.144917,4.234224
155 | 8000000,70,265625,13805616,6.537384,2.167549,4.369835
156 | 8000000,75,1279731,13029862,6.451706,2.322877,4.128829
157 | 8000000,80,467329,13615753,6.470692,2.228395,4.242297
158 | 8000000,85,0,12772989,5.942393,2.074334,3.868059
159 | 8000000,90,0,11531293,5.427606,1.925593,3.502014
160 | 8000000,95,0,10702063,5.198019,1.819703,3.378316
161 | 8000000,100,0,9408805,4.834727,1.479196,3.355530
162 | 9000000,5,3037646,16685247,7.339630,1.977169,5.362461
163 | 9000000,10,2628172,16768556,7.522570,1.967070,5.555500
164 | 9000000,15,3772889,14626740,7.591316,2.319846,5.271470
165 | 9000000,20,3825503,14272986,7.727109,2.297578,5.429531
166 | 9000000,25,4504975,14511430,7.784240,2.518145,5.266096
167 | 9000000,30,4258811,14570970,7.631955,2.542294,5.089661
168 | 9000000,35,2876804,14663941,7.512055,2.261533,5.250523
169 | 9000000,40,1875466,16302585,7.138127,2.045630,5.092497
170 | 9000000,45,1616716,15099770,6.686282,1.942530,4.743752
171 | 9000000,50,1475924,14625713,6.340199,1.755059,4.585140
172 | 9000000,55,1278398,14987535,6.506340,1.964767,4.541573
173 | 9000000,60,1850110,16260263,7.177482,2.055182,5.122300
174 | 9000000,65,1847096,14951605,7.119351,2.290011,4.829341
175 | 9000000,70,2639700,14665600,7.203971,2.357735,4.846236
176 | 9000000,75,3854553,14674516,7.514475,2.570441,4.944033
177 | 9000000,80,2654872,14337311,7.131127,2.354073,4.777055
178 | 9000000,85,1418711,15076827,7.112464,2.283614,4.828851
179 | 9000000,90,1085103,15473669,6.721262,2.121744,4.599519
180 | 9000000,95,470511,14010402,6.074357,1.911429,4.162928
181 | 9000000,100,362220,13206922,5.821822,1.605431,4.216391
182 | 10000000,5,4380122,19328044,8.223477,2.138010,6.085467
183 | 10000000,10,4032912,18410204,8.479764,2.137043,6.342721
184 | 10000000,15,5134573,15854102,8.336251,2.409270,5.926981
185 | 10000000,20,5372727,15739193,8.478422,2.512243,5.966179
186 | 10000000,25,5530225,16266760,8.678549,2.709208,5.969340
187 | 10000000,30,5569506,16286334,8.637668,2.668475,5.969193
188 | 10000000,35,4335864,15901036,8.064952,2.387045,5.677907
189 | 10000000,40,3746996,16632197,8.214258,2.261024,5.953234
190 | 10000000,45,2959391,18071628,7.543156,2.018661,5.524495
191 | 10000000,50,3050989,17761921,7.281090,1.926665,5.354425
192 | 10000000,55,2446851,17872804,7.314149,2.056313,5.257835
193 | 10000000,60,3281209,17727995,7.993614,2.200846,5.792768
194 | 10000000,65,3397266,16003254,7.593191,2.398930,5.194261
195 | 10000000,70,3815474,15875834,7.805462,2.413468,5.391994
196 | 10000000,75,4746834,15989428,8.195779,2.663577,5.532202
197 | 10000000,80,4105434,15890037,7.991832,2.515989,5.475843
198 | 10000000,85,3263554,15789517,7.571546,2.372554,5.198992
199 | 10000000,90,3259600,17271474,7.993723,2.224401,5.769322
200 | 10000000,95,2122795,17905826,7.228344,2.135265,5.093080
201 | 10000000,100,1662459,16302933,6.542334,1.763724,4.778610
202 |
--------------------------------------------------------------------------------
/docs/gpq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JustinTimperio/gpq/23a8ae90c534c81d980ee75e0e54b03140fd32b2/docs/gpq.png
--------------------------------------------------------------------------------
/ftime/ftime.go:
--------------------------------------------------------------------------------
1 | package ftime
2 |
3 | import (
4 | "context"
5 | "sync"
6 | "sync/atomic"
7 | "time"
8 | )
9 |
10 | type Ftime interface {
11 | IsDaemonRunning() bool
12 | GetFormat() string
13 | SetFormat(format string) Ftime
14 | GetLocation() *time.Location
15 | SetLocation(location *time.Location) Ftime
16 | Now() time.Time
17 | Stop()
18 | UnixNow() int64
19 | UnixUNow() uint32
20 | UnixNanoNow() int64
21 | UnixUNanoNow() uint32
22 | FormattedNow() []byte
23 | Since(t time.Time) time.Duration
24 | StartTimerD(ctx context.Context, dur time.Duration) Ftime
25 | }
26 |
27 | // fastime is the base Ftime implementation; it stores the current time in atomic values
28 | type fastime struct {
29 | uut uint32
30 | uunt uint32
31 | dur int64
32 | ut int64
33 | unt int64
34 | correctionDur time.Duration
35 | mu sync.Mutex
36 | wg sync.WaitGroup
37 | running atomic.Bool
38 | t atomic.Pointer[time.Time]
39 | ft atomic.Pointer[[]byte]
40 | format atomic.Pointer[string]
41 | formatValid atomic.Bool
42 | location atomic.Pointer[time.Location]
43 | }
44 |
45 | const (
46 | bufSize = 64
47 | bufMargin = 10
48 | )
49 |
50 | var (
51 | once sync.Once
52 | instance Ftime
53 | )
54 |
55 | func init() {
56 | once.Do(func() {
57 | instance = New().StartTimerD(context.Background(), time.Millisecond*5)
58 | })
59 | }
60 |
61 | func IsDaemonRunning() (running bool) {
62 | return instance.IsDaemonRunning()
63 | }
64 |
65 | func GetLocation() (loc *time.Location) {
66 | return instance.GetLocation()
67 | }
68 |
69 | func GetFormat() (form string) {
70 | return instance.GetFormat()
71 | }
72 |
73 | // SetLocation replaces time location
74 | func SetLocation(location *time.Location) (ft Ftime) {
75 | return instance.SetLocation(location)
76 | }
77 |
78 | // SetFormat replaces time format
79 | func SetFormat(format string) (ft Ftime) {
80 | return instance.SetFormat(format)
81 | }
82 |
83 | // Now returns current time
84 | func Now() (now time.Time) {
85 | return instance.Now()
86 | }
87 |
88 | // Since returns the time elapsed since t.
89 | // It is shorthand for fastime.Now().Sub(t).
90 | func Since(t time.Time) (dur time.Duration) {
91 | return instance.Since(t)
92 | }
93 |
94 | // Stop stops the time refresh daemon
95 | func Stop() {
96 | instance.Stop()
97 | }
98 |
99 | // UnixNow returns current unix time
100 | func UnixNow() (now int64) {
101 | return instance.UnixNow()
102 | }
103 |
104 | // UnixUNow returns current unix time as a uint32
105 | func UnixUNow() (now uint32) {
106 | return instance.UnixUNow()
107 | }
108 |
109 | // UnixNanoNow returns current unix nano time
110 | func UnixNanoNow() (now int64) {
111 | return instance.UnixNanoNow()
112 | }
113 |
114 | // UnixUNanoNow returns current unix nano time as a uint32
115 | func UnixUNanoNow() (now uint32) {
116 | return instance.UnixUNanoNow()
117 | }
118 |
119 | // FormattedNow returns formatted byte time
120 | func FormattedNow() (now []byte) {
121 | return instance.FormattedNow()
122 | }
123 |
124 | // StartTimerD provides time refresh daemon
125 | func StartTimerD(ctx context.Context, dur time.Duration) (ft Ftime) {
126 | return instance.StartTimerD(ctx, dur)
127 | }
128 |
--------------------------------------------------------------------------------
/ftime/helpers.go:
--------------------------------------------------------------------------------
1 | package ftime
2 |
3 | import (
4 | "context"
5 | "math"
6 | "sync/atomic"
7 | "syscall"
8 | "time"
9 | "unsafe"
10 | )
11 |
12 | func (f *fastime) fetchSysTime() (now time.Time) {
13 | var tv syscall.Timeval
14 | err := syscall.Gettimeofday(&tv)
15 | loc := f.GetLocation()
16 | if err != nil {
17 | now = time.Now()
18 | if loc != nil {
19 | return now.In(loc)
20 | }
21 | return now
22 | }
23 | now = time.Unix(0, syscall.TimevalToNsec(tv))
24 | if loc != nil {
25 | return now.In(loc)
26 | }
27 | return now
28 | }
29 |
30 | func New() (f Ftime) {
31 | return newFtime()
32 | }
33 |
34 | func newFtime() (f *fastime) {
35 | f = &fastime{
36 | ut: math.MaxInt64,
37 | unt: math.MaxInt64,
38 | uut: math.MaxUint32,
39 | uunt: math.MaxUint32,
40 | correctionDur: time.Millisecond * 100,
41 | }
42 |
43 | form := time.RFC3339
44 | f.format.Store(&form)
45 | loc := func() (loc *time.Location) {
46 | tz, ok := syscall.Getenv("TZ")
47 | if ok && tz != "" {
48 | var err error
49 | loc, err = time.LoadLocation(tz)
50 | if err == nil {
51 | return loc
52 | }
53 | }
54 | return new(time.Location)
55 | }()
56 |
57 | f.location.Store(loc)
58 |
59 | buf := f.newBuffer(len(form) + bufMargin)
60 | f.ft.Store(&buf)
61 |
62 | return f.refresh()
63 | }
64 |
65 | func (f *fastime) update() (ft *fastime) {
66 | return f.store(f.Now().Add(time.Duration(atomic.LoadInt64(&f.dur))))
67 | }
68 |
69 | func (f *fastime) refresh() (ft *fastime) {
70 | return f.store(f.fetchSysTime())
71 | }
72 |
73 | func (f *fastime) newBuffer(max int) (b []byte) {
74 | if max < bufSize {
75 | var buf [bufSize]byte
76 | b = buf[:0]
77 | } else {
78 | b = make([]byte, 0, max)
79 | }
80 | return b
81 | }
82 |
83 | func (f *fastime) store(t time.Time) (ft *fastime) {
84 | f.t.Store(&t)
85 | f.formatValid.Store(false)
86 | ut := t.Unix()
87 | unt := t.UnixNano()
88 | atomic.StoreInt64(&f.ut, ut)
89 | atomic.StoreInt64(&f.unt, unt)
90 | atomic.StoreUint32(&f.uut, *(*uint32)(unsafe.Pointer(&ut)))
91 | atomic.StoreUint32(&f.uunt, *(*uint32)(unsafe.Pointer(&unt)))
92 | return f
93 | }
94 |
95 | func (f *fastime) IsDaemonRunning() (running bool) {
96 | return f.running.Load()
97 | }
98 |
99 | func (f *fastime) GetLocation() (loc *time.Location) {
100 | loc = f.location.Load()
101 | if loc == nil {
102 | return nil
103 | }
104 | return loc
105 | }
106 |
107 | func (f *fastime) GetFormat() (form string) {
108 | return *f.format.Load()
109 | }
110 |
111 | // SetLocation replaces time location
112 | func (f *fastime) SetLocation(loc *time.Location) (ft Ftime) {
113 | if loc == nil {
114 | return f
115 | }
116 | f.location.Store(loc)
117 | f.refresh()
118 | return f
119 | }
120 |
121 | // SetFormat replaces time format
122 | func (f *fastime) SetFormat(format string) (ft Ftime) {
123 | f.format.Store(&format)
124 | f.formatValid.Store(false)
125 | f.refresh()
126 | return f
127 | }
128 |
129 | // Now returns current time
130 | func (f *fastime) Now() (t time.Time) {
131 | return *f.t.Load()
132 | }
133 |
134 | // Stop stops the time refresh daemon
135 | func (f *fastime) Stop() {
136 | f.mu.Lock()
137 | f.stop()
138 | f.mu.Unlock()
139 | }
140 |
141 | func (f *fastime) stop() {
142 | if f.IsDaemonRunning() {
143 | atomic.StoreInt64(&f.dur, 0)
144 | }
145 | f.wg.Wait()
146 | }
147 |
148 | func (f *fastime) Since(t time.Time) (dur time.Duration) {
149 | return f.Now().Sub(t)
150 | }
151 |
152 | // UnixNow returns current unix time
153 | func (f *fastime) UnixNow() (now int64) {
154 | return atomic.LoadInt64(&f.ut)
155 | }
156 |
157 | // UnixUNow returns current unix time as a uint32
158 | func (f *fastime) UnixUNow() (now uint32) {
159 | return atomic.LoadUint32(&f.uut)
160 | }
161 |
162 | // UnixNanoNow returns current unix nano time
163 | func (f *fastime) UnixNanoNow() (now int64) {
164 | return atomic.LoadInt64(&f.unt)
165 | }
166 |
167 | // UnixUNanoNow returns current unix nano time as a uint32
168 | func (f *fastime) UnixUNanoNow() (now uint32) {
169 | return atomic.LoadUint32(&f.uunt)
170 | }
171 |
172 | // FormattedNow returns formatted byte time
173 | func (f *fastime) FormattedNow() (now []byte) {
174 | // only update formatted value on swap
175 | if f.formatValid.CompareAndSwap(false, true) {
176 | form := f.GetFormat()
177 | buf := f.Now().AppendFormat(f.newBuffer(len(form)+bufMargin), form)
178 | f.ft.Store(&buf)
179 | }
180 | return *f.ft.Load()
181 | }
182 |
183 | // StartTimerD provides time refresh daemon
184 | func (f *fastime) StartTimerD(ctx context.Context, dur time.Duration) (ft Ftime) {
185 | f.mu.Lock()
186 | defer f.mu.Unlock()
187 | // if the daemon was already running, restart
188 | if f.IsDaemonRunning() {
189 | f.stop()
190 | }
191 | f.running.Store(true)
192 | 	// store the refresh interval atomically; the daemon goroutine reads it concurrently
193 | atomic.StoreInt64(&f.dur, dur.Nanoseconds())
194 | ticker := time.NewTicker(time.Duration(atomic.LoadInt64(&f.dur)))
195 | lastCorrection := f.fetchSysTime()
196 | f.wg.Add(1)
197 | f.refresh()
198 |
199 | go func() {
200 | // daemon cleanup
201 | defer func() {
202 | f.running.Store(false)
203 | ticker.Stop()
204 | f.wg.Done()
205 | }()
206 | for atomic.LoadInt64(&f.dur) > 0 {
207 | t := <-ticker.C
208 | // rely on ticker for approximation
209 | if t.Sub(lastCorrection) < f.correctionDur {
210 | f.update()
211 | } else { // correct the system time at a fixed interval
212 | select {
213 | case <-ctx.Done():
214 | return
215 | default:
216 | }
217 | f.refresh()
218 | lastCorrection = t
219 | }
220 | }
221 | }()
222 | return f
223 | }
224 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/JustinTimperio/gpq
2 |
3 | go 1.22
4 |
5 | require (
6 | github.com/cornelk/hashmap v1.0.8
7 | github.com/dgraph-io/badger/v4 v4.5.0
8 | github.com/google/uuid v1.6.0
9 | github.com/tidwall/btree v1.7.0
10 | )
11 |
12 | require (
13 | github.com/cespare/xxhash/v2 v2.3.0 // indirect
14 | github.com/dgraph-io/ristretto/v2 v2.0.1 // indirect
15 | github.com/dustin/go-humanize v1.0.1 // indirect
16 | github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
17 | github.com/google/flatbuffers v24.3.25+incompatible // indirect
18 | github.com/google/go-cmp v0.6.0 // indirect
19 | github.com/klauspost/compress v1.17.11 // indirect
20 | github.com/pkg/errors v0.9.1 // indirect
21 | go.opencensus.io v0.24.0 // indirect
22 | golang.org/x/net v0.33.0 // indirect
23 | golang.org/x/sys v0.28.0 // indirect
24 | google.golang.org/protobuf v1.36.0 // indirect
25 | )
26 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
2 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
3 | github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
4 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
5 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
6 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
7 | github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
8 | github.com/cornelk/hashmap v1.0.8 h1:nv0AWgw02n+iDcawr5It4CjQIAcdMMKRrs10HOJYlrc=
9 | github.com/cornelk/hashmap v1.0.8/go.mod h1:RfZb7JO3RviW/rT6emczVuC/oxpdz4UsSB2LJSclR1k=
10 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
11 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
12 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
13 | github.com/dgraph-io/badger/v4 v4.5.0 h1:TeJE3I1pIWLBjYhIYCA1+uxrjWEoJXImFBMEBVSm16g=
14 | github.com/dgraph-io/badger/v4 v4.5.0/go.mod h1:ysgYmIeG8dS/E8kwxT7xHyc7MkmwNYLRoYnFbr7387A=
15 | github.com/dgraph-io/ristretto/v2 v2.0.1 h1:7W0LfEP+USCmtrUjJsk+Jv2jbhJmb72N4yRI7GrLdMI=
16 | github.com/dgraph-io/ristretto/v2 v2.0.1/go.mod h1:K7caLeufSdxm+ITp1n/73U+VbFVAHrexfLbz4n14hpo=
17 | github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
18 | github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
19 | github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
20 | github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
21 | github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
22 | github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
23 | github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
24 | github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
25 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
26 | github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
27 | github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
28 | github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
29 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
30 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
31 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
32 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
33 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
34 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
35 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
36 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
37 | github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
38 | github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
39 | github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI=
40 | github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
41 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
42 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
43 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
44 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
45 | github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
46 | github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
47 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
48 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
49 | github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
50 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
51 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
52 | github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
53 | github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
54 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
55 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
56 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
57 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
58 | github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
59 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
60 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
61 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
62 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
63 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
64 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
65 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
66 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
67 | github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
68 | github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
69 | go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
70 | go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
71 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
72 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
73 | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
74 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
75 | golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
76 | golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
77 | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
78 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
79 | golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
80 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
81 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
82 | golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
83 | golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
84 | golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
85 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
86 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
87 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
88 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
89 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
90 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
91 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
92 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
93 | golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
94 | golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
95 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
96 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
97 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
98 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
99 | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
100 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
101 | golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
102 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
103 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
104 | google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
105 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
106 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
107 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
108 | google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
109 | google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
110 | google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
111 | google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
112 | google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
113 | google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
114 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
115 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
116 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
117 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
118 | google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
119 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
120 | google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
121 | google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
122 | google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
123 | google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
124 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
125 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
126 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
127 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
128 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
129 | honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
130 |
--------------------------------------------------------------------------------
/gpq.go:
--------------------------------------------------------------------------------
1 | package gpq
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "sync"
7 | "time"
8 |
9 | "github.com/JustinTimperio/gpq/disk"
10 | "github.com/JustinTimperio/gpq/ftime"
11 | "github.com/JustinTimperio/gpq/queues"
12 | "github.com/JustinTimperio/gpq/schema"
13 |
14 | "github.com/google/uuid"
15 | )
16 |
17 | // GPQ is a generic priority queue that supports priority levels and timeouts
18 | // It is implemented using a heap for each priority level and an ordered set of non-empty buckets
19 | // It also supports disk caching using BadgerDB, with the option to lazily batch disk writes and deletes
20 | // The GPQ is thread-safe and supports concurrent access
21 | type GPQ[d any] struct {
22 | // options is a struct that contains the options for the GPQ
23 | options schema.GPQOptions
24 |
25 | // queue is the core priority queue that manages the per-priority buckets
26 | queue queues.CorePriorityQueue[d]
27 |
28 | // diskCache is a badgerDB used to store items in the GPQ
29 | diskCache *disk.Disk[d]
30 | // activeDBSessions is a wait group for active disk cache sessions
31 | activeDBSessions *sync.WaitGroup
32 |
33 | // lazyDiskSendChan is a channel used to send writes to the lazy disk cache
34 | lazyDiskSendChan chan schema.Item[d]
35 | // lazyDiskDeleteChan is a channel used to send deletes to the lazy disk cache
36 | lazyDiskDeleteChan chan schema.DeleteMessage
37 | // batchHandler allows for synchronization of disk cache batches
38 | batchHandler *batchHandler[d]
39 | // batchCounter is used to keep track of the current batch number
40 | batchCounter *batchCounter
41 | }
42 |
43 | // NewGPQ creates a new GPQ using the provided options
44 | // The MaxPriority option sets the number of priority buckets (priority levels) to support
45 | // You must provide the number of buckets ahead of time, and all priorities you submit
46 | // must be within the range of 0 to MaxPriority-1
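// A minimal usage sketch (the option values shown are illustrative, not required):
//
//	restored, queue, err := NewGPQ[string](schema.GPQOptions{MaxPriority: 10})
//	if err != nil {
//		// handle the error
//	}
//	defer queue.Close()
//	_ = restored // number of items recovered from the disk cache, if enabled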
47 | func NewGPQ[d any](Options schema.GPQOptions) (uint, *GPQ[d], error) {
48 |
49 | var diskCache *disk.Disk[d]
50 | var err error
51 | var sender chan schema.Item[d]
52 | var receiver chan schema.DeleteMessage
53 |
54 | if Options.DiskCacheEnabled {
55 | diskCache, err = disk.NewDiskCache[d](nil, Options)
56 | if err != nil {
57 | return 0, nil, err
58 | }
59 |
60 | if Options.LazyDiskCacheEnabled {
61 | sender = make(chan schema.Item[d], Options.LazyDiskCacheChannelSize)
62 | receiver = make(chan schema.DeleteMessage, Options.LazyDiskCacheChannelSize)
63 | }
64 | }
65 |
66 | gpq := &GPQ[d]{
67 | queue: queues.NewCorePriorityQueue[d](Options, diskCache, receiver),
68 | options: Options,
69 |
70 | diskCache: diskCache,
71 | activeDBSessions: &sync.WaitGroup{},
72 |
73 | lazyDiskSendChan: sender,
74 | lazyDiskDeleteChan: receiver,
75 | batchHandler: newBatchHandler(diskCache),
76 | batchCounter: newBatchCounter(Options.LazyDiskBatchSize),
77 | }
78 |
79 | var restored uint
80 | if Options.DiskCacheEnabled {
81 | items, err := gpq.diskCache.RestoreFromDisk()
82 | if err != nil {
83 | return 0, gpq, err
84 | }
85 |
86 | errs := gpq.restoreDB(items)
87 | if errs != nil {
88 | return 0, gpq, fmt.Errorf("Failed to Restore DB, received %d errors! Errors: %v", len(errs), errs)
89 | }
90 | restored = uint(len(items))
91 |
92 | if Options.LazyDiskCacheEnabled {
93 | go gpq.lazyDiskWriter(Options.DiskWriteDelay)
94 | go gpq.lazyDiskDeleter()
95 | }
96 | }
97 |
98 | return restored, gpq, nil
99 | }
100 |
101 | // ItemsInQueue returns the total number of items in the queue
102 | func (g *GPQ[d]) ItemsInQueue() uint {
103 | return g.queue.ItemsInQueue()
104 | }
105 |
106 | // ItemsInDB returns the total number of items currently committed to disk (requires DiskCacheEnabled)
107 | func (g *GPQ[d]) ItemsInDB() uint {
108 | return g.diskCache.ItemsInDB()
109 | }
110 |
111 | // ActiveBuckets returns the total number of buckets (priorities) that currently contain messages
112 | func (g *GPQ[d]) ActiveBuckets() uint {
113 | return g.queue.ActiveBuckets()
114 | }
115 |
116 | // Enqueue adds an item to the queue with the given options
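// For example (a sketch; the data and options here are arbitrary):
//
//	item := schema.NewItem(0, "hello", schema.EnqueueOptions{})
//	if err := queue.Enqueue(item); err != nil {
//		// handle the error
//	}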
117 | func (g *GPQ[d]) Enqueue(item schema.Item[d]) error {
118 |
119 | if item.Priority >= g.options.MaxPriority {
120 | return errors.New("Priority bucket does not exist")
121 | }
122 | item.SubmittedAt = ftime.Now()
123 | item.LastEscalated = item.SubmittedAt
124 |
125 | if g.options.DiskCacheEnabled && !item.WasRestored {
126 | key, err := uuid.New().MarshalBinary()
127 | if err != nil {
128 | return err
129 | }
130 | item.DiskUUID = key
131 |
132 | if g.options.LazyDiskCacheEnabled {
133 | item.BatchNumber = g.batchCounter.increment()
134 | g.lazyDiskSendChan <- item
135 | } else {
136 | err = g.diskCache.WriteSingle(key, item)
137 | if err != nil {
138 | return err
139 | }
140 | }
141 | }
142 |
143 | return g.queue.Enqueue(&item)
144 | }
145 |
146 | // EnqueueBatch takes a slice of items and attempts to enqueue them in their respective buckets
147 | // If an error is generated, it is appended to a slice of errors. Currently the batch will be committed
148 | // in a partial state, and it is up to the user to parse the errors and resend messages that failed.
149 | // In the future this will most likely change with the addition of transactions.
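// A sketch of handling a partially committed batch:
//
//	if errs := queue.EnqueueBatch(items); errs != nil {
//		for _, err := range errs {
//			// inspect err and resend the messages that failed
//		}
//	}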
150 | func (g *GPQ[d]) EnqueueBatch(items []schema.Item[d]) []error {
151 |
152 | var (
153 | errors []error
154 | processedItems []*schema.Item[d]
155 | )
156 |
157 | for i := 0; i < len(items); i++ {
158 | if items[i].Priority >= g.options.MaxPriority {
159 | errors = append(errors, fmt.Errorf("No Bucket exists to place message %d with priority %d", i, items[i].Priority))
160 | continue
161 | }
162 |
163 | if g.options.DiskCacheEnabled {
164 | key, err := uuid.New().MarshalBinary()
165 | if err != nil {
166 | errors = append(errors, fmt.Errorf("Unable to generate UUID for message %d with priority %d", i, items[i].Priority))
167 | continue
168 | }
169 | items[i].DiskUUID = key
170 |
171 | if g.options.LazyDiskCacheEnabled {
172 | items[i].BatchNumber = g.batchCounter.increment()
173 | g.lazyDiskSendChan <- items[i]
174 | } else {
175 | err = g.diskCache.WriteSingle(items[i].DiskUUID, items[i])
176 | if err != nil {
177 | errors = append(errors, fmt.Errorf("Unable to write message %d with priority %d", i, items[i].Priority))
178 | continue
179 | }
180 | }
181 | }
182 |
183 | processedItems = append(processedItems, &items[i])
184 | }
185 |
186 | return g.queue.EnqueueBatch(processedItems)
187 | }
188 |
189 | // Dequeue removes and returns the item with the highest priority in the queue
190 | func (g *GPQ[d]) Dequeue() (item *schema.Item[d], err error) {
191 | item, err = g.queue.Dequeue()
192 | if err != nil {
193 | return item, err
194 | }
195 |
196 | if g.options.DiskCacheEnabled {
197 | if g.options.LazyDiskCacheEnabled {
198 | dm := schema.DeleteMessage{
199 | DiskUUID: item.DiskUUID,
200 | BatchNumber: item.BatchNumber,
201 | WasRestored: item.WasRestored,
202 | }
203 |
204 | g.lazyDiskDeleteChan <- dm
205 |
206 | } else {
207 | err = g.diskCache.DeleteSingle(item.DiskUUID)
208 | if err != nil {
209 | return item, err
210 | }
211 | }
212 | }
213 |
214 | return item, nil
215 |
216 | }
217 |
218 | // DequeueBatch takes a batch size and returns a slice ordered by priority, up to the batchSize, provided
219 | // enough messages are present to fill the batch. A partial batch will be returned if an error is encountered.
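// A draining-loop sketch (the batch size of 100 is an arbitrary choice):
//
//	for {
//		items, errs := queue.DequeueBatch(100)
//		if errs != nil {
//			break // typically an empty-queue error
//		}
//		for _, item := range items {
//			_ = item.Data // process the item
//		}
//	}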
220 | func (g *GPQ[d]) DequeueBatch(batchSize uint) (items []*schema.Item[d], errs []error) {
221 | items, errs = g.queue.DequeueBatch(batchSize)
222 | if errs != nil {
223 | return items, errs
224 | }
225 |
226 | if g.options.DiskCacheEnabled {
227 | for i := 0; i < len(items); i++ {
228 | if g.options.LazyDiskCacheEnabled {
229 | dm := schema.DeleteMessage{
230 | DiskUUID: items[i].DiskUUID,
231 | BatchNumber: items[i].BatchNumber,
232 | WasRestored: items[i].WasRestored,
233 | }
234 |
235 | g.lazyDiskDeleteChan <- dm
236 |
237 | } else {
238 | err := g.diskCache.DeleteSingle(items[i].DiskUUID)
239 | if err != nil {
240 | return nil, []error{err}
241 | }
242 | }
243 | }
244 | }
245 |
246 | return items, nil
247 | }
248 |
249 | // Prioritize orders the queue based on the individual options added to
250 | // every message in the queue. Prioritizing the queue is a stop-the-world
251 | // event, so consider your usage carefully.
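// A common pattern (mirroring this repository's tests) is to run Prioritize on a
// ticker in its own goroutine; the one-second interval below is an arbitrary choice:
//
//	go func() {
//		for range time.Tick(time.Second) {
//			if _, _, err := queue.Prioritize(); err != nil {
//				return
//			}
//		}
//	}()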
252 | func (g *GPQ[d]) Prioritize() (removed, escalated uint, err error) {
253 | return g.queue.Prioritize()
254 | }
255 |
256 | // Close performs a safe shutdown of the GPQ and the disk cache preventing data loss
257 | func (g *GPQ[d]) Close() {
258 |
259 | if g.options.DiskCacheEnabled {
260 | if g.options.LazyDiskCacheEnabled {
261 | close(g.lazyDiskSendChan)
262 | close(g.lazyDiskDeleteChan)
263 | }
264 |
265 | // Wait for all db sessions to sync to disk
266 | g.activeDBSessions.Wait()
267 |
268 | // Safely close the diskCache
269 | g.diskCache.Close()
270 | }
271 |
272 | }
273 |
274 | func (g *GPQ[d]) restoreDB(items []*schema.Item[d]) []error {
275 | // Quick sanity check
276 | for i := 0; i < len(items); i++ {
277 | if items[i].Priority >= g.options.MaxPriority {
278 | return []error{fmt.Errorf("You are trying to restore items with priorities higher than the max allowed for this queue")}
279 | }
280 | }
281 |
282 | return g.queue.EnqueueBatch(items)
283 | }
284 |
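// lazyDiskWriter accumulates enqueued items into per-batch-number buffers and, on
// every tick of maxDelay, flushes any buffer that has reached LazyDiskBatchSize.
// When the send channel is closed, all remaining buffers are flushed before returning.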
285 | func (g *GPQ[d]) lazyDiskWriter(maxDelay time.Duration) {
286 | g.activeDBSessions.Add(1)
287 | defer g.activeDBSessions.Done()
288 |
289 | var mux sync.Mutex
290 | var wg sync.WaitGroup
291 | var closer = make(chan struct{}, 1)
292 | batch := make(map[uint][]*schema.Item[d], 0)
293 | ticker := time.NewTicker(maxDelay)
294 |
295 | wg.Add(2)
296 | go func() {
297 | defer wg.Done()
298 | for {
299 | select {
300 | case item, ok := <-g.lazyDiskSendChan:
301 | if !ok {
302 | closer <- struct{}{}
303 | mux.Lock()
304 | for k, v := range batch {
305 | g.batchHandler.processBatch(v, k)
306 | batch[k] = batch[k][:0]
307 | }
308 | mux.Unlock()
309 | return
310 | }
311 |
312 | mux.Lock()
313 | batch[item.BatchNumber] = append(batch[item.BatchNumber], &item)
314 | mux.Unlock()
315 | }
316 | }
317 | }()
318 |
319 | go func() {
320 | defer wg.Done()
321 | for {
322 | select {
323 | case <-ticker.C:
324 | mux.Lock()
325 | for k, v := range batch {
326 | if len(v) >= int(g.options.LazyDiskBatchSize) {
327 | g.batchHandler.processBatch(v, k)
328 | batch[k] = batch[k][:0]
329 | }
330 | }
331 | mux.Unlock()
332 | case <-closer:
333 | return
334 | }
335 | }
336 | }()
337 |
338 | wg.Wait()
339 | }
340 |
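// lazyDiskDeleter accumulates delete messages into per-batch-number buffers and
// flushes a buffer once it reaches LazyDiskBatchSize. Deletes for restored items
// are batched separately, since they do not belong to a batch written this session.
// When the delete channel is closed, all remaining buffers are flushed before returning.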
341 | func (g *GPQ[d]) lazyDiskDeleter() {
342 | g.activeDBSessions.Add(1)
343 | defer g.activeDBSessions.Done()
344 |
345 | batch := make(map[uint][]*schema.DeleteMessage, 0)
346 | restored := make([]*schema.DeleteMessage, 0)
347 |
348 | for {
349 | select {
350 | case item, ok := <-g.lazyDiskDeleteChan:
351 | if !ok {
352 | g.batchHandler.deleteBatch(restored, 0, true)
353 |
354 | for i, v := range batch {
355 | g.batchHandler.deleteBatch(v, i, false)
356 | batch[i] = batch[i][:0]
357 | }
358 | return
359 | }
360 |
361 | if item.WasRestored {
362 | restored = append(restored, &item)
363 | if len(restored) >= int(g.options.LazyDiskBatchSize) {
364 | g.batchHandler.deleteBatch(restored, 0, true)
365 | restored = restored[:0]
366 | }
367 | continue
368 | }
369 |
370 | // Add to the batch; once it is full, process it and delete the items from the disk cache
371 | batch[item.BatchNumber] = append(batch[item.BatchNumber], &item)
372 | if len(batch[item.BatchNumber]) >= int(g.options.LazyDiskBatchSize) {
373 | g.batchHandler.deleteBatch(batch[item.BatchNumber], item.BatchNumber, false)
374 | batch[item.BatchNumber] = batch[item.BatchNumber][:0]
375 | }
376 | }
377 | }
378 | }
379 |
--------------------------------------------------------------------------------
/gpq_base_test.go:
--------------------------------------------------------------------------------
1 | package gpq_test
2 |
3 | import (
4 | "log"
5 | "sync"
6 | "sync/atomic"
7 | "testing"
8 | "time"
9 |
10 | "github.com/JustinTimperio/gpq"
11 | "github.com/JustinTimperio/gpq/schema"
12 | )
13 |
14 | func TestOrder(t *testing.T) {
15 | var (
16 | total uint = 1_000_000
17 | syncToDisk bool = false
18 | lazySync bool = false
19 | maxBuckets uint = 10
20 | )
21 |
22 | defaultMessageOptions := schema.EnqueueOptions{
23 | ShouldEscalate: false,
24 | EscalationRate: time.Duration(time.Second),
25 | CanTimeout: false,
26 | Timeout: time.Duration(time.Second * 1),
27 | }
28 |
29 | opts := schema.GPQOptions{
30 | MaxPriority: maxBuckets,
31 |
32 | DiskCacheEnabled: syncToDisk,
33 | DiskCachePath: "/tmp/gpq/test-order",
34 | DiskCacheCompression: false,
35 | DiskEncryptionEnabled: false,
36 | DiskEncryptionKey: []byte("1234567890"),
37 | LazyDiskCacheChannelSize: 1_000_000,
38 |
39 | DiskWriteDelay: time.Duration(time.Second),
40 | LazyDiskCacheEnabled: lazySync,
41 | LazyDiskBatchSize: 10_000,
42 | }
43 |
44 | _, queue, err := gpq.NewGPQ[string](opts)
45 | if err != nil {
46 | log.Fatalln(err)
47 | }
48 |
49 | var order = make(map[uint][]schema.Item[string])
50 |
51 | // Add the messages to the queue in order
52 | for i := uint(0); i < total; i++ {
53 | p := i % maxBuckets
54 | item := schema.NewItem(p, randomString(30), defaultMessageOptions)
55 |
56 | err := queue.Enqueue(item)
57 | if err != nil {
58 | log.Fatalln(err)
59 | }
60 |
61 | _, ok := order[p]
62 | if !ok {
63 | order[p] = make([]schema.Item[string], 0)
64 | }
65 | order[p] = append(order[p], item)
66 | }
67 |
68 | // Pull off the queue and verify order
69 | for i := uint(0); i < total; i++ {
70 | item, err := queue.Dequeue()
71 | if err != nil {
72 | log.Fatalln(err)
73 | }
74 |
75 | orderMap := order[item.Priority]
76 | if orderMap[0].Data != item.Data {
77 | log.Fatalln("Order mismatch", orderMap[0], item)
78 | }
79 | order[item.Priority] = orderMap[1:]
80 | }
81 |
82 | queue.Close()
83 |
84 | }
85 |
86 | func TestPrioritize(t *testing.T) {
87 |
88 | defaultMessageOptions := schema.EnqueueOptions{
89 | ShouldEscalate: true,
90 | EscalationRate: time.Duration(time.Second),
91 | CanTimeout: true,
92 | Timeout: time.Duration(time.Second * 10),
93 | }
94 |
95 | ptest := func(tm uint, sd bool, ls bool, mb uint) {
96 | opts := schema.GPQOptions{
97 | MaxPriority: mb,
98 |
99 | DiskCacheEnabled: sd,
100 | DiskCachePath: "/tmp/gpq/test-prioritize",
101 | DiskCacheCompression: false,
102 | DiskEncryptionEnabled: false,
103 | DiskEncryptionKey: []byte("12345678901234567890123456789012"),
104 | LazyDiskCacheChannelSize: tm / 2,
105 |
106 | DiskWriteDelay: time.Duration(time.Second),
107 | LazyDiskCacheEnabled: ls,
108 | LazyDiskBatchSize: 10_000,
109 | }
110 |
111 | _, queue, err := gpq.NewGPQ[uint](opts)
112 | if err != nil {
113 | log.Fatalln(err)
114 | }
115 |
116 | var (
117 | escalated uint64
118 | removed uint64
119 | received uint64
120 | )
121 |
122 | var wg sync.WaitGroup
123 | shutdown := make(chan struct{})
124 |
125 | wg.Add(1)
126 | go func() {
127 | defer wg.Done()
128 |
129 | ticker := time.NewTicker(time.Second * 1)
130 |
131 | forloop:
132 | for {
133 | select {
134 | case <-ticker.C:
135 | r, e, err := queue.Prioritize()
136 | if err != nil {
137 | log.Fatalln(err)
138 | }
139 |
140 | atomic.AddUint64(&removed, uint64(r))
141 | atomic.AddUint64(&escalated, uint64(e))
142 | t.Log("Received:", atomic.LoadUint64(&received), "Removed:", atomic.LoadUint64(&removed), "Escalated:", atomic.LoadUint64(&escalated))
143 |
144 | case <-shutdown:
145 | break forloop
146 | }
147 | }
148 | }()
149 |
150 | wg.Add(1)
151 | go func() {
152 | defer wg.Done()
153 | for j := uint(0); j < tm; j++ {
154 | p := j % mb
155 | item := schema.NewItem(p, j, defaultMessageOptions)
156 |
157 | err := queue.Enqueue(item)
158 | if err != nil {
159 | log.Fatalln(err)
160 | }
161 | }
162 | t.Log("Enqueued all items")
163 | }()
164 |
165 | wg.Add(1)
166 | go func() {
167 | defer wg.Done()
168 | for {
169 | if atomic.LoadUint64(&received)+atomic.LoadUint64(&removed) >= uint64(tm) {
170 | break
171 | }
172 | time.Sleep(time.Millisecond * 10)
173 | _, err := queue.Dequeue()
174 | if err != nil {
175 | continue
176 | }
177 | atomic.AddUint64(&received, 1)
178 | }
179 | t.Log("Dequeued all items")
180 | shutdown <- struct{}{}
181 | }()
182 |
183 | wg.Wait()
184 |
185 | if received == 0 || removed == 0 || escalated == 0 {
186 | t.Fatal("Prioritize failed, no value should be zero: received -", received, "removed -", removed, "escalated -", escalated)
187 | }
188 |
189 | if queue.ItemsInQueue() != 0 {
190 | t.Fatal("Items in queue:", queue.ItemsInQueue())
191 | }
192 |
193 | queue.Close()
194 | t.Log("Received:", received, "Removed:", removed, "Escalated:", escalated)
195 | }
196 |
197 | // Test Without Disk Features
198 | ptest(1_000_000, false, false, 10)
199 | // Test With Disk Features
200 | ptest(1_000_000, true, true, 10)
201 | }
202 |
203 | func TestRestoreOrder(t *testing.T) {
204 | var (
205 | total uint = 250_000
206 | syncToDisk bool = true
207 | lazySync bool = true
208 | maxBuckets uint = 10
209 | )
210 |
211 | defaultMessageOptions := schema.EnqueueOptions{
212 | ShouldEscalate: false,
213 | EscalationRate: time.Duration(time.Second),
214 | CanTimeout: false,
215 | Timeout: time.Duration(time.Second * 1),
216 | }
217 |
218 | opts := schema.GPQOptions{
219 | MaxPriority: maxBuckets,
220 |
221 | DiskCacheEnabled: syncToDisk,
222 | DiskCachePath: "/tmp/gpq/test-restore",
223 | DiskCacheCompression: false,
224 | DiskEncryptionEnabled: false,
225 | DiskEncryptionKey: []byte("1234567890"),
226 | LazyDiskCacheChannelSize: 1_000_000,
227 |
228 | DiskWriteDelay: time.Duration(time.Second),
229 | LazyDiskCacheEnabled: lazySync,
230 | LazyDiskBatchSize: 10_000,
231 | }
232 |
233 | _, queue, err := gpq.NewGPQ[string](opts)
234 | if err != nil {
235 | log.Fatalln(err)
236 | }
237 |
238 | var order = make(map[uint][]schema.Item[string])
239 |
240 | // Add the messages to the queue in order
241 | for i := uint(0); i < total; i++ {
242 | p := i % maxBuckets
243 | item := schema.NewItem(p, randomString(30), defaultMessageOptions)
244 |
245 | err := queue.Enqueue(item)
246 | if err != nil {
247 | log.Fatalln(err)
248 | }
249 |
250 | _, ok := order[p]
251 | if !ok {
252 | order[p] = make([]schema.Item[string], 0)
253 | }
254 | order[p] = append(order[p], item)
255 | }
256 |
257 | // Close the queue
258 | queue.Close()
259 |
260 | // Rebuild the queue
261 | restored, queue, err := gpq.NewGPQ[string](opts)
262 | if err != nil {
263 | log.Fatalln(err)
264 | }
265 |
266 | if restored == 0 {
267 | log.Fatalln("No items were restored")
268 | }
269 |
270 | var lastHigh = uint(0)
271 |
272 | // Pull off the queue and verify order
273 | for i := uint(0); i < total; i++ {
274 | item, err := queue.Dequeue()
275 | if err != nil {
276 | log.Fatalln(err)
277 | }
278 |
279 | if item.Priority < lastHigh {
280 | log.Fatalln("Priority order mismatch", lastHigh, maxBuckets)
281 | }
282 | lastHigh = item.Priority
283 |
284 | orderMap := order[item.Priority]
285 | var found bool
286 | for _, m := range orderMap {
287 | if m.Data == item.Data {
288 | found = true
289 | break
290 | }
291 | }
292 | if !found {
293 | log.Fatalln("Message not found in correct bucket")
294 | }
295 | }
296 |
297 | log.Println("Restore Order test passed")
298 |
299 | }
300 |
--------------------------------------------------------------------------------
/gpq_e2e_test.go:
--------------------------------------------------------------------------------
1 | package gpq_test
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "sync"
7 | "sync/atomic"
8 | "testing"
9 | "time"
10 |
11 | "github.com/JustinTimperio/gpq"
12 | "github.com/JustinTimperio/gpq/schema"
13 | )
14 |
15 | func TestE2E(t *testing.T) {
16 | var (
17 | total uint64 = 10_000_000
18 | syncToDisk bool = true
19 | lazySync bool = true
20 | maxBuckets uint = 10
21 | batchSize uint = 10_000
22 | senders uint = 4
23 | receivers uint = 4
24 | )
25 |
26 | defaultMessageOptions := schema.EnqueueOptions{
27 | ShouldEscalate: true,
28 | EscalationRate: time.Duration(time.Second),
29 | CanTimeout: true,
30 | Timeout: time.Duration(time.Second * 5),
31 | }
32 |
33 | opts := schema.GPQOptions{
34 | MaxPriority: maxBuckets,
35 |
36 | DiskCacheEnabled: syncToDisk,
37 | DiskCachePath: "/tmp/gpq/batch-e2e-parallel",
38 | DiskCacheCompression: true,
39 | DiskEncryptionEnabled: true,
40 | DiskEncryptionKey: []byte("12345678901234567890123456789012"),
41 | LazyDiskCacheChannelSize: 1_000_000,
42 |
43 | DiskWriteDelay: time.Duration(time.Second * 5),
44 | LazyDiskCacheEnabled: lazySync,
45 | LazyDiskBatchSize: 10_000,
46 | }
47 |
48 | _, queue, err := gpq.NewGPQ[uint](opts)
49 | if err != nil {
50 | log.Fatalln(err)
51 | }
52 |
53 | var (
54 | received uint64
55 | removed uint64
56 | escalated uint64
57 | )
58 |
59 | var wg sync.WaitGroup
60 | var shutdown = make(chan struct{}, receivers)
61 |
62 | wg.Add(1)
63 | go func() {
64 | defer wg.Done()
65 | ticker := time.NewTicker(time.Second * 1)
66 |
67 | breaker:
68 | for {
69 | select {
70 | case <-ticker.C:
71 | r, e, err := queue.Prioritize()
72 | if err != nil {
73 | log.Fatalln(err)
74 | }
75 |
76 | atomic.AddUint64(&received, uint64(r))
77 | atomic.AddUint64(&escalated, uint64(e))
78 | t.Log("Received:", atomic.LoadUint64(&received), "Removed:", atomic.LoadUint64(&removed), "Escalated:", atomic.LoadUint64(&escalated))
79 |
80 | case <-shutdown:
81 | break breaker
82 | }
83 | }
84 |
85 | t.Log("Exited Prioritize")
86 | }()
87 |
88 | for i := uint(0); i < senders; i++ {
89 | wg.Add(1)
90 | go func() {
91 | defer wg.Done()
92 | for j := uint(0); j < (uint(total)/batchSize)/senders; j++ {
93 |
94 | var miniBatch []schema.Item[uint]
95 | for i := uint(0); i < batchSize; i++ {
96 | p := j % maxBuckets
97 | item := schema.NewItem(p, j, defaultMessageOptions)
98 | miniBatch = append(miniBatch, item)
99 | }
100 |
101 | err := queue.EnqueueBatch(miniBatch)
102 | if err != nil {
103 | log.Fatalln(err)
104 | }
105 | }
106 | t.Log("Worker:" + fmt.Sprint(i) + " Enqueued all items")
107 | }()
108 | }
109 |
110 | for i := uint(0); i < receivers; i++ {
111 | wg.Add(1)
112 | go func() {
113 | defer wg.Done()
114 | for {
115 | if atomic.LoadUint64(&received)+atomic.LoadUint64(&removed) >= total {
116 | break
117 | }
118 | items, err := queue.DequeueBatch(batchSize)
119 | if err != nil {
120 | continue
121 | }
122 | atomic.AddUint64(&received, uint64(len(items)))
123 | }
124 | t.Log("Worker:" + fmt.Sprint(i) + " Dequeued all items")
125 | shutdown <- struct{}{}
126 | }()
127 | }
128 |
129 | wg.Wait()
130 | if queue.ItemsInQueue() != 0 {
131 | t.Fatal("Items in queue:", queue.ItemsInQueue())
132 | }
133 |
134 | t.Log("Waiting for queue to close")
135 | queue.Close()
136 |
137 | num := numberOfItemsInDB(opts.DiskCachePath)
138 | if num > 0 {
139 | log.Fatalln("Items in DB:", num)
140 | }
141 |
142 | t.Log("Batch Test Passed")
143 | }
144 |
--------------------------------------------------------------------------------
/gpq_helpers_test.go:
--------------------------------------------------------------------------------
1 | package gpq_test
2 |
3 | import (
4 | "bytes"
5 | "encoding/gob"
6 | "encoding/json"
7 | "fmt"
8 | "math/rand"
9 |
10 | "github.com/JustinTimperio/gpq/schema"
11 | "github.com/dgraph-io/badger/v4"
12 | )
13 |
14 | func randomString(length int) string {
15 | const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
16 | result := make([]byte, length)
17 | for i := range result {
18 | result[i] = charset[rand.Intn(len(charset))]
19 | }
20 | return string(result)
21 | }
22 |
23 | func numberOfItemsInDB(path string) int {
24 | var total int
25 | opts := badger.DefaultOptions(path)
26 | opts.Logger = nil
27 | db, err := badger.Open(opts)
28 | if err != nil {
29 | return 0
30 | }
31 |
32 | db.View(func(txn *badger.Txn) error {
33 | opts := badger.DefaultIteratorOptions
34 | opts.PrefetchSize = 10
35 | it := txn.NewIterator(opts)
36 | defer it.Close()
37 | for it.Rewind(); it.Valid(); it.Next() {
38 |
39 | val := it.Item()
40 | val.Value(func(v []byte) error {
41 | var buf bytes.Buffer
42 | buf.Write(v)
43 | obj := schema.Item[uint]{} // items written by TestE2E are Item[uint]; gob will not decode an unsigned field into a signed one
44 | err = gob.NewDecoder(&buf).Decode(&obj)
45 | if err != nil {
46 | return err
47 | }
48 | jsonObj, err := json.MarshalIndent(obj, "", " ")
49 | if err != nil {
50 | return err
51 | }
52 | fmt.Println(string(jsonObj))
53 | total++
54 | return nil
55 |
56 | })
57 | }
58 |
59 | return nil
60 | })
61 |
62 | return total
63 | }
64 |
--------------------------------------------------------------------------------
/gpq_parallel_test.go:
--------------------------------------------------------------------------------
1 | package gpq_test
2 |
3 | import (
4 | "log"
5 | "sync"
6 | "testing"
7 | "time"
8 |
9 | "github.com/JustinTimperio/gpq"
10 | "github.com/JustinTimperio/gpq/schema"
11 | )
12 |
13 | // Tests pushing and pulling single messages in parallel
14 | func TestSingleParallel(t *testing.T) {
15 | var (
16 | total uint = 1_000_000
17 | syncToDisk bool = false
18 | lazySync bool = false
19 | maxBuckets uint = 10
20 | )
21 |
22 | defaultMessageOptions := schema.EnqueueOptions{
23 | ShouldEscalate: false,
24 | EscalationRate: time.Duration(time.Second),
25 | CanTimeout: false,
26 | Timeout: time.Duration(time.Second * 10),
27 | }
28 |
29 | opts := schema.GPQOptions{
30 | MaxPriority: maxBuckets,
31 |
32 | DiskCacheEnabled: syncToDisk,
33 | DiskCachePath: "/tmp/gpq/batch-parallel",
34 | DiskCacheCompression: false,
35 | DiskEncryptionEnabled: false,
36 | DiskEncryptionKey: []byte("12345678901234567890123456789012"),
37 | LazyDiskCacheChannelSize: 1_000_000,
38 |
39 | DiskWriteDelay: time.Duration(time.Second),
40 | LazyDiskCacheEnabled: lazySync,
41 | LazyDiskBatchSize: 10_000,
42 | }
43 |
44 | _, queue, err := gpq.NewGPQ[uint](opts)
45 | if err != nil {
46 | log.Fatalln(err)
47 | }
48 |
49 | var (
50 | received uint
51 | )
52 |
53 | var wg sync.WaitGroup
54 |
55 | wg.Add(1)
56 | go func() {
57 | defer wg.Done()
58 | for j := uint(0); j < total; j++ {
59 | p := j % maxBuckets
60 | item := schema.NewItem(p, j, defaultMessageOptions)
61 |
62 | err := queue.Enqueue(item)
63 | if err != nil {
64 | log.Fatalln(err)
65 | }
66 | }
67 | t.Log("Enqueued all items")
68 | }()
69 |
70 | wg.Add(1)
71 | go func() {
72 | defer wg.Done()
73 | for {
74 | if received >= total {
75 | break
76 | }
77 | _, err := queue.Dequeue()
78 | if err != nil {
79 | continue
80 | }
81 | received++
82 | }
83 | t.Log("Dequeued all items")
84 | }()
85 |
86 | wg.Wait()
87 | if queue.ItemsInQueue() != 0 {
88 | t.Fatal("Items in queue:", queue.ItemsInQueue())
89 | }
90 |
91 | queue.Close()
92 | t.Log("Single Parallel Test Passed")
93 | }
94 |
95 | // Tests pushing and pulling batches of messages in parallel
96 | func TestBatchParallel(t *testing.T) {
97 | var (
98 | total uint = 1_000_000
99 | syncToDisk bool = false
100 | lazySync bool = false
101 | maxBuckets uint = 10
102 | batchSize uint = 10_000
103 | )
104 |
105 | defaultMessageOptions := schema.EnqueueOptions{
106 | ShouldEscalate: false,
107 | EscalationRate: time.Duration(time.Second),
108 | CanTimeout: false,
109 | Timeout: time.Duration(time.Second * 10),
110 | }
111 |
112 | opts := schema.GPQOptions{
113 | MaxPriority: maxBuckets,
114 |
115 | DiskCacheEnabled: syncToDisk,
116 | DiskCachePath: "/tmp/gpq/batch-parallel",
117 | DiskCacheCompression: false,
118 | DiskEncryptionEnabled: false,
119 | DiskEncryptionKey: []byte("12345678901234567890123456789012"),
120 | LazyDiskCacheChannelSize: 1_000_000,
121 |
122 | DiskWriteDelay: time.Duration(time.Second),
123 | LazyDiskCacheEnabled: lazySync,
124 | LazyDiskBatchSize: 10_000,
125 | }
126 |
127 | _, queue, err := gpq.NewGPQ[uint](opts)
128 | if err != nil {
129 | log.Fatalln(err)
130 | }
131 |
132 | var (
133 | received uint
134 | )
135 |
136 | var wg sync.WaitGroup
137 |
138 | wg.Add(1)
139 | go func() {
140 | defer wg.Done()
141 | for j := uint(0); j < total/batchSize; j++ {
142 |
143 | var miniBatch []schema.Item[uint]
144 | for i := uint(0); i < batchSize; i++ {
145 | p := j % maxBuckets
146 | item := schema.NewItem(p, j, defaultMessageOptions)
147 | miniBatch = append(miniBatch, item)
148 | }
149 |
150 | err := queue.EnqueueBatch(miniBatch)
151 | if err != nil {
152 | log.Fatalln(err)
153 | }
154 | }
155 | t.Log("Enqueued all items")
156 | }()
157 |
158 | wg.Add(1)
159 | go func() {
160 | defer wg.Done()
161 | for {
162 | if received >= total {
163 | break
164 | }
165 | items, err := queue.DequeueBatch(batchSize)
166 | if err != nil {
167 | continue
168 | }
169 | received += uint(len(items))
170 | }
171 | t.Log("Dequeued all items")
172 | }()
173 |
174 | wg.Wait()
175 | if queue.ItemsInQueue() != 0 {
176 | t.Fatal("Items in queue:", queue.ItemsInQueue())
177 | }
178 |
179 | queue.Close()
180 | t.Log("Batch Parallel Test Passed")
181 | }
182 |
--------------------------------------------------------------------------------
/graphs/graph.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/csv"
5 | "log"
6 | "net/http"
7 | _ "net/http/pprof"
8 | "os"
9 | "runtime"
10 | "strconv"
11 | "time"
12 |
13 | "github.com/JustinTimperio/gpq"
14 | "github.com/JustinTimperio/gpq/schema"
15 | )
16 |
17 | var (
18 | maxTotal int = 10000000
19 | nMaxBuckets int = 100
20 | defaultMessageOptions = schema.EnqueueOptions{
21 | ShouldEscalate: true,
22 | EscalationRate: time.Duration(time.Second),
23 | CanTimeout: true,
24 | Timeout: time.Duration(time.Second * 5),
25 | }
26 | )
27 |
28 | func main() {
29 | // Setup the pprof server if you want to profile
30 | go func() {
31 | log.Println(http.ListenAndServe("0.0.0.0:6060", nil))
32 | }()
33 |
34 | var lazy = false
35 | var disk = false
36 |
37 | iter(true, disk, lazy)
38 | iter(false, disk, lazy)
39 |
40 | }
41 |
42 | func iter(prioritize bool, disk, lazy bool) {
43 |
44 | var name = "bench-report-no-repro.csv"
45 | if prioritize {
46 | name = "bench-report-repro.csv"
47 | }
48 |
49 | // Open the CSV file for writing
50 | os.Remove(name)
51 | file, err := os.Create(name)
52 | if err != nil {
53 | log.Fatal(err)
54 | }
55 | defer file.Close()
56 |
57 | // Create a CSV writer
58 | writer := csv.NewWriter(file)
59 | defer writer.Flush()
60 |
61 | // Write the header row to the CSV file
62 | header := []string{"Total Items", "Buckets", "Removed", "Escalated", "Time Elapsed", "Time to Send", "Time to Receive"}
63 | writer.Write(header)
64 |
65 | // Test the bench function for each million entries up to maxTotal
66 | for total := 1000000; total <= maxTotal; total += 1000000 {
67 | // Test the bench function for each increment of 10 buckets
68 | for buckets := 5; buckets <= nMaxBuckets; buckets += 5 {
69 | log.Println("Starting test for", total, "entries and", buckets, "buckets")
70 |
71 | // Run the bench function
72 | totalElapsed, sent, received, removed, escalated := bench(total, buckets, prioritize, disk, lazy)
73 | runtime.GC()
74 |
75 | // Write the statistics to the CSV file
76 | stats := []string{
77 | strconv.Itoa(total),
78 | strconv.Itoa(buckets),
79 | strconv.Itoa(int(removed)),
80 | strconv.Itoa(int(escalated)),
81 | strconv.FormatFloat(totalElapsed.Seconds(), 'f', 6, 64),
82 | strconv.FormatFloat(sent.Seconds(), 'f', 6, 64),
83 | strconv.FormatFloat(received.Seconds(), 'f', 6, 64),
84 | }
85 | writer.Write(stats)
86 | }
87 | }
88 | }
89 |
90 | func bench(total int, buckets int, prioritize bool, disk bool, lazy bool) (totalElapsed time.Duration, sentElapsed time.Duration, receivedElapsed time.Duration, removed uint64, escalated uint64) {
91 |
92 | opts := schema.GPQOptions{
93 | MaxPriority: uint(buckets),
94 | DiskCacheEnabled: disk,
95 | DiskCachePath: "/tmp/gpq/graphs",
96 | DiskCacheCompression: false,
97 | DiskEncryptionEnabled: false,
98 | DiskEncryptionKey: []byte("12345678901234567890123456789012"),
99 | LazyDiskCacheEnabled: lazy,
100 | LazyDiskBatchSize: 10_000,
101 | LazyDiskCacheChannelSize: uint(total),
102 | DiskWriteDelay: time.Duration(5 * time.Second),
103 | }
104 |
105 | // Create a new GPQ with the given number of buckets, using int as the data type
106 | _, queue, err := gpq.NewGPQ[int](opts)
107 | if err != nil {
108 | log.Fatalln(err)
109 | }
110 |
111 | // If you want to prioritize the queue, start the prioritize function
112 | // This will move items to the front of the queue if they have been waiting too long
113 | if prioritize {
114 | go func() {
115 | for {
116 | time.Sleep(1 * time.Second)
117 | timedOut, prioritized, err := queue.Prioritize()
118 | if err != nil {
119 | log.Fatalln(err)
120 | }
121 | escalated += uint64(prioritized)
122 | removed += uint64(timedOut)
123 | }
124 | }()
125 | }
126 |
127 | timer := time.Now()
128 | for i := 0; i < total; i++ {
129 | p := i % buckets
130 | item := schema.NewItem(uint(p), i, defaultMessageOptions)
131 | err := queue.Enqueue(item)
132 | if err != nil {
133 | log.Fatalln(err)
134 | }
135 | }
136 | sendTime := time.Since(timer)
137 |
138 | timer = time.Now()
139 | breaker := 0
140 | for i := 0; i < total; i++ {
141 | _, err := queue.Dequeue()
142 | if err != nil {
143 | if breaker > 1000 {
144 | log.Println("Length of queue:", queue.ItemsInQueue())
145 | log.Panicln("An error occurred while dequeuing:", err, "at index", i, "of", total, "with total removed", removed)
146 | }
147 | if removed+uint64(i) == uint64(total) {
148 | break
149 | }
150 | breaker++
151 | i--
152 | }
153 | }
154 | receiveTime := time.Since(timer)
155 |
156 | // Wait for all db sessions to sync to disk
157 | queue.Close()
158 | return sendTime + receiveTime, sendTime, receiveTime, removed, escalated
159 |
160 | }
161 |
--------------------------------------------------------------------------------
/helpers.go:
--------------------------------------------------------------------------------
1 | package gpq
2 |
3 | import (
4 | "sync"
5 |
6 | "github.com/JustinTimperio/gpq/disk"
7 | "github.com/JustinTimperio/gpq/schema"
8 | )
9 |
10 | type batchHandler[T any] struct {
11 | mux *sync.Mutex
12 | syncedBatches map[uint]bool
13 | deletedBatches map[uint]bool
14 | diskCache *disk.Disk[T]
15 | }
16 |
17 | func newBatchHandler[T any](diskCache *disk.Disk[T]) *batchHandler[T] {
18 | return &batchHandler[T]{
19 | mux: &sync.Mutex{},
20 | syncedBatches: make(map[uint]bool),
21 | deletedBatches: make(map[uint]bool),
22 | diskCache: diskCache,
23 | }
24 | }
25 |
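// processBatch writes a batch of items to the disk cache, unless the batch has
// already been marked deleted (every item in it was dequeued before the write
// was flushed), and then records the batch as synced.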
26 | func (bh *batchHandler[T]) processBatch(batch []*schema.Item[T], batchNumber uint) {
27 | bh.mux.Lock()
28 | defer bh.mux.Unlock()
29 |
30 | deleted := bh.deletedBatches[batchNumber]
31 | if !deleted {
32 | bh.diskCache.ProcessBatch(batch)
33 | }
34 |
35 | bh.syncedBatches[batchNumber] = true
36 | bh.deletedBatches[batchNumber] = false
37 | }
38 |
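// deleteBatch removes a batch of items from the disk cache. Restored batches are
// deleted immediately; otherwise the batch is marked deleted and the on-disk
// entries are removed only if the batch was previously synced (if it was never
// synced, marking it deleted is enough to stop processBatch from writing it).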
39 | func (bh *batchHandler[T]) deleteBatch(batch []*schema.DeleteMessage, batchNumber uint, wasRestored bool) {
40 | bh.mux.Lock()
41 | defer bh.mux.Unlock()
42 |
43 | if wasRestored {
44 | bh.diskCache.DeleteBatch(batch)
45 | return
46 | }
47 |
48 | synced := bh.syncedBatches[batchNumber]
49 | bh.syncedBatches[batchNumber] = false
50 | bh.deletedBatches[batchNumber] = true
51 | 
52 | // Only touch the disk if the batch was actually synced to it
53 | if synced {
54 | 	bh.diskCache.DeleteBatch(batch)
55 | }
55 |
56 | }
57 |
58 | type batchCounter struct {
59 | mux *sync.Mutex
60 | batchNumber uint
61 | batchCounter uint
62 | batchSize uint
63 | }
64 |
65 | func newBatchCounter(batchSize uint) *batchCounter {
66 | return &batchCounter{
67 | mux: &sync.Mutex{},
68 | batchNumber: 0,
69 | batchCounter: 0,
70 | batchSize: batchSize,
71 | }
72 | }
73 |
74 | func (bc *batchCounter) increment() (batchNumber uint) {
75 | bc.mux.Lock()
76 | defer bc.mux.Unlock()
77 |
78 | if (bc.batchCounter % bc.batchSize) == 0 {
79 | bc.batchNumber++
80 | }
81 | bc.batchCounter++
82 | return bc.batchNumber
83 | }
84 |
--------------------------------------------------------------------------------
/queues/cpq.go:
--------------------------------------------------------------------------------
1 | package queues
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "sync"
7 |
8 | "github.com/JustinTimperio/gpq/disk"
9 | "github.com/JustinTimperio/gpq/ftime"
10 | "github.com/JustinTimperio/gpq/queues/gheap"
11 | "github.com/JustinTimperio/gpq/schema"
12 |
13 | "github.com/cornelk/hashmap"
14 | "github.com/tidwall/btree"
15 | )
16 |
17 | type CorePriorityQueue[T any] struct {
18 | buckets *hashmap.Map[uint, *priorityQueue[T]]
19 | bpq *btree.Set[uint]
20 | mux *sync.RWMutex
21 | itemsInQueue uint
22 | disk *disk.Disk[T]
23 | options schema.GPQOptions
24 | lazyDiskDeleteChan chan schema.DeleteMessage
25 | }
26 |
27 | func NewCorePriorityQueue[T any](options schema.GPQOptions, diskCache *disk.Disk[T], deleteChan chan schema.DeleteMessage) CorePriorityQueue[T] {
28 | buckets := hashmap.New[uint, *priorityQueue[T]]()
29 | for i := uint(0); i < options.MaxPriority; i++ {
30 | pq := newPriorityQueue[T]()
31 | buckets.Set(i, &pq)
32 | }
33 | var bpq btree.Set[uint]
34 |
35 | return CorePriorityQueue[T]{
36 | buckets: buckets,
37 | mux: &sync.RWMutex{},
38 | itemsInQueue: 0,
39 | bpq: &bpq,
40 | disk: diskCache,
41 | options: options,
42 | lazyDiskDeleteChan: deleteChan,
43 | }
44 | }
45 |
46 | func (cpq *CorePriorityQueue[T]) ItemsInQueue() uint {
47 | cpq.mux.RLock()
48 | defer cpq.mux.RUnlock()
49 | return cpq.itemsInQueue
50 | }
51 |
52 | func (cpq *CorePriorityQueue[T]) ActiveBuckets() uint {
53 | cpq.mux.RLock()
54 | defer cpq.mux.RUnlock()
55 | return uint(cpq.bpq.Len())
56 | }
57 |
58 | func (cpq *CorePriorityQueue[T]) Enqueue(data *schema.Item[T]) error {
59 | cpq.mux.Lock()
60 | defer cpq.mux.Unlock()
61 |
62 | bucket, ok := cpq.buckets.Get(data.Priority)
63 | if !ok {
64 | return errors.New("Core Priority Queue Error: Priority not found")
65 | }
66 |
67 | cpq.bpq.Insert(data.Priority)
68 | gheap.Enqueue[T](bucket, data)
69 | cpq.itemsInQueue++
70 |
71 | return nil
72 | }
73 |
74 | func (cpq *CorePriorityQueue[T]) EnqueueBatch(data []*schema.Item[T]) []error {
75 | cpq.mux.Lock()
76 | defer cpq.mux.Unlock()
77 |
78 | var errors []error
79 |
80 | for _, item := range data {
81 | bucket, ok := cpq.buckets.Get(item.Priority)
82 | if !ok {
83 | errors = append(errors, fmt.Errorf("Core Priority Queue Error: No bucket found with priority %d", item.Priority))
84 | continue
85 | }
86 |
87 | cpq.bpq.Insert(item.Priority)
88 | gheap.Enqueue[T](bucket, item)
89 | cpq.itemsInQueue++
90 | }
91 |
92 | return errors
93 |
94 | }
95 |
96 | func (cpq *CorePriorityQueue[T]) Dequeue() (*schema.Item[T], error) {
97 | cpq.mux.Lock()
98 | defer cpq.mux.Unlock()
99 |
100 | var item *schema.Item[T]
101 | for {
102 | priority, ok := cpq.bpq.Min()
103 | if !ok {
104 | return nil, errors.New("Core Priority Queue Error: No items found in the queue")
105 | }
106 |
107 | bucket, ok := cpq.buckets.Get(priority)
108 | if !ok {
109 | return nil, errors.New("Core Priority Queue Error: Priority not found")
110 | }
111 |
112 | var err error
113 | item, err = gheap.Dequeue[T](bucket)
114 | if err != nil {
115 | if bucket.Len() == 0 {
116 | cpq.bpq.Delete(priority)
117 | } else {
118 | return nil, err
119 | }
120 | } else {
121 | break
122 | }
123 |
124 | }
125 |
126 | cpq.itemsInQueue--
127 |
128 | return item, nil
129 | }
130 |
131 | func (cpq *CorePriorityQueue[T]) DequeueBatch(batchSize uint) ([]*schema.Item[T], []error) {
132 | cpq.mux.Lock()
133 | defer cpq.mux.Unlock()
134 |
135 | if cpq.bpq.Len() == 0 {
136 | return nil, []error{errors.New("Core Priority Queue Error: No items found in the queue")}
137 | }
138 |
139 | batch := make([]*schema.Item[T], 0, batchSize)
140 | for i := 0; i < int(batchSize); i++ {
141 | priority, ok := cpq.bpq.Min()
142 | if !ok {
143 | break
144 | }
145 |
146 | bucket, ok := cpq.buckets.Get(priority)
147 | if !ok {
148 | return batch, []error{errors.New("Core Priority Queue Error: Internal error, priority not found")}
149 | }
150 |
151 | item, err := gheap.Dequeue[T](bucket)
152 | if err != nil {
153 | // The only error that can be returned here is an empty-queue error
154 | break
155 | }
156 |
157 | cpq.itemsInQueue--
158 | batch = append(batch, item)
159 | if bucket.Len() == 0 {
160 | cpq.bpq.Delete(priority)
161 | }
162 | }
163 |
164 | return batch, nil
165 | }
166 |
167 | func (cpq *CorePriorityQueue[T]) Prioritize() (removed uint, escalated uint, err error) {
168 | cpq.mux.Lock()
169 | defer cpq.mux.Unlock()
170 |
171 | cpq.buckets.Range(func(key uint, bucket *priorityQueue[T]) bool {
172 | // Iterate through the bucket and remove items that have been waiting too long
173 | var n = bucket.Len()
174 | var currentIndex uint
175 | for i := 0; i < n; i++ {
176 | item := bucket.items[currentIndex]
177 |
178 | if item.CanTimeout {
179 | currentTime := ftime.Now()
180 | if currentTime.Sub(item.SubmittedAt) > item.Timeout {
181 |
182 | if cpq.options.DiskCacheEnabled {
183 | if cpq.options.LazyDiskCacheEnabled {
184 | dm := schema.DeleteMessage{
185 | BatchNumber: item.BatchNumber,
186 | DiskUUID: item.DiskUUID,
187 | WasRestored: item.WasRestored,
188 | }
189 |
190 | cpq.lazyDiskDeleteChan <- dm
191 | } else {
192 | cpq.disk.DeleteSingle(item.DiskUUID)
193 | }
194 | }
195 |
196 | _, e := gheap.Remove[T](bucket, item)
197 | if e != nil {
198 | err = fmt.Errorf("Core Priority Queue Error: %w", err)
199 | return false
200 | }
201 | cpq.itemsInQueue--
202 | removed++
203 |
204 | } else {
205 | currentIndex++
206 | }
207 |
208 | } else {
209 | currentIndex++
210 | }
211 | }
212 | return true
213 | })
214 |
215 | // Iterate through the buckets and remove empty buckets
216 | cpq.buckets.Range(func(key uint, bucket *priorityQueue[T]) bool {
217 | if bucket.Len() == 0 {
218 | cpq.bpq.Delete(key)
219 | }
220 | return true
221 | })
222 |
223 | if err != nil {
224 | return removed, escalated, err
225 | }
226 |
227 | // This is a very basic but fast algorithm that iterates from the front to the back of each bucket.
228 | // If an item can escalate and its escalation interval has elapsed, we check that the previous item
229 | // was not just escalated and that we are not already first in the queue. This strategy means that
230 | // a message can only move up the queue when the messages ahead of it are not also being escalated.
231 | // In this model, messages that never escalate can still be displaced by higher-priority messages,
232 | // allowing for fairly complex queue strategies. This may enable more advanced features in the future.
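// For example, given a bucket ordered [a, b, c] where only c is due to escalate,
// c swaps with b and moves one slot toward the front; each Prioritize pass
// advances a due item by at most one position.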
233 | cpq.buckets.Range(func(key uint, bucket *priorityQueue[T]) bool {
234 | var lastItemWasEscalated bool
235 | var n = bucket.Len()
236 | 
237 | for i := 0; i < n; i++ {
238 | item := bucket.items[i]
239 |
240 | if item.ShouldEscalate {
241 | currentTime := ftime.Now()
242 | if currentTime.Sub(item.LastEscalated) > item.EscalationRate {
243 |
244 | if !lastItemWasEscalated && i != 0 {
245 | item.LastEscalated = currentTime
246 | bucket.UpdatePriority(item, i-1)
247 | escalated++
248 | }
249 | // We don't need to update lastItemWasEscalated here because we just swapped
250 | // the item at the cursor index with the item at index i-1. The item now at the
251 | // previous index was not escalated, so lastItemWasEscalated is already correct.
252 | }
253 | } else {
254 | lastItemWasEscalated = false
255 | }
256 | }
257 |
258 | return true
259 | })
260 |
261 | return removed, escalated, nil
262 | }
263 |
--------------------------------------------------------------------------------
/queues/gheap/gheap.go:
--------------------------------------------------------------------------------
1 | // Copyright 2009 The Go Authors. All rights reserved.
2 | // Use of this source code is governed by a BSD-style
3 | // license that can be found in the LICENSE file.
4 |
5 | // Package gheap provides heap operations for any type that implements
6 | // the Heap interface. A heap is a tree with the property that each node is the
7 | // minimum-valued node in its subtree.
8 | //
9 | // The minimum element in the tree is the root, at index 0.
10 | //
11 | // This package has been modified to allow for generics - Justin
12 | package gheap
13 |
14 | import (
15 | "errors"
16 | "sort"
17 |
18 | "github.com/JustinTimperio/gpq/schema"
19 | )
20 |
21 | // The Interface type describes the requirements
22 | // for a type using the routines in this package.
23 | // Any type that implements it may be used as a
24 | // min-heap with the following invariants (established after
25 | // [Init] has been called or if the data is empty or sorted):
26 | //
27 | // !h.Less(j, i) for 0 <= i < h.Len() and 2*i+1 <= j <= 2*i+2 and j < h.Len()
28 | //
29 | // Note that [Enqueue] and [Dequeue] in this interface are for this package's
30 | // implementation to call. To add and remove things from the heap,
31 | // use the package-level [Enqueue] and [Dequeue] functions.
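// Within this repository, the priorityQueue type in queues/pq.go is the concrete
// implementation of this interface used for the GPQ buckets.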
32 | type Heap[S any] interface {
33 | sort.Interface
34 | Enqueue(x *schema.Item[S]) // add x as element Len()
35 | Dequeue() (*schema.Item[S], error) // remove and return element Len() - 1.
36 | }
37 |
38 | // Init establishes the heap invariants required by the other routines in this package.
39 | // Init is idempotent with respect to the heap invariants
40 | // and may be called whenever the heap invariants may have been invalidated.
41 | // The complexity is O(n) where n = h.Len().
42 | func Init[S any](h Heap[S]) {
43 | // heapify
44 | n := h.Len()
45 | for i := n/2 - 1; i >= 0; i-- {
46 | down(h, i, n)
47 | }
48 | }
49 |
50 | // Enqueue pushes the element x onto the heap.
51 | // The complexity is O(log n) where n = h.Len().
52 | func Enqueue[S any](h Heap[S], x *schema.Item[S]) {
53 | h.Enqueue(x)
54 | up(h, h.Len()-1)
55 | }
56 |
57 | // Dequeue removes and returns the minimum element (according to Less) from the heap.
58 | // The complexity is O(log n) where n = h.Len().
59 | // Dequeue is equivalent to calling [Remove] with the root item.
60 | func Dequeue[S any](h Heap[S]) (data *schema.Item[S], err error) {
61 | if h.Len() == 0 {
62 | return data, errors.New("No items in the queue")
63 | }
64 | n := h.Len() - 1
65 | h.Swap(0, n)
66 | down(h, 0, n)
67 | return h.Dequeue()
68 | }
69 |
70 | // Remove removes and returns the given item from the heap, located via its stored Index.
71 | // The complexity is O(log n) where n = h.Len().
72 | func Remove[S any](h Heap[S], item *schema.Item[S]) (data *schema.Item[S], err error) {
73 | i := item.Index
74 | n := h.Len() - 1
75 | if n != i {
76 | h.Swap(i, n)
77 | if !down(h, i, n) {
78 | up(h, i)
79 | }
80 | }
81 | return h.Dequeue()
82 | }
83 |
84 | // Prioritize re-establishes the heap ordering after the element at index i has changed its value.
85 | // Changing the value of the element at index i and then calling Prioritize is equivalent to,
86 | // but less expensive than, calling [Remove] on the element followed by an [Enqueue] of the new value.
87 | // The complexity is O(log n) where n = h.Len().
88 | func Prioritize[S any](h Heap[S], i int) {
89 | if !down(h, i, h.Len()) {
90 | up(h, i)
91 | }
92 | }
93 |
94 | func up[S any](h Heap[S], j int) {
95 | for {
96 | i := (j - 1) / 2 // parent
97 | if i == j || !h.Less(j, i) {
98 | break
99 | }
100 | h.Swap(i, j)
101 | j = i
102 | }
103 | }
104 |
105 | func down[S any](h Heap[S], i0, n int) bool {
106 | i := i0
107 | for {
108 | j1 := 2*i + 1
109 | if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
110 | break
111 | }
112 | j := j1 // left child
113 | if j2 := j1 + 1; j2 < n && h.Less(j2, j1) {
114 | j = j2 // = 2*i + 2 // right child
115 | }
116 | if !h.Less(j, i) {
117 | break
118 | }
119 | h.Swap(i, j)
120 | i = j
121 | }
122 | return i > i0
123 | }
124 |
--------------------------------------------------------------------------------
/queues/pq.go:
--------------------------------------------------------------------------------
1 | package queues
2 |
3 | import (
4 | "errors"
5 |
6 | "github.com/JustinTimperio/gpq/queues/gheap"
7 | "github.com/JustinTimperio/gpq/schema"
8 | )
9 |
10 | // newPriorityQueue creates a new priorityQueue and initializes the heap invariants
11 | func newPriorityQueue[T any]() priorityQueue[T] {
12 | pq := priorityQueue[T]{
13 | items: make([]*schema.Item[T], 0),
14 | }
15 | gheap.Init[T](&pq)
16 |
17 | return pq
18 | }
19 |
20 | // priorityQueue implements heap.Interface and holds Items.
21 | type priorityQueue[T any] struct {
22 | items []*schema.Item[T]
23 | }
24 |
25 | // Len is used to get the length of the heap
26 | // It is needed to implement the heap.Interface
27 | func (pq *priorityQueue[T]) Len() int {
28 | return len(pq.items)
29 | }
30 |
31 | // Less is used to compare the priority of two items
32 | // It is needed to implement the heap.Interface
33 | func (pq *priorityQueue[T]) Less(i, j int) bool {
34 | return pq.items[i].InternalPriority < pq.items[j].InternalPriority
35 | }
36 |
37 | // Swap is used to swap two items in the heap
38 | // It is needed to implement the heap.Interface
39 | func (pq *priorityQueue[T]) Swap(i, j int) {
40 | pq.items[i], pq.items[j] = pq.items[j], pq.items[i]
41 | pq.items[i].Index = i
42 | pq.items[j].Index = j
43 | }
44 |
45 | // Enqueue adds an item to the end of the heap and records its insertion index
46 | func (pq *priorityQueue[T]) Enqueue(data *schema.Item[T]) {
47 | n := len(pq.items)
48 | data.InternalPriority = n
49 | item := data
50 | item.Index = n
51 | pq.items = append(pq.items, item)
52 | }
53 |
54 | // Dequeue removes and returns the last item in the slice (gheap.Dequeue first swaps the minimum element into that position)
55 | func (pq *priorityQueue[T]) Dequeue() (data *schema.Item[T], err error) {
56 | if len(pq.items) == 0 {
57 | return data, errors.New("Internal Priority Queue Error: No items found in the queue")
58 | }
59 |
60 | old := pq.items
61 | n := len(old)
62 | item := old[n-1]
63 | old[n-1] = nil // don't stop the GC from reclaiming the item eventually
64 | pq.items = old[0 : n-1]
65 |
66 | return item, nil
67 | }
68 |
69 | // UpdatePriority modifies the priority of an Item in the queue.
70 | func (pq *priorityQueue[T]) UpdatePriority(item *schema.Item[T], newPriority int) {
71 | item.InternalPriority = newPriority
72 | gheap.Prioritize[T](pq, item.Index)
73 | }
74 |
--------------------------------------------------------------------------------
/schema/schema.go:
--------------------------------------------------------------------------------
1 | package schema
2 |
3 | import (
4 | "bytes"
5 | "encoding/gob"
6 | "time"
7 |
8 | "github.com/dgraph-io/badger/v4"
9 | )
10 |
11 | // Item is used to store items in the GPQ
12 | type Item[d any] struct {
13 | // User
14 | Priority uint
15 | Data d
16 | DiskUUID []byte
17 | ShouldEscalate bool
18 | EscalationRate time.Duration
19 | CanTimeout bool
20 | Timeout time.Duration
21 |
22 | // Internal
23 | SubmittedAt time.Time
24 | LastEscalated time.Time
25 | Index int
26 | InternalPriority int
27 | BatchNumber uint
28 | WasRestored bool
29 | }
30 |
31 | // NewItem creates a new item using the options provided, with the data stored in a generic type
32 | func NewItem[d any](priority uint, data d, options EnqueueOptions) Item[d] {
33 | return Item[d]{
34 | Priority: priority,
35 | Data: data,
36 | ShouldEscalate: options.ShouldEscalate,
37 | EscalationRate: options.EscalationRate,
38 | CanTimeout: options.CanTimeout,
39 | Timeout: options.Timeout,
40 | SubmittedAt: time.Now(),
41 | }
42 | }
43 |
44 | // ToBytes gob-encodes an item into a byte slice
45 | func (i *Item[d]) ToBytes() ([]byte, error) {
46 | // Encode the item to a byte slice
47 | var buf bytes.Buffer
48 | enc := gob.NewEncoder(&buf)
49 | err := enc.Encode(i)
50 | if err != nil {
51 | return nil, err
52 | }
53 |
54 | return buf.Bytes(), nil
55 | }
56 |
57 | // FromBytes decodes a gob-encoded byte slice into the item
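// A round-trip sketch (the concrete type parameter is illustrative):
//
//	blob, err := item.ToBytes()
//	if err != nil {
//		// handle the error
//	}
//	var decoded Item[string]
//	err = decoded.FromBytes(blob)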
58 | func (i *Item[d]) FromBytes(data []byte) error {
59 | // Decode the item from a byte slice
60 | dec := gob.NewDecoder(bytes.NewReader(data))
61 | err := dec.Decode(i)
62 | if err != nil {
63 | return err
64 | }
65 |
66 | return nil
67 | }
68 |
69 | // GPQOptions is used to configure the GPQ
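// A minimal in-memory configuration sketch (values are illustrative; the disk and
// lazy-disk fields may be left zero when the disk cache is disabled):
//
//	opts := GPQOptions{
//		MaxPriority:      10,
//		DiskCacheEnabled: false,
//	}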
70 | type GPQOptions struct {
71 | // Logger provides a custom logger interface for the Badger cache
72 | Logger badger.Logger
73 | // MaxPriority is the maximum priority allowed by this GPQ
74 | MaxPriority uint
75 |
76 | // DiskCacheEnabled is used to enable or disable the disk cache
77 | DiskCacheEnabled bool
78 | // DiskCachePath is the local path to the disk cache directory
79 | DiskCachePath string
80 | // DiskWriteDelay is the delay between writes to disk (used to batch writes)
81 | DiskWriteDelay time.Duration
82 | // DiskCacheCompression is used to enable or disable zstd compression on the disk cache
83 | DiskCacheCompression bool
84 |
85 | // LazyDiskCacheEnabled is used to enable or disable the lazy disk cache
86 | LazyDiskCacheEnabled bool
87 | // LazyDiskBatchSize is the number of items to write to disk at once
88 | LazyDiskBatchSize uint
89 | // LazyDiskCacheChannelSize is the length of the channel buffer for the disk cache
90 | LazyDiskCacheChannelSize uint
91 |
92 | // DiskEncryptionEnabled is used to enable or disable disk encryption
93 | DiskEncryptionEnabled bool
94 | // DiskEncryptionKey is the key used to encrypt the disk cache
95 | DiskEncryptionKey []byte
96 | }
97 |
98 | // EnqueueOptions is used to configure the EnQueue method
99 | type EnqueueOptions struct {
100 | // ShouldEscalate is used to determine if the item should be escalated
101 | ShouldEscalate bool
102 | // EscalationRate is the interval at which the item escalates (escalation repeats every interval)
103 | EscalationRate time.Duration
104 | // CanTimeout is used to determine if the item can timeout
105 | CanTimeout bool
106 | // Timeout is the time to wait before timing out the item
107 | Timeout time.Duration
108 | }
109 |
110 | // DeleteMessage is used internally to delete entries via the lazy disk delete channel
111 | type DeleteMessage struct {
112 | BatchNumber uint
113 | DiskUUID []byte
114 | WasRestored bool
115 | }
116 |
--------------------------------------------------------------------------------