├── .gitignore
├── .travis.yml
├── LICENSE
├── Makefile
├── README.md
├── benchmark_test.go
├── doc.go
├── docs
│   ├── FanoutQueueTutorial.md
│   ├── QueueTutorial.md
│   └── images
│       ├── contact_us.png
│       ├── fanout-queue.png
│       ├── file_storage_overview.png
│       ├── location_offset_overview.png
│       └── log_collector.png
├── errors.go
├── examples
│   ├── async
│   │   ├── async.go
│   │   └── go.mod
│   ├── nomal
│   │   ├── go.mod
│   │   ├── go.sum
│   │   └── normal.go
│   └── subscribe
│       └── subscribe.go
├── fanoutqueue.go
├── filefanoutqueue.go
├── filefanoutqueue_test.go
├── filequeue.go
├── filequeue_test.go
├── go.mod
├── go.sum
├── mmap.go
├── mmap_darwin.go
├── mmap_linux.go
├── mmap_test.go
├── mmap_windows.go
├── mmapfactory.go
├── options.go
├── queue.go
└── utils.go
/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 |
8 | # Test binary, built with `go test -c`
9 | *.test
10 |
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 |
14 | # Dependency directories (remove the comment below to include it)
15 | # vendor/
16 | /.settings/
17 | /.project
18 | bin/temp/fanoutqueue/data/blank.txt
19 | bin/temp/fanoutqueue/data/page-0.dat
20 | bin/temp/fanoutqueue/front_index/blank.txt
21 | bin/temp/fanoutqueue/front_index/page-0.dat
22 | bin/temp/fanoutqueue/front_index_100/blank.txt
23 | bin/temp/fanoutqueue/front_index_101/blank.txt
24 | bin/temp/fanoutqueue/index/blank.txt
25 | bin/temp/fanoutqueue/index/page-0.dat
26 | bin/temp/fanoutqueue/meta_data/blank.txt
27 | bin/temp/fanoutqueue/meta_data/page-0.dat
28 | bin/temp/testqueue/data/page-0.dat
29 | bin/temp/testqueue/data/page-164.dat
30 | bin/temp/testqueue/data/page-165.dat
31 | bin/temp/testqueue/data/page-166.dat
32 | bin/temp/testqueue/data/page-167.dat
33 | bin/temp/testqueue/data/test.dir
34 | bin/temp/testqueue/front_index/test.dir
35 | bin/temp/testqueue/index/page-0.dat
36 | bin/temp/testqueue/index/test.dir
37 | bin/temp/testqueue/meta_data/test.dir
38 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: go
2 | go_import_path: github.com/jhunters/bigqueue
3 |
4 | sudo: false
5 |
6 | go:
7 | - 1.13
8 |
9 | before_install:
10 | - go get -v honnef.co/go/tools/...
11 | - go get -v github.com/kisielk/errcheck
12 |
13 | script:
14 | - make fmt
15 | - make test
16 | - make race
17 | # - make errcheck
18 |
19 | after_success:
20 | - bash <(curl -s https://codecov.io/bash)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | BRANCH=`git rev-parse --abbrev-ref HEAD`
2 | COMMIT=`git rev-parse --short HEAD`
3 | GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
4 |
5 | default: build
6 |
7 | fmt:
8 | !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
9 |
10 | # go get honnef.co/go/tools/simple
11 | gosimple:
12 | gosimple ./...
13 |
14 | # go get honnef.co/go/tools/unused
15 | unused:
16 | unused ./...
17 |
18 |
19 | test:
20 | go test -timeout 20m -v -coverprofile cover.out -covermode atomic
21 |
22 | race:
23 | go test -bench=. -benchtime=1s -run=^$
24 |
25 | .PHONY: fmt test gosimple unused
26 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # BigQueue-go
2 |
3 |
4 | BigQueue-go is a pure Golang implementation of a big, fast and persistent queue based on memory mapped files. Its file storage structure is fully compatible with
5 | [BigQueue](https://github.com/bulldog2011/bigqueue)
6 |
7 |
8 | [Go Report Card](https://goreportcard.com/report/github.com/jhunters/bigqueue)
9 | [Travis CI](https://travis-ci.org/jhunters/bigqueue)
10 | [Codecov](https://codecov.io/gh/jhunters/bigqueue)
11 | [Releases](https://github.com/jhunters/bigqueue/releases)
12 | [GoDoc](https://godoc.org/github.com/jhunters/bigqueue)
13 | [License](https://github.com/jhunters/bigqueue/blob/master/LICENSE)
14 |
15 | ## Feature Highlight:
16 | 1. **Fast**: close to the speed of direct memory access; both enqueue and dequeue are close to O(1) memory access.
17 | 2. **Big**: the total size of the queue is only limited by the available disk space.
18 | 3. **Persistent**: all data in the queue is persisted on disk and is crash resistant.
19 | 4. **Reliable**: the OS is responsible for persisting the produced messages even if your process crashes.
20 | 5. **Realtime**: messages produced by producer threads are immediately visible to consumer threads.
21 | 6. **Memory-efficient**: automatic paging & swapping algorithm, only the most-recently accessed data is kept in memory.
22 | 7. **Thread-safe**: multiple threads can concurrently enqueue and dequeue without data corruption (see the concurrency sketch at the end of Quick Start).
23 | 8. **Simple&Light**: pure Golang implementation without any 3rd-party library.
24 |
25 | ## Quick Start
26 |
27 | ### Installing
28 |
29 | To start using BigQueue-Go, install Go and run `go get`:
30 |
31 | ```sh
32 | $ go get github.com/jhunters/bigqueue
33 | ```
34 |
35 | To run testcases:
36 | ```sh
37 | $ go test -v .
38 | ```
39 |
40 | ### Importing bigqueue
41 |
42 | To use bigqueue as a file-based persistent queue, import it as:
43 |
44 | ```go
45 | package main
46 | import "fmt"
47 | import "github.com/jhunters/bigqueue"
48 | func main() {
49 | var queue = new(bigqueue.FileQueue)
50 |
51 | err := queue.Open(".", "testqueue", nil)
52 |
53 | if err != nil {
54 | fmt.Println(err)
55 | return
56 | }
57 | defer queue.Close()
58 |
59 | data := []byte("hello jhunters")
60 |
61 | i, err := queue.Enqueue(data)
62 | if err != nil {
63 | fmt.Println(err)
64 | return
65 | } else {
66 | fmt.Println("Enqueued index=", i, string(data))
67 | }
68 |
69 | index, bb, err := queue.Dequeue()
70 | if err != nil {
71 | fmt.Println(err)
72 | return
73 | }
74 |
75 | fmt.Println("Dequeue data:", index, string(bb))
76 | }
77 | ```
78 |
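### Concurrent usage

bigqueue is thread-safe, so producers and consumers can run in separate goroutines. The following is only an illustrative sketch (the queue name "concurrentqueue" and the counts are made up), using the same FileQueue API as the example above:

```go
package main

import (
	"fmt"
	"strconv"
	"sync"

	"github.com/jhunters/bigqueue"
)

func main() {
	var queue = new(bigqueue.FileQueue)
	if err := queue.Open(".", "concurrentqueue", nil); err != nil {
		fmt.Println(err)
		return
	}
	defer queue.Close()

	// several producers enqueue concurrently
	var wg sync.WaitGroup
	for p := 0; p < 4; p++ {
		wg.Add(1)
		go func(p int) {
			defer wg.Done()
			for i := 0; i < 100; i++ {
				data := []byte("msg-" + strconv.Itoa(p) + "-" + strconv.Itoa(i))
				if _, err := queue.Enqueue(data); err != nil {
					fmt.Println(err)
				}
			}
		}(p)
	}
	wg.Wait()

	// drain the queue; as in examples/nomal/normal.go, Dequeue reports index -1 once the queue is empty
	for {
		index, bb, err := queue.Dequeue()
		if err != nil {
			fmt.Println(err)
			break
		}
		if index == -1 {
			break
		}
		fmt.Println("Dequeued", index, string(bb))
	}
}
```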
79 | ## Docs
80 | 1. [big queue tutorial](./docs/QueueTutorial.md)
81 | 2. [fanout queue tutorial](./docs/FanoutQueueTutorial.md)
82 |
83 | ## The Big Picture
84 | ![file storage overview](docs/images/file_storage_overview.png)
85 |
86 | ![location offset overview](docs/images/location_offset_overview.png)
87 |
88 | ![log collector](docs/images/log_collector.png)
89 |
90 |
91 | ## Benchmark test
92 | ```sh
93 | $ go test -bench . -benchtime=3s -run=^$
94 | ```
95 |
96 | ```property
97 | goos: linux
98 | goarch: amd64
99 | pkg: github.com/bigqueue
100 | Benchmark_EnqueueOnly-8 2319403 1479 ns/op
101 | Benchmark_DequeueOnly-8 4704715 743 ns/op
102 | Benchmark_EnqueueDequeue-8 1536244 2303 ns/op
103 | Benchmark_ParallelEnqueueDequeue-8 1254315 2760 ns/op
104 | PASS
105 | ok github.com/bigqueue 40.028s
106 | ```
107 |
108 |
109 | ## License
110 | BigQueue-Go is [Apache 2.0 licensed](./LICENSE).
111 |
--------------------------------------------------------------------------------
/benchmark_test.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 | )
7 |
8 | // Run the following command in the current directory:
9 | // go test -bench . -benchtime=3s -run=^$
10 | // Sample output:
11 | // BenchmarkSprintf-8 50000000 109 ns/op
12 | // The "-8" suffix after the benchmark name is the GOMAXPROCS value used for the run.
13 | // The next number (e.g. 50000000) is how many times the benchmark loop ran, i.e. how many times the code under test was invoked.
14 | // The last value (e.g. 109 ns/op) is the average time spent per operation, in nanoseconds.
15 | // To also report memory allocations, add the -benchmem flag:
16 | // go test -bench=. -benchmem -run=^$
17 | func Benchmark_EnqueueOnly(b *testing.B) {
18 |
19 | path := Tempfile()
20 | defer clearFiles(path, "testqueue")
21 |
22 | var queue = new(FileQueue)
23 |
24 | err := queue.Open(path, "testqueue", nil)
25 |
26 | if err != nil {
27 | fmt.Println(err)
28 | }
29 | defer queue.Close()
30 |
31 | // init enqueue
32 | bb := []byte("hello xiemalin! welcome to our world!")
33 |
34 | b.ResetTimer()
35 | t := &testing.T{}
36 | enqueue(queue, bb, b.N, t)
37 | b.StopTimer()
38 |
39 | }
40 |
41 | func Benchmark_DequeueOnly(b *testing.B) {
42 |
43 | path := Tempfile()
44 | defer clearFiles(path, "testqueue")
45 |
46 | var queue = new(FileQueue)
47 |
48 | err := queue.Open(path, "testqueue", nil)
49 |
50 | if err != nil {
51 | fmt.Println(err)
52 | }
53 | defer queue.Close()
54 |
55 | bb := []byte("hello xiemalin! welcome to our world!")
56 | t := &testing.T{}
57 | enqueue(queue, bb, b.N, t)
58 |
59 | b.ResetTimer()
60 | for i := 0; i < b.N; i++ {
61 | queue.Dequeue()
62 | }
63 | b.StopTimer()
64 |
65 | }
66 |
67 | func Benchmark_EnqueueDequeue(b *testing.B) {
68 |
69 | path := Tempfile()
70 | defer clearFiles(path, "testqueue")
71 |
72 | var queue = new(FileQueue)
73 |
74 | err := queue.Open(path, "testqueue", nil)
75 |
76 | if err != nil {
77 | fmt.Println(err)
78 | }
79 | defer queue.Close()
80 |
81 | bb := []byte("hello xiemalin! welcome to our world!")
82 |
83 | b.ResetTimer()
84 | for i := 0; i < b.N; i++ {
85 | queue.Enqueue(bb)
86 | queue.Dequeue()
87 | }
88 | b.StopTimer()
89 | }
90 |
91 | func Benchmark_ParallelEnqueueDequeue(b *testing.B) {
92 | path := Tempfile()
93 | defer clearFiles(path, "testqueue")
94 |
95 | var queue = new(FileQueue)
96 |
97 | err := queue.Open(path, "testqueue", nil)
98 |
99 | if err != nil {
100 | fmt.Println(err)
101 | }
102 | defer queue.Close()
103 | bb := []byte("hello xiemalin! welcome to our world!")
104 | b.ResetTimer()
105 | b.RunParallel(func(pb *testing.PB) {
106 | for pb.Next() {
107 | queue.Enqueue(bb)
108 | queue.Dequeue()
109 | }
110 | })
111 | }
112 |
--------------------------------------------------------------------------------
/doc.go:
--------------------------------------------------------------------------------
1 | /*
2 | Package bigqueue is a pure Golang implementation of a big, fast and persistent queue based on memory mapped files.
3 | */
4 | package bigqueue
5 |
--------------------------------------------------------------------------------
/docs/FanoutQueueTutorial.md:
--------------------------------------------------------------------------------
1 | # BigQueue-go tutorial
2 |
3 |
4 | This is a tutorial to show the fanout queue API usage of big queue.
5 |
6 | The figure below visually shows the fanout semantics:
7 |
8 | ![fanout queue](images/fanout-queue.png)
9 |
10 | ## Quick tutorial:
11 | **Initialize fanout queue**:
12 | You can create(initialize) a new fanout queue in just two statements:
13 | ```go
14 | // new queue struct
15 | var fanoutqueue = new(bigqueue.FileFanoutQueue)
16 | // open file with target directory and queue name
17 | err := fanoutqueue.Open(".", "testqueue", nil)
18 |
19 | ```
20 | Initialize with customized options
21 | ```go
22 | // new queue struct
23 | var fanoutqueue = new(bigqueue.FileFanoutQueue)
24 | // create customized options
25 | var options = &bigqueue.Options{
26 | DataPageSize: bigqueue.DefaultDataPageSize,
27 | GcLock: false,
28 | IndexItemsPerPage: bigqueue.DefaultIndexItemsPerPage,
29 | }
30 |
31 | // open file with target directory and queue name
32 | err := fanoutqueue.Open(".", "testqueue", options)
33 | ```
34 | #### Parameters:
35 | Parameter | Default | Description
36 | -|-|-
37 | DataPageSize | 128 * 1024 * 1024 | Size in bytes of one data page file
38 | IndexItemsPerPage | 17 | Number of index items in one index page file, given as a power-of-two exponent; the default of 17 means 1 << 17 items
39 |
40 | **Enqueue**:
41 | To add or produce an item into the queue, just call the Enqueue method on the queue reference; here we enqueue 10 numbers into the queue:
42 | ```go
43 | for i := 0; i < 10; i++ {
44 | content := strconv.Itoa(i)
45 | idx, err := fanoutqueue.Enqueue([]byte(content))
46 | if err != nil {
47 | fmt.Println("Enqueue failed with err:", err)
48 | }
49 | }
50 |
51 | ```
52 |
53 | **Size**:
54 | Now there are 10 items in the queue and it is not empty anymore. To find out the total number of items in the queue, call the Size method:
55 | ```go
56 | fanoutID := int64(100)
57 | fanoutID2 := int64(101)
58 | size := fanoutqueue.Size(fanoutID) // get size 10 with target fanout id
59 | size2 := fanoutqueue.Size(fanoutID2) // get size 10 with target fanout id2
60 | ```
61 |
62 | **IsEmpty**:
63 | Check whether the current queue is empty.
64 | ```go
65 | fanoutID := int64(100)
66 | isEmpty := fanoutqueue.IsEmpty(fanoutID) // returns false because fanout id (100) has 10 items
67 |
68 | ```
69 |
70 | **Peek and Dequeue**:
71 | The peek method just let you peek item at the front of the queue without removing the item from the queue:
72 | ```go
73 | // peek one
74 | fanoutID := int64(100)
75 | index, data, err := fanoutqueue.Peek(fanoutID)
76 | if err != nil {
77 | // print err
78 | }
79 | ```
80 |
81 | ```go
82 | // peek all
83 | fanoutID := int64(100)
84 | data, err := fanoutqueue.PeekAll(fanoutID) // type of data is [][]byte
85 | if err != nil {
86 | // print err
87 | }
88 | ```
89 |
90 |
91 | To remove or consume an item from the queue, just call the Dequeue method; here we dequeue one item from the queue:
92 | ```go
93 | fanoutID := int64(100)
94 | index, data, err := fanoutqueue.Dequeue(fanoutID)
95 | if err != nil {
96 | // print err
97 | }
98 | ```
99 |
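To make the fanout semantics concrete, here is a small illustrative sketch (not part of the library; the fanout IDs and print statements are arbitrary). Each fanout ID keeps its own front index, so both consumers independently receive every enqueued item:
```go
fanoutID := int64(100)
fanoutID2 := int64(101)

// the first consumer drains its own view of the queue
for !fanoutqueue.IsEmpty(fanoutID) {
	index, data, err := fanoutqueue.Dequeue(fanoutID)
	if err != nil {
		// print err
		break
	}
	fmt.Println("consumer 100 got:", index, string(data))
}

// the second fanout ID still sees all items, untouched by the first consumer
for !fanoutqueue.IsEmpty(fanoutID2) {
	index, data, err := fanoutqueue.Dequeue(fanoutID2)
	if err != nil {
		// print err
		break
	}
	fmt.Println("consumer 101 got:", index, string(data))
}
```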
100 | **Skip**:
101 | The Skip method skips (ignores) the specified number of items from the current index.
102 | ```go
103 | fanoutID := int64(100)
104 | count := int64(10)
105 | err := fanoutqueue.Skip(fanoutID, count)
106 | if err != nil {
107 | // print err
108 | }
109 | ```
110 |
111 |
112 | **Subscribe and FreeSubscribe**:
113 | The Subscribe method dequeues items from the queue asynchronously, like the listener pattern.
114 | ```go
115 | fanoutID := int64(100)
116 | fanoutqueue.Subscribe(fanoutID, func(index int64, bb []byte, err error) {
117 | if err != nil {
118 | // we met some error
119 | }
120 | // on item dequeued with item index and item data
121 | })
122 |
123 | // free subscribe action
124 | fanoutqueue.FreeSubscribe(fanoutID)
125 |
126 | // free all subscribe action
127 | // fanoutqueue.FreeAllSubscribe()
128 | ```
129 |
130 |
131 | **Close**:
132 | Finally, when you are finished with the queue, call the Close method to release the resources used by the queue. This is not mandatory, just a best practice: calling Close releases part of the used memory immediately. Usually you open the queue and defer the Close call right after; here is the usage paradigm:
133 | ```go
134 | err := fanoutqueue.Close()
135 | if err != nil {
136 | // print err
137 | }
138 | ```
139 |
140 |
141 |
142 | ## License
143 | BigQueue-Go is [Apache 2.0 licensed](./LICENSE).
144 |
--------------------------------------------------------------------------------
/docs/QueueTutorial.md:
--------------------------------------------------------------------------------
1 | # BigQueue-go tutorial
2 |
3 |
4 | This is a tutorial to show the basic API usage of big queue.
5 |
6 |
7 |
8 | ## Quick tutorial:
9 | **Initialize bigqueue**:
10 | You can create(initialize) a new big queue in just two statements:
11 | ```go
12 | // new queue struct
13 | var queue = new(bigqueue.FileQueue)
14 | // open file with target directory and queue name
15 | err := queue.Open(".", "testqueue", nil)
16 |
17 | ```
18 | Initialize with customized options
19 | ```go
20 | // new queue struct
21 | var queue = new(bigqueue.FileQueue)
22 | // create customized options
23 | var options = &bigqueue.Options{
24 | DataPageSize: bigqueue.DefaultDataPageSize,
25 | GcLock: false,
26 | IndexItemsPerPage: bigqueue.DefaultIndexItemsPerPage,
27 | }
28 |
29 | // open file with target directory and queue name
30 | err := queue.Open(".", "testqueue", options)
31 | ```
32 | #### Parameters:
33 | Parameter | Default | Description
34 | -|-|-
35 | DataPageSize | 128 * 1024 * 1024 | Size in bytes of one data page file
36 | IndexItemsPerPage | 17 | Number of index items in one index page file, given as a power-of-two exponent; the default of 17 means 1 << 17 items
37 |
38 | **Enqueue**:
39 | To add or produce an item into the queue, just call the Enqueue method on the queue reference; here we enqueue 10 numbers into the queue:
40 | ```go
41 | for i := 0; i < 10; i++ {
42 | content := strconv.Itoa(i)
43 | idx, err := queue.Enqueue([]byte(content))
44 | if err != nil {
45 | fmt.Println("Enqueue failed with err:", err)
46 | }
47 | }
48 |
49 | ```
50 |
51 | **Size**:
52 | Now there are 10 items in the queue and it is not empty anymore. To find out the total number of items in the queue, call the Size method:
53 | ```go
54 | size := queue.Size() // get size 10
55 |
56 | ```
57 |
58 | **IsEmpty**:
59 | Check whether the current queue is empty.
60 | ```go
61 | isEmpty := queue.IsEmpty()
62 |
63 | ```
64 |
65 | **Peek and Dequeue**:
66 | The peek method just let you peek item at the front of the queue without removing the item from the queue:
67 | ```go
68 | // peek one
69 | index, data, err := queue.Peek()
70 | if err != nil {
71 | // print err
72 | }
73 | ```
74 |
75 | ```go
76 | // peek all
77 | data, err := queue.PeekAll() // type of data is [][]byte
78 | if err != nil {
79 | // print err
80 | }
81 | ```
82 |
83 | To remove or consume an item from the queue, just call the Dequeue method; here we dequeue one item from the queue:
84 | ```go
85 | index, data, err := queue.Dequeue()
86 | if err != nil {
87 | // print err
88 | }
89 | ```
90 |
91 | **Skip**:
92 | The Skip method skips (ignores) the specified number of items from the current index.
93 | ```go
94 | count := int64(10)
95 | err := queue.Skip(count)
96 | if err != nil {
97 | // print err
98 | }
99 | ```
100 |
101 |
102 | **Gc**:
103 | The Gc method deletes old items from the index and data page file(s) to free disk space.
104 | ```go
105 | err := queue.Gc()
106 | if err != nil {
107 | // print err
108 | }
109 | ```
110 |
111 | **Auto Gc**:
112 | To enable the automatic GC action, set the AutoGCBySeconds option:
113 | ```go
114 | var options = &bigqueue.Options{
115 | DataPageSize: bigqueue.DefaultDataPageSize,
116 | GcLock: false,
117 | IndexItemsPerPage: bigqueue.DefaultIndexItemsPerPage,
118 | AutoGCBySeconds: 1, // set auto gc action interval
119 | }
120 |
121 | // open file with target directory and queue name
122 | err := queue.Open(".", "testqueue", options)
123 | ```
124 |
125 | **Subscribe and FreeSubscribe**:
126 | The Subscribe method dequeues items from the queue asynchronously, like the listener pattern.
127 | ```go
128 | queue.Subscribe(func(index int64, bb []byte, err error) {
129 | if err != nil {
130 | // we met some error
131 | }
132 | // on item dequeued with item index and item data
133 | })
134 |
135 | // free subscribe action
136 | queue.FreeSubscribe()
137 | ```
138 |
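As an illustrative sketch (the channel and item names below are made up, not part of the API), a subscriber registered this way also receives items enqueued after Subscribe is called, so it can be paired with a producer goroutine:
```go
received := make(chan string, 16)

queue.Subscribe(func(index int64, bb []byte, err error) {
	if err != nil {
		// print err
		return
	}
	received <- string(bb)
})

// items enqueued after subscribing are delivered to the callback asynchronously
go func() {
	for i := 0; i < 3; i++ {
		queue.Enqueue([]byte("item-" + strconv.Itoa(i)))
	}
}()

for i := 0; i < 3; i++ {
	fmt.Println("received:", <-received)
}

// stop receiving further items
queue.FreeSubscribe()
```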
139 |
140 | **Close**:
141 | Finally, when you are finished with the queue, call the Close method to release the resources used by the queue. This is not mandatory, just a best practice: calling Close releases part of the used memory immediately. Usually you open the queue and defer the Close call right after; here is the usage paradigm:
142 | ```go
143 | err := queue.Close()
144 | if err != nil {
145 | // print err
146 | }
147 | ```
148 |
149 |
150 |
151 | ## License
152 | BigQueue-Go is [Apache 2.0 licensed](./LICENSE).
153 |
--------------------------------------------------------------------------------
/docs/images/contact_us.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhunters/bigqueue/a76a3f66edd46e04604eec8fabaf27833c25f05e/docs/images/contact_us.png
--------------------------------------------------------------------------------
/docs/images/fanout-queue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhunters/bigqueue/a76a3f66edd46e04604eec8fabaf27833c25f05e/docs/images/fanout-queue.png
--------------------------------------------------------------------------------
/docs/images/file_storage_overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhunters/bigqueue/a76a3f66edd46e04604eec8fabaf27833c25f05e/docs/images/file_storage_overview.png
--------------------------------------------------------------------------------
/docs/images/location_offset_overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhunters/bigqueue/a76a3f66edd46e04604eec8fabaf27833c25f05e/docs/images/location_offset_overview.png
--------------------------------------------------------------------------------
/docs/images/log_collector.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jhunters/bigqueue/a76a3f66edd46e04604eec8fabaf27833c25f05e/docs/images/log_collector.png
--------------------------------------------------------------------------------
/errors.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | import "errors"
4 |
5 | // These errors can be returned when opening or calling methods on a DB.
6 | var (
7 | ErrEnqueueDataNull = errors.New("enqueue data can not be null")
8 |
9 | ErrIndexOutOfBoundTH = errors.New("index is valid which should between tail and head index")
10 |
11 | // SubscribeExistErr repeat call Subscriber method
12 | ErrSubscribeExistErr = errors.New("Subscriber alread set, can not repeat set")
13 |
14 | // Subscribe should call after queue Open method
15 | ErrSubscribeFailedNoOpenErr = errors.New("Subscriber method only support after queue opened")
16 | )
17 |
--------------------------------------------------------------------------------
/examples/async/async.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "time"
7 |
8 | "github.com/jhunters/bigqueue"
9 | )
10 |
11 | func main() {
12 | var queue = new(bigqueue.FileQueue)
13 |
14 | var DefaultOptions = &bigqueue.Options{
15 | DataPageSize: bigqueue.DefaultDataPageSize,
16 | IndexItemsPerPage: bigqueue.DefaultIndexItemsPerPage,
17 | }
18 |
19 | err := queue.Open(".", "testqueue", DefaultOptions)
20 |
21 | if err != nil {
22 | fmt.Println(err)
23 | }
24 | for i := 1; i < 100; i++ {
25 | data := []byte("hello jhunters" + strconv.Itoa(i))
26 | queue.EnqueueAsync(data, func(index int64, err error) {
27 | if err != nil {
28 | fmt.Println(err)
29 | } else {
30 | fmt.Println("Enqueued index=", index, string(data))
31 | }
32 | idx, bb, err := queue.Dequeue()
33 | if err != nil {
34 | fmt.Println(err)
35 | }
36 | fmt.Println(idx, string(bb))
37 | })
38 | }
39 |
40 | time.Sleep(time.Duration(2) * time.Second)
41 |
42 | queue.Gc()
43 |
44 | queue.Close()
45 |
46 | }
47 |
--------------------------------------------------------------------------------
/examples/async/go.mod:
--------------------------------------------------------------------------------
1 | module async
2 |
3 | go 1.13
4 |
--------------------------------------------------------------------------------
/examples/nomal/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/jhunters/bigqueue
2 |
3 | go 1.13
4 |
--------------------------------------------------------------------------------
/examples/nomal/go.sum:
--------------------------------------------------------------------------------
1 | github.com/jhunters/bigqueue v1.0.1 h1:xIcBuzfm1ILMdGY5M+fukI84sqv87SPFUDEFT7xaKmc=
2 | github.com/jhunters/bigqueue v1.0.1/go.mod h1:CbEObWKPe9f5OYtOu99fb92F+gJJewLhxeDm/V+WBio=
3 |
--------------------------------------------------------------------------------
/examples/nomal/normal.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 |
7 | "github.com/jhunters/bigqueue"
8 | )
9 |
10 | // a demo to show how to enqueue and dequeue data
11 | func main() {
12 |
13 | var queue = new(bigqueue.FileQueue)
14 |
15 | // use custom options
16 | var DefaultOptions = &bigqueue.Options{
17 | DataPageSize: bigqueue.DefaultDataPageSize,
18 | IndexItemsPerPage: bigqueue.DefaultIndexItemsPerPage,
19 | }
20 |
21 | // open queue files
22 | err := queue.Open("./bin", "testqueue", DefaultOptions)
23 |
24 | if err != nil {
25 | fmt.Println(err)
26 | }
27 | defer queue.Close()
28 |
29 | // do enqueue
30 | for i := 1; i < 10; i++ {
31 | data := []byte("hello jhunters" + strconv.Itoa(i))
32 | i, err := queue.Enqueue(data)
33 | if err != nil {
34 | fmt.Println(err)
35 | } else {
36 | fmt.Println("Enqueued index=", i, string(data))
37 | }
38 | }
39 | // do dequeue
40 | for i := 1; i < 10; i++ {
41 | index, bb, err := queue.Dequeue()
42 | if err != nil {
43 | fmt.Println(err)
44 | }
45 | if index != -1 {
46 | fmt.Println(index, string(bb))
47 | }
48 |
49 | }
50 |
51 | // do gc action to free old data
52 | queue.Gc()
53 |
54 | }
55 |
--------------------------------------------------------------------------------
/examples/subscribe/subscribe.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "time"
7 |
8 | "github.com/jhunters/bigqueue"
9 | )
10 |
11 | func main() {
12 | var queue = new(bigqueue.FileQueue)
13 |
14 | // use custom options
15 | var DefaultOptions = &bigqueue.Options{
16 | DataPageSize: bigqueue.DefaultDataPageSize,
17 | IndexItemsPerPage: bigqueue.DefaultIndexItemsPerPage,
18 | }
19 |
20 | // open queue files
21 | err := queue.Open("./bin", "testqueue", DefaultOptions)
22 |
23 | if err != nil {
24 | fmt.Println(err)
25 | }
26 | defer queue.Close()
27 |
28 | for i := 1; i < 10; i++ {
29 | data := []byte("hello jhunters" + strconv.Itoa(i))
30 | i, err := queue.Enqueue(data)
31 | if err != nil {
32 | fmt.Println(err)
33 | } else {
34 | fmt.Println("Enqueued index=", i, string(data))
35 | }
36 | }
37 |
38 | queue.Subscribe(func(index int64, bb []byte, err error) {
39 | fmt.Println("index=", index, " value=", string(bb))
40 | })
41 |
42 | // for y := 0; y < 10; y++ {
43 | // // do enqueue
44 | // for i := 1; i < 10; i++ {
45 | // data := []byte("hello jhunters" + strconv.Itoa(i))
46 | // i, err := queue.Enqueue(data)
47 | // if err != nil {
48 | // fmt.Println(err)
49 | // } else {
50 | // fmt.Println("Enqueued index=", i, string(data))
51 | // }
52 | // }
53 | // time.Sleep(time.Duration(1) * time.Second)
54 | // }
55 |
56 | time.Sleep(time.Duration(10) * time.Second)
57 | }
58 |
--------------------------------------------------------------------------------
/fanoutqueue.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | // FanOutQueue is a queue with pub-sub (fanout) support
4 | type FanOutQueue interface {
5 | // Open to open the target queue files; returns a non-nil error on failure
6 | Open(dir string, queueName string, options *Options) error
7 |
8 | // IsEmpty Determines whether a queue is empty
9 | // fanoutID queue index
10 | // return true if empty, false otherwise
11 | IsEmpty(fanoutID int64) bool
12 |
13 | // Size return available queue size
14 | Size(fanoutID int64) int64
15 |
16 | // Enqueue Append an item to the queue and return index no
17 | // if any error occurs a non-nil error is returned
18 | Enqueue(data []byte) (int64, error)
19 |
20 | // EnqueueAsync Append an item to the queue async way
21 | EnqueueAsync(data []byte, fn func(int64, error))
22 |
23 | Dequeue(fanoutID int64) (int64, []byte, error)
24 |
25 | Peek(fanoutID int64) (int64, []byte, error)
26 |
27 | // Skip to skip dequeuing the target number of items
28 | Skip(fanoutID int64, count int64) error
29 |
30 | Close() error
31 |
32 | // Subscribe to set an asynchronous subscriber
33 | Subscribe(fanoutID int64, fn func(int64, []byte, error)) error
34 |
35 | // FreeSubscribe to free the asynchronous subscriber
36 | FreeSubscribe(fanoutID int64)
37 |
38 | // FreeAllSubscribe to free all asynchronous subscribers
39 | FreeAllSubscribe()
40 | }
41 |
--------------------------------------------------------------------------------
/filefanoutqueue.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | import (
4 | "errors"
5 | "os"
6 | "strconv"
7 | "sync"
8 | )
9 |
10 | const (
11 | // FanoutFrontFileName Fanout FrontFileName file name
12 | FanoutFrontFileName = "front_index_"
13 | )
14 |
15 | // FileFanoutQueue file fanout queue implements
16 | type FileFanoutQueue struct {
17 | fileQueue *FileQueue
18 |
19 | // queue options
20 | options *Options
21 |
22 | // front index by fanout id
23 | frontIndexMap map[int64]*QueueFront
24 |
25 | queueGetLock sync.Mutex
26 |
27 | path string
28 |
29 | opened bool
30 | }
31 |
32 | // QueueFront queue front struct
33 | type QueueFront struct {
34 | // fanout id
35 | fanoutID int64
36 |
37 | // front index of the big queue,
38 | frontIndex int64
39 |
40 | fanoutDatafile *DB
41 |
42 | // locks for queue front write management
43 | queueFrontWriteLock sync.Mutex
44 |
45 | subscribLock sync.Mutex
46 |
47 | subscriber func(int64, []byte, error)
48 | }
49 |
50 | // Open the queue files
51 | func (q *FileFanoutQueue) Open(dir string, queueName string, options *Options) error {
52 |
53 | if !q.opened {
54 | q.opened = true
55 | } else {
56 | return errors.New("FileFanoutQueue already opened")
57 | }
58 |
59 | q.fileQueue = &FileQueue{}
60 |
61 | if options == nil {
62 | options = DefaultOptions
63 | }
64 | err := q.fileQueue.Open(dir, queueName, options)
65 | if err != nil {
66 | return err
67 | }
68 | q.options = options
69 |
70 | q.path = dir + "/" + queueName
71 |
72 | q.frontIndexMap = make(map[int64]*QueueFront)
73 |
74 | q.fileQueue.Subscribe(q.doSubscribe)
75 |
76 | return nil
77 | }
78 |
79 | // Status get status info from current queue
80 | func (q *FileFanoutQueue) Status(fanoutID int64) *QueueFilesStatus {
81 | qf, err := q.getQueueFront(fanoutID)
82 | if err != nil {
83 | return nil
84 | }
85 |
86 | queueFilesStatus := q.fileQueue.status(qf.frontIndex, -1, -1)
87 | if queueFilesStatus == nil {
88 | return nil
89 | }
90 |
91 | return queueFilesStatus
92 | }
93 |
94 | // IsEmpty test if target fanoutID is empty
95 | func (q *FileFanoutQueue) IsEmpty(fanoutID int64) bool {
96 | qf, err := q.getQueueFront(fanoutID)
97 | if err != nil {
98 | return true
99 | }
100 |
101 | return q.fileQueue.isEmpty(qf.frontIndex)
102 | }
103 |
104 | // Size return item size with target fanoutID
105 | func (q *FileFanoutQueue) Size(fanoutID int64) int64 {
106 | qf, err := q.getQueueFront(fanoutID)
107 | if err != nil {
108 | return -1
109 | }
110 |
111 | return q.fileQueue.size(qf.frontIndex)
112 | }
113 |
114 | // Close free the resource
115 | func (q *FileFanoutQueue) Close() {
116 | // close file queue
117 | q.fileQueue.Close()
118 |
119 | for _, v := range q.frontIndexMap {
120 | if v.fanoutDatafile != nil {
121 | v.fanoutDatafile.Close()
122 | }
123 | v.fanoutDatafile = nil
124 | }
125 |
126 | q.opened = false
127 | }
128 |
129 | // Enqueue Append an item to the queue and return index no
130 | func (q *FileFanoutQueue) Enqueue(data []byte) (int64, error) {
131 | return q.fileQueue.Enqueue(data)
132 | }
133 |
134 | // Dequeue dequeue data from target fanoutID
135 | func (q *FileFanoutQueue) Dequeue(fanoutID int64) (int64, []byte, error) {
136 | qf, err := q.getQueueFront(fanoutID)
137 | if err != nil {
138 | return -1, nil, err
139 | }
140 |
141 | if q.IsEmpty(fanoutID) {
142 | return -1, nil, nil
143 | }
144 |
145 | index, err := qf.updateQueueFrontIndex(1)
146 | if err != nil {
147 | return -1, nil, err
148 | }
149 |
150 | data, err := q.fileQueue.peek(index)
151 | if err != nil {
152 | return -1, nil, err
153 | }
154 |
155 | return index, data, nil
156 | }
157 |
158 | // Peek peek the head item from target fanoutID
159 | func (q *FileFanoutQueue) Peek(fanoutID int64) (int64, []byte, error) {
160 | qf, err := q.getQueueFront(fanoutID)
161 | if err != nil {
162 | return -1, nil, err
163 | }
164 |
165 | index := qf.frontIndex
166 |
167 | data, err := q.fileQueue.peek(index)
168 | if err != nil {
169 | return -1, nil, err
170 | }
171 |
172 | return index, data, nil
173 | }
174 |
175 | // PeekAll Retrieves all the items from the front of a queue
176 | // return array of data and array of index
177 | func (q *FileFanoutQueue) PeekAll(fanoutID int64) ([][]byte, []int64, error) {
178 | qf, err := q.getQueueFront(fanoutID)
179 | if err != nil {
180 | return nil, nil, err
181 | }
182 | index := qf.frontIndex
183 |
184 | return q.fileQueue.peekAll(index, q.Size(fanoutID))
185 | }
186 |
187 | // PeekPagination to peek data from the queue with the paging feature.
188 | func (q *FileFanoutQueue) PeekPagination(fanoutID int64, page, pagesize uint64) ([][]byte, []int64, error) {
189 |
190 | qf, err := q.getQueueFront(fanoutID)
191 | if err != nil {
192 | return nil, nil, err
193 | }
194 | index := qf.frontIndex
195 |
196 | return q.fileQueue.peekPagination(index, q.fileQueue.size(index), page, pagesize)
197 | }
198 |
199 | // Skip to skip dequeuing the target number of items
200 | func (q *FileFanoutQueue) Skip(fanoutID int64, count int64) error {
201 | if count <= 0 {
202 | // do nothing
203 | return nil
204 | }
205 | qf, err := q.getQueueFront(fanoutID)
206 | if err != nil {
207 | return err
208 | }
209 |
210 | _, err = qf.updateQueueFrontIndex(count)
211 | if err != nil {
212 | return err
213 | }
214 |
215 | return nil
216 | }
217 |
218 | // Subscribe do async subscribe by target fanout id
219 | func (q *FileFanoutQueue) Subscribe(fanoutID int64, fn func(int64, []byte, error)) error {
220 | if fn == nil {
221 | return errors.New("parameter 'fn' is nil")
222 | }
223 | qf, err := q.getQueueFront(fanoutID)
224 | if err != nil {
225 | return err
226 | }
227 |
228 | if qf.subscriber != nil {
229 | return ErrSubscribeExistErr
230 | }
231 |
232 | // do subscribe asynchronously
233 | go func() {
234 | q.doLoopSubscribe(fanoutID, fn)
235 |
236 | qf.subscriber = fn
237 | }()
238 |
239 | return nil
240 | }
241 |
242 | // FreeSubscribe to free subscriber by target fanout id
243 | func (q *FileFanoutQueue) FreeSubscribe(fanoutID int64) {
244 | qf, err := q.getQueueFront(fanoutID)
245 | if err != nil {
246 | return
247 | }
248 | qf.subscriber = nil
249 | }
250 |
251 | // FreeAllSubscribe to free all subscriber
252 | func (q *FileFanoutQueue) FreeAllSubscribe() {
253 | for _, qf := range q.frontIndexMap {
254 | qf.subscriber = nil
255 | }
256 |
257 | }
258 |
259 | func (q *FileFanoutQueue) getQueueFront(fanoutID int64) (*QueueFront, error) {
260 | q.queueGetLock.Lock()
261 | defer q.queueGetLock.Unlock()
262 | qf := q.frontIndexMap[fanoutID]
263 | if qf != nil {
264 | return qf, nil
265 | }
266 |
267 | qf = &QueueFront{
268 | fanoutID: fanoutID,
269 | frontIndex: 0,
270 | }
271 | err := qf.open(q.path)
272 | if err != nil {
273 | return nil, err
274 | }
275 | q.frontIndexMap[fanoutID] = qf
276 | return qf, nil
277 | }
278 |
279 | func (q *QueueFront) open(path string) error {
280 |
281 | frontFilePath := path + "/" + FanoutFrontFileName + strconv.Itoa(int(q.fanoutID)) + "/"
282 | err := os.MkdirAll(frontFilePath, os.ModeDir)
283 | if err != nil {
284 | return err
285 | }
286 |
287 | // create index file
288 | q.fanoutDatafile = &DB{
289 | path: frontFilePath + GetFileName(filePrefix, fileSuffix, q.fanoutID),
290 | InitialMmapSize: defaultFrontPageSize,
291 | opened: true,
292 | }
293 |
294 | err = q.fanoutDatafile.Open(defaultFileMode)
295 | if err != nil {
296 | return err
297 | }
298 | q.frontIndex = BytesToInt(q.fanoutDatafile.data[:defaultFrontPageSize])
299 | Assert(q.frontIndex >= 0, "front index can not be a negative number. value is %v", q.frontIndex)
300 |
301 | return nil
302 | }
303 |
304 | func (q *QueueFront) updateQueueFrontIndex(count int64) (int64, error) {
305 | q.queueFrontWriteLock.Lock()
306 | defer q.queueFrontWriteLock.Unlock()
307 |
308 | queueFrontIndex := q.frontIndex
309 | nextQueueFrontIndex := queueFrontIndex
310 |
311 | if nextQueueFrontIndex == MaxInt64 {
312 | nextQueueFrontIndex = 0
313 | } else {
314 | nextQueueFrontIndex += count
315 | }
316 | if nextQueueFrontIndex < 0 {
317 | // if overflow then reset to zero
318 | nextQueueFrontIndex = 0
319 | }
320 |
321 | q.frontIndex = nextQueueFrontIndex
322 |
323 | bb := IntToBytes(q.frontIndex)
324 | for idx, b := range bb {
325 | q.fanoutDatafile.data[idx] = b
326 |
327 | }
328 |
329 | return queueFrontIndex, nil
330 | }
331 |
332 | func (q *FileFanoutQueue) doSubscribe(index int64, data []byte, err error) {
333 | for fanoutID, v := range q.frontIndexMap {
334 | if v.subscriber != nil {
335 | v.subscribLock.Lock()
336 | defer v.subscribLock.Unlock()
337 | // note: a blocking subscriber callback will block this loop
338 | q.doLoopSubscribe(fanoutID, v.subscriber)
339 | }
340 |
341 | }
342 | }
343 |
344 | func (q *FileFanoutQueue) doLoopSubscribe(fanoutID int64, subscriber func(int64, []byte, error)) {
345 | for {
346 | if subscriber == nil {
347 | return
348 | }
349 | index, bb, err := q.Dequeue(fanoutID)
350 | if bb == nil || len(bb) == 0 {
351 | break // queue is empty
352 | }
353 | subscriber(index, bb, err)
354 | }
355 |
356 | }
357 |
--------------------------------------------------------------------------------
/filefanoutqueue_test.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "testing"
7 | "time"
8 |
9 | . "github.com/smartystreets/goconvey/convey"
10 | )
11 |
12 | // TestFanoutQueueOpen to test Open() function
13 | func TestFanoutQueueOpen(t *testing.T) {
14 | path := Tempfile()
15 | defer clearFiles(path, "fanoutqueue")
16 |
17 | Convey("TestFanoutQueueOpen", t, func() {
18 |
19 | fq := FileFanoutQueue{}
20 | err := fq.Open(path, "fanoutqueue", nil)
21 | So(err, ShouldBeNil)
22 | fq.Close()
23 | })
24 |
25 | }
26 |
27 | // TestFanoutQueueOpen to test Open() function
28 | func TestFanoutQueueOpenTwice(t *testing.T) {
29 | path := Tempfile()
30 | defer clearFiles(path, "fanoutqueue")
31 |
32 | Convey("TestFanoutQueueOpenTwice", t, func() {
33 |
34 | fq := FileFanoutQueue{}
35 | err := fq.Open(path, "fanoutqueue", nil)
36 | So(err, ShouldBeNil)
37 |
38 | err = fq.Open(path, "fanoutqueue", nil)
39 | So(err, ShouldNotBeNil)
40 | fq.Close()
41 | })
42 |
43 | }
44 |
45 | // TestFanoutQueueIsEmpty to test open a empty directory should return empty queue
46 | func TestFanoutQueueIsEmpty(t *testing.T) {
47 | path := Tempfile()
48 | clearFiles(path, "fanoutqueue")
49 | defer clearFiles(path, "fanoutqueue")
50 |
51 | Convey("TestFanoutQueueIsEmpty", t, func() {
52 |
53 | fanoutID := int64(100)
54 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID)
55 |
56 | fq := FileFanoutQueue{}
57 | err := fq.Open(path, "fanoutqueue", nil)
58 | So(err, ShouldBeNil)
59 | defer fq.Close()
60 |
61 | bool := fq.IsEmpty(fanoutID)
62 | SoMsg("New created queue must be empty", bool, ShouldBeTrue)
63 | })
64 |
65 | }
66 |
67 | // TestFanoutQueueSize to test queue Size() function
68 | func TestFanoutQueueSize(t *testing.T) {
69 | path := Tempfile()
70 | clearFiles(path, "fanoutqueue")
71 | defer clearFiles(path, "fanoutqueue")
72 |
73 | Convey("TestFanoutQueueSize", t, func() {
74 | fanoutID := int64(100)
75 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID)
76 |
77 | fq := FileFanoutQueue{}
78 | err := fq.Open(path, "fanoutqueue", nil)
79 | So(err, ShouldBeNil)
80 | defer fq.Close()
81 |
82 | sz := fq.Size(fanoutID)
83 | SoMsg("New created queue size must be zero", sz, ShouldEqual, 0)
84 | })
85 |
86 | }
87 |
88 | // TestFanoutQueueEnqueue to test enqueue only function
89 | func TestFanoutQueueEnqueue(t *testing.T) {
90 | path := Tempfile()
91 | clearFiles(path, "fanoutqueue")
92 | fanoutID := int64(100)
93 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID)
94 | defer clearFiles(path, "fanoutqueue")
95 |
96 | Convey("TestFanoutQueueEnqueue", t, func() {
97 | fq := FileFanoutQueue{}
98 | err := fq.Open(path, "fanoutqueue", nil)
99 |
100 | So(err, ShouldBeNil)
101 | defer fq.Close()
102 | sz := fq.Size(fanoutID)
103 | SoMsg("New created queue size must be zero", sz, ShouldEqual, 0)
104 |
105 | _, err = fq.Enqueue([]byte("hello world"))
106 |
107 | So(err, ShouldBeNil)
108 |
109 | sz = fq.Size(fanoutID)
110 | So(sz, ShouldEqual, 1)
111 |
112 | bool := fq.IsEmpty(fanoutID)
113 | So(bool, ShouldBeFalse)
114 | })
115 |
116 | }
117 |
118 | func clearFrontIndexFiles(path, queueName string, fanoutID int64) {
119 | RemoveFiles(path + "/" + queueName + "/" + FanoutFrontFileName + strconv.Itoa(int(fanoutID)))
120 | }
121 |
122 | // TestFanoutQueueEnqueueDequeue to test enqueue and dequeue function
123 | func TestFanoutQueueEnqueueDequeue(t *testing.T) {
124 | path := Tempfile()
125 | clearFiles(path, "fanoutqueue")
126 | fanoutID := int64(100)
127 | fanoutID1 := int64(101)
128 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID)
129 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID1)
130 | defer clearFiles(path, "fanoutqueue")
131 |
132 | Convey("TestFanoutQueueEnqueueDequeue", t, func() {
133 | fq := FileFanoutQueue{}
134 | err := fq.Open(path, "fanoutqueue", nil)
135 |
136 | So(err, ShouldBeNil)
137 | defer fq.Close()
138 |
139 | _, err = fq.Enqueue([]byte("hello world"))
140 |
141 | So(err, ShouldBeNil)
142 |
143 | index, data, _ := fq.Dequeue(fanoutID)
144 | index1, data1, _ := fq.Dequeue(fanoutID1)
145 | So(index, ShouldEqual, index1)
146 | So(data, ShouldResemble, data1)
147 | })
148 |
149 | }
150 |
151 | // TestFanoutQueueEnqueuePeek to test Peek() function
152 | func TestFanoutQueueEnqueuePeek(t *testing.T) {
153 | path := Tempfile()
154 | clearFiles(path, "fanoutqueue")
155 | fanoutID := int64(100)
156 | fanoutID1 := int64(101)
157 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID)
158 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID1)
159 | defer clearFiles(path, "fanoutqueue")
160 |
161 | Convey("TestFanoutQueueEnqueuePeek", t, func() {
162 |
163 | fq := FileFanoutQueue{}
164 | err := fq.Open(path, "fanoutqueue", nil)
165 |
166 | So(err, ShouldBeNil)
167 | defer fq.Close()
168 |
169 | _, err = fq.Enqueue([]byte("hello world"))
170 |
171 | So(err, ShouldBeNil)
172 |
173 | index, data, _ := fq.Peek(fanoutID)
174 | index1, data1, _ := fq.Peek(fanoutID1)
175 |
176 | So(index, ShouldEqual, index1)
177 | So(data, ShouldResemble, data1)
178 |
179 | // test peek all
180 | dataAll, indexAll, err := fq.PeekAll(fanoutID)
181 | So(err, ShouldBeNil)
182 |
183 | So(len(dataAll), ShouldEqual, 1)
184 | So(len(indexAll), ShouldEqual, 1)
185 | })
186 |
187 | }
188 |
189 | // TestFanoutQueueSkip to test Skip() function
190 | func TestFanoutQueueSkip(t *testing.T) {
191 | path := Tempfile()
192 | clearFiles(path, "fanoutqueue")
193 | fanoutID := int64(100)
194 | fanoutID1 := int64(101)
195 |
196 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID)
197 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID1)
198 | defer clearFiles(path, "fanoutqueue")
199 |
200 | Convey("TestFanoutQueueSkip", t, func() {
201 | fq := FileFanoutQueue{}
202 | err := fq.Open(path, "fanoutqueue", nil)
203 |
204 | So(err, ShouldBeNil)
205 | defer fq.Close()
206 |
207 | for i := 0; i < 10; i++ {
208 | _, err = fq.Enqueue([]byte("hello world" + strconv.Itoa(i)))
209 | So(err, ShouldBeNil)
210 | }
211 |
212 | fq.Skip(fanoutID, int64(5))
213 | index, data, _ := fq.Peek(fanoutID)
214 | SoMsg(fmt.Sprintf("index should be 5 but actually %d", index), index, ShouldEqual, 5)
215 | So(data, ShouldNotBeNil)
216 |
217 | fq.Skip(fanoutID1, int64(1))
218 | index1, data1, _ := fq.Peek(fanoutID1)
219 | So(index1, ShouldEqual, 1)
220 | So(data1, ShouldNotBeNil)
221 | })
222 |
223 | }
224 |
225 | // TestFanoutQueueSubscribe to test Subscribe() function with multiple subscriber ids
226 | func TestFanoutQueueSubscribe(t *testing.T) {
227 |
228 | path := Tempfile()
229 | clearFiles(path, "fanoutqueue")
230 | fanoutID := int64(100)
231 | fanoutID1 := int64(101)
232 | clearFrontIndexFiles(path, "fanoutqueue", fanoutID)
233 | clearFrontIndexFiles(path, "fanoutqueue", fanoutID1)
234 |
235 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID)
236 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID1)
237 | defer clearFiles(path, "fanoutqueue")
238 |
239 | Convey("TestFanoutQueueSubscribe", t, func() {
240 |
241 | fq := FileFanoutQueue{}
242 | err := fq.Open(path, "fanoutqueue", nil)
243 |
244 | So(err, ShouldBeNil)
245 | defer fq.Close()
246 |
247 | fanoutIDCount1, fanoutIDCount2 := 0, 0
248 | count := 10
249 |
250 | for i := 0; i < 10; i++ {
251 | _, err = fq.Enqueue([]byte("hello world" + strconv.Itoa(i)))
252 | So(err, ShouldBeNil)
253 | }
254 | for i := 0; i < 5; i++ {
255 | fq.Dequeue(fanoutID) // fanoutID dequeue 5 elements directly
256 | }
257 |
258 | fq.Subscribe(fanoutID, func(index int64, data []byte, err error) {
259 | fanoutIDCount1++
260 | })
261 |
262 | fq.Subscribe(fanoutID1, func(index int64, data []byte, err error) {
263 | fanoutIDCount2++
264 | })
265 |
266 | time.Sleep(time.Duration(3) * time.Second)
267 |
268 | So(fanoutIDCount1, ShouldEqual, 5)
269 | So(fanoutIDCount2, ShouldEqual, count)
270 | })
271 |
272 | }
273 |
274 | // TestFileQueue_Status
275 | func TestFanoutQueue_Status(t *testing.T) {
276 | Convey("Test empty queue status result", t, func() {
277 | path := Tempfile()
278 | clearFiles(path, "fanoutqueue")
279 | fanoutID := int64(100)
280 |
281 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID)
282 | defer clearFiles(path, "fanoutqueue")
283 |
284 | queue := FileFanoutQueue{}
285 | err := queue.Open(path, "fanoutqueue", nil)
286 |
287 | if err != nil {
288 | t.Error("open fanout queue failed", err)
289 | }
290 | defer queue.Close()
291 |
292 | qFileStatus := queue.Status(fanoutID)
293 |
294 | So(qFileStatus, ShouldNotBeNil)
295 | So(qFileStatus.FrontIndex, ShouldEqual, 0)
296 | So(qFileStatus.HeadIndex, ShouldEqual, 0)
297 | So(qFileStatus.TailIndex, ShouldEqual, 0)
298 | So(qFileStatus.HeadDataPageIndex, ShouldEqual, 0)
299 | So(qFileStatus.HeadDataItemOffset, ShouldEqual, 0)
300 |
301 | So(len(qFileStatus.IndexFileList), ShouldEqual, 0)
302 | So(len(qFileStatus.DataFileList), ShouldEqual, 0)
303 | So(qFileStatus.MetaFileInfo, ShouldNotBeNil)
304 | So(qFileStatus.FrontFileInfo, ShouldNotBeNil)
305 |
306 | })
307 |
308 | Convey("Test non-empty queue status result", t, func() {
309 | path := Tempfile()
310 |
311 | fanoutID := int64(100)
312 |
313 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID)
314 |
315 | queue := FileFanoutQueue{}
316 | err := queue.Open(path, "fanoutqueue", nil)
317 |
318 | if err != nil {
319 | t.Error("open fanout queue failed", err)
320 | }
321 | defer queue.Close()
322 | defer clearFiles(path, "fanoutqueue")
323 |
324 | data := []byte("hello xmatthew")
325 | dataLen := len(data)
326 |
327 | queue.Enqueue(data)
328 | queue.Dequeue(fanoutID)
329 |
330 | qFileStatus := queue.Status(fanoutID)
331 |
332 | So(qFileStatus, ShouldNotBeNil)
333 | So(qFileStatus.FrontIndex, ShouldEqual, 1)
334 | So(qFileStatus.HeadIndex, ShouldEqual, 1)
335 | So(qFileStatus.TailIndex, ShouldEqual, 0)
336 | So(qFileStatus.HeadDataPageIndex, ShouldEqual, 0)
337 | So(qFileStatus.HeadDataItemOffset, ShouldEqual, dataLen)
338 |
339 | So(len(qFileStatus.IndexFileList), ShouldEqual, 1)
340 |
341 | fileInfo := qFileStatus.IndexFileList[0]
342 | So(fileInfo.CanGC, ShouldBeFalse)
343 | So(fileInfo.FileIndex, ShouldEqual, 0)
344 |
345 | So(len(qFileStatus.DataFileList), ShouldEqual, 1)
346 | 		fileInfo = qFileStatus.DataFileList[0]
347 | So(fileInfo.CanGC, ShouldBeFalse)
348 | So(fileInfo.FileIndex, ShouldEqual, 0)
349 | So(qFileStatus.MetaFileInfo, ShouldNotBeNil)
350 | So(qFileStatus.FrontFileInfo, ShouldNotBeNil)
351 |
352 | })
353 |
354 | }
355 |
356 | func TestFanoutQueue_PeekPagination(t *testing.T) {
357 | Convey("Test PeekPagination", t, func() {
358 | path := Tempfile()
359 | clearFiles(path, "fanoutqueue")
360 | fanoutID := int64(100)
361 |
362 | defer clearFrontIndexFiles(path, "fanoutqueue", fanoutID)
363 |
364 | queue := FileFanoutQueue{}
365 | err := queue.Open(path, "fanoutqueue", nil)
366 | if err != nil {
367 | t.Error(err)
368 | }
369 | defer queue.Close()
370 | defer clearFiles(path, "fanoutqueue")
371 |
372 | Convey("test PeekPagination on empty queue", func() {
373 | data, indexs, err := queue.PeekPagination(fanoutID, 0, 0)
374 | So(err, ShouldBeNil)
375 | So(data, ShouldBeEmpty)
376 | So(indexs, ShouldBeEmpty)
377 |
378 | data, indexs, err = queue.PeekPagination(fanoutID, 1, 1)
379 | So(err, ShouldBeNil)
380 | So(data, ShouldBeEmpty)
381 | So(indexs, ShouldBeEmpty)
382 | })
383 |
384 | 		Convey("test PeekPagination on items smaller than pagesize", func() {
385 | for i := 0; i < 5; i++ { // add value
386 | _, err := queue.Enqueue([]byte("hello matthew " + strconv.Itoa(i)))
387 | So(err, ShouldBeNil)
388 | }
389 |
390 | data, indexs, err := queue.PeekPagination(fanoutID, 0, 0)
391 | So(err, ShouldBeNil)
392 | So(len(data), ShouldEqual, 5)
393 | So(string(data[4]), ShouldEqual, "hello matthew 4")
394 | So(len(indexs), ShouldEqual, 5)
395 |
396 | data, indexs, err = queue.PeekPagination(fanoutID, 1, 10)
397 | So(err, ShouldBeNil)
398 | So(len(data), ShouldEqual, 5)
399 | So(string(data[4]), ShouldEqual, "hello matthew 4")
400 | So(len(indexs), ShouldEqual, 5)
401 |
402 | 			data, indexs, err = queue.PeekPagination(fanoutID, 2, 10) // page beyond available items
403 | So(err, ShouldBeNil)
404 | So(data, ShouldBeEmpty)
405 | So(indexs, ShouldBeEmpty)
406 |
407 | data, indexs, err = queue.PeekPagination(fanoutID, 2, 2)
408 | So(err, ShouldBeNil)
409 | So(len(data), ShouldEqual, 2)
410 | So(string(data[1]), ShouldEqual, "hello matthew 3")
411 | So(len(indexs), ShouldEqual, 2)
412 | })
413 |
414 | })
415 | }
416 |
--------------------------------------------------------------------------------
/filequeue.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | "errors"
7 | "log"
8 | "os"
9 | "sync"
10 | "time"
11 | )
12 |
13 | const (
14 | // front index page size
15 | defaultFrontPageSize = 1 << 3
16 | // meta file page size
17 | defaultMetaPageSize = 1 << 4
18 | // DefaultDataPageSize data file size
19 | DefaultDataPageSize = 128 * 1024 * 1024
20 |
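   | 	// Each index item occupies 32 bytes (defaultIndexItemLen), laid out by Enqueue as:
   | 	// data page index (8B), data offset (4B), data length (4B), enqueue timestamp (8B), reserved (8B).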
21 | defaultItemLenBits = 5
22 | defaultIndexItemLen = 1 << defaultItemLenBits
23 | 	// DefaultIndexItemsPerPage exponent of index items per page (items per page = 1 << DefaultIndexItemsPerPage)
24 | DefaultIndexItemsPerPage = 17
25 | defaultItemsPerPage = 1 << DefaultIndexItemsPerPage
26 | // index file size
27 | defaultIndexPageSize = defaultIndexItemLen * defaultItemsPerPage
28 | // MaxInt64 max value of int64
29 | MaxInt64 = 0x7fffffffffffffff
30 | // IndexFileName file name
31 | IndexFileName = "index"
32 | // DataFileName file name
33 | DataFileName = "data"
34 | // MetaFileName file name
35 | MetaFileName = "meta_data"
36 | // FrontFileName file name
37 | FrontFileName = "front_index"
38 |
39 | filePrefix = "page-"
40 | fileSuffix = ".dat"
41 |
42 | defaultFileMode = 0666
43 |
44 | Default_Page_Size = 10
45 | )
46 |
47 | // DefaultOptions default options
48 | var DefaultOptions = &Options{
49 | DataPageSize: DefaultDataPageSize,
50 | indexPageSize: defaultIndexPageSize,
51 | IndexItemsPerPage: DefaultIndexItemsPerPage,
52 | itemsPerPage: defaultItemsPerPage,
53 | AutoGCBySeconds: 0,
54 | }
55 |
56 | // FileQueue is a file-backed queue implemented with memory-mapped files
57 | type FileQueue struct {
58 | // front index of the big queue,
59 | frontIndex int64
60 |
61 | // head index of the array, this is the read write barrier.
62 | 	// readers can only read items before this index, and writers can write at this index or after
63 | headIndex int64
64 |
65 | // tail index of the array,
66 | // readers can't read items before this tail
67 | tailIndex int64
68 |
69 | // head index of the data page, this is the to be appended data page index
70 | headDataPageIndex int64
71 |
72 | // head offset of the data page, this is the to be appended data offset
73 | headDataItemOffset int64
74 |
75 | // Protects mmap access during remapping.
76 | // use read and write lock
77 | lock sync.RWMutex
78 |
79 | // lock for enqueue state management
80 | enqueueLock sync.Mutex
81 |
82 | // locks for queue front write management
83 | queueFrontWriteLock sync.Mutex
84 |
85 | path string
86 |
87 | indexFile *DBFactory
88 | dataFile *DBFactory
89 | metaFile *DB
90 | frontFile *DB
91 |
92 | // queue options
93 | options *Options
94 |
95 | // set subscribe action
96 | subscriber func(int64, []byte, error)
97 |
98 | enqueueChan chan bool
99 |
100 | gcLock sync.Mutex
101 |
102 | autoGCQuit chan int
103 |
104 | opened bool
105 | }
106 |
107 | // QueueFilesStatus holds status information about the queue's backing files
108 | type QueueFilesStatus struct {
109 | // front index of the big queue,
110 | FrontIndex int64
111 |
112 | // head index of the array, this is the read write barrier.
113 | 	// readers can only read items before this index, and writers can write at this index or after
114 | HeadIndex int64
115 |
116 | // tail index of the array,
117 | // readers can't read items before this tail
118 | TailIndex int64
119 |
120 | // head index of the data page, this is the to be appended data page index
121 | HeadDataPageIndex int64
122 |
123 | // head offset of the data page, this is the to be appended data offset
124 | HeadDataItemOffset int64
125 |
126 | IndexFileList []*QueueFileInfo
127 | DataFileList []*QueueFileInfo
128 | MetaFileInfo *QueueFileInfo
129 | FrontFileInfo *QueueFileInfo
130 | }
131 |
132 | // QueueFileInfo describes a single queue file
133 | type QueueFileInfo struct {
134 | Name string
135 | Path string
136 | Size int64
137 | FileIndex int64
138 | CanGC bool
139 | }
140 |
141 | // Status get status info from current queue
142 | func (q *FileQueue) Status() *QueueFilesStatus {
143 |
144 | indexPageIndex := q.frontIndex >> uint(q.options.IndexItemsPerPage)
145 |
146 | bb, err := q.getIndexItemArray(q.frontIndex)
147 | var dataPageIndex int64
148 | if err != nil {
149 | dataPageIndex = -1
150 | } else {
151 | dataPageIndex = BytesToInt(bb[:8])
152 | }
153 |
154 | r := q.status(q.frontIndex, indexPageIndex, dataPageIndex)
155 |
156 | // gc status
157 | return r
158 |
159 | }
160 |
161 | // NewAndOpenFileQueue initializes a FileQueue and opens it at the target directory with the given queue name
162 | func NewAndOpenFileQueue(dir string, queueName string, options *Options) (IOQueue, error) {
163 | fileQueue := &FileQueue{}
164 | err := fileQueue.Open(dir, queueName, options)
165 | if err != nil {
166 | return nil, err
167 | }
168 | return fileQueue, nil
169 | }
170 |
171 | func (q *FileQueue) status(frontIndex, currentIndexPageIndex, currentDataPageIndex int64) *QueueFilesStatus {
172 | result := QueueFilesStatus{FrontIndex: frontIndex, HeadIndex: q.headIndex, TailIndex: q.tailIndex,
173 | HeadDataPageIndex: q.headDataPageIndex, HeadDataItemOffset: q.headDataItemOffset}
174 |
175 | indexPageIndex := q.headIndex >> uint(q.options.IndexItemsPerPage)
176 | result.IndexFileList = wrapFileInfos(q.indexFile, indexPageIndex, currentIndexPageIndex)
177 |
178 | result.DataFileList = wrapFileInfos(q.dataFile, q.headDataPageIndex, currentDataPageIndex)
179 | result.MetaFileInfo, _ = wrapFileInfo(0, q.metaFile)
180 | result.FrontFileInfo, _ = wrapFileInfo(0, q.frontFile)
181 | return &result
182 | }
183 |
184 | // wrapFileInfos wrap queue file info from DBFactory
185 | func wrapFileInfos(factory *DBFactory, maxFileNo, currentFileNo int64) []*QueueFileInfo {
186 | indexFileInfos := make([]*QueueFileInfo, 0)
187 | for i := maxFileNo; i >= 0; i-- {
188 | filePath := factory.getFilePath(i)
189 | _, err := os.Open(filePath)
190 | if err != nil {
191 | continue
192 | }
193 | db, err := factory.acquireDB(i)
194 | if err == nil {
195 | info, err := wrapFileInfo(i, db)
196 | if i < currentFileNo {
197 | info.CanGC = true
198 | }
199 | if err == nil {
200 | indexFileInfos = append(indexFileInfos, info)
201 | }
202 | }
203 | }
204 | return indexFileInfos
205 | }
206 |
207 | // wrapFileInfo wrap queue file info from DB
208 | func wrapFileInfo(fileIndex int64, db *DB) (*QueueFileInfo, error) {
209 | finfo, err := db.file.Stat()
210 | if err != nil {
211 | return nil, err
212 | }
213 | result := &QueueFileInfo{Name: db.file.Name(), Path: db.path, Size: int64(finfo.Size()), FileIndex: fileIndex}
214 | return result, nil
215 | }
216 |
217 | // Open opens the queue files under dir/queueName, creating the directories and files if needed.
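   | //
   | // Minimal usage sketch (the directory and queue name below are placeholders):
   | //
   | //	var q FileQueue
   | //	if err := q.Open("./bin/temp", "demo", nil); err != nil {
   | //		// handle error
   | //	}
   | //	defer q.Close()
   | //	idx, _ := q.Enqueue([]byte("hello"))
   | //	_, data, _ := q.Dequeue()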
218 | func (q *FileQueue) Open(dir string, queueName string, options *Options) error {
219 | if len(dir) == 0 {
220 | return errors.New("parameter 'dir' can not be blank")
221 | }
222 |
223 | if len(queueName) == 0 {
224 | return errors.New("parameter 'queueName' can not be blank")
225 | }
226 |
227 | if !q.opened {
228 | q.opened = true
229 | } else {
230 | return errors.New("FileQueue already opened")
231 | }
232 |
233 | if options == nil {
234 | options = DefaultOptions
235 | }
236 | q.options = options
237 | q.options.itemsPerPage = 1 << uint(q.options.IndexItemsPerPage)
238 | q.options.indexPageSize = defaultIndexItemLen * q.options.itemsPerPage
239 |
240 | path := dir + "/" + queueName
241 |
242 | err := os.MkdirAll(path, os.ModeDir)
243 | if err != nil {
244 | return err
245 | }
246 |
247 | // initialize directories
248 | q.path = path
249 |
250 | err = q.initDirs()
251 | if err != nil {
252 | return err
253 | }
254 | err = q.initFrontFile()
255 | if err != nil {
256 | return err
257 | }
258 | err = q.initMetaFile()
259 | if err != nil {
260 | return err
261 | }
262 |
263 | dataDBFactory := DBFactory{
264 | filePath: q.path + "/" + DataFileName,
265 | filePrefix: filePrefix,
266 | fileSuffix: fileSuffix,
267 | lockMap: make(map[int64]*sync.Mutex),
268 | dbMap: make(map[int64]*DB),
269 | InitialMmapSize: q.options.DataPageSize,
270 | }
271 | q.dataFile = &dataDBFactory
272 |
273 | indexDBFactory := DBFactory{
274 | filePath: q.path + "/" + IndexFileName,
275 | filePrefix: filePrefix,
276 | fileSuffix: fileSuffix,
277 | lockMap: make(map[int64]*sync.Mutex),
278 | dbMap: make(map[int64]*DB),
279 | InitialMmapSize: q.options.indexPageSize,
280 | }
281 | q.indexFile = &indexDBFactory
282 |
283 | err = q.initDataPageIndex()
284 | if err != nil {
285 | return err
286 | }
287 |
288 | q.enqueueChan = make(chan bool, 1)
289 |
290 | // check auto gc
291 | if q.options.AutoGCBySeconds > 0 {
292 | q.autoGC()
293 | }
294 |
295 | return nil
296 | }
297 |
298 | // IsEmpty determines whether the queue is empty
299 | func (q *FileQueue) IsEmpty() bool {
300 | return q.frontIndex >= q.headIndex
301 | }
302 |
303 | // isEmpty determines whether the queue is empty for the given frontIndex
304 | func (q *FileQueue) isEmpty(frontIndex int64) bool {
305 | return frontIndex >= q.headIndex
306 | }
307 |
308 | // Size returns the total number of items available in the queue.
309 | func (q *FileQueue) Size() int64 {
310 | sz := q.headIndex - q.frontIndex
311 | if sz < 0 {
312 | sz = 0
313 | }
314 | return int64(sz)
315 | }
316 |
317 | // size calculates the number of available items from the given frontIndex
318 | func (q *FileQueue) size(frontIndex int64) int64 {
319 | sz := q.headIndex - frontIndex
320 | if sz < 0 {
321 | sz = 0
322 | }
323 | return int64(sz)
324 | }
325 |
326 | // EnqueueAsync adds an item to the queue asynchronously; HeadIndex will increase.
327 | // The result is reported through the callback function fn.
328 | func (q *FileQueue) EnqueueAsync(data []byte, fn func(int64, error)) {
329 | go q.doEnqueueAsync(data, fn)
330 | }
331 |
332 | func (q *FileQueue) doEnqueueAsync(data []byte, fn func(int64, error)) {
333 | index, err := q.Enqueue(data)
334 | fn(index, err)
335 | }
336 |
337 | // Enqueue adds an item to the queue; HeadIndex will increase
338 | func (q *FileQueue) Enqueue(data []byte) (int64, error) {
339 | sz := len(data)
340 | if sz == 0 {
341 | return -1, ErrEnqueueDataNull
342 | }
343 | q.lock.RLock()
344 | defer q.lock.RUnlock()
345 |
346 | q.enqueueLock.Lock()
347 | defer q.enqueueLock.Unlock()
348 |
349 | 	// check whether the current data page has enough space left; if not, move to the next page
350 | if int64(q.headDataItemOffset)+int64(sz) > int64(q.options.DataPageSize) {
351 | q.headDataPageIndex++
352 | q.headDataItemOffset = 0
353 | }
354 |
355 | toAppendDataPageIndex := q.headDataPageIndex
356 | toAppendDataItemOffset := q.headDataItemOffset
357 |
358 | db, err := q.dataFile.acquireDB(toAppendDataPageIndex)
359 | if err != nil {
360 | return -1, err
361 | }
362 |
363 | // write data
364 | copy(db.data[toAppendDataItemOffset:toAppendDataItemOffset+int64(sz)], data[0:sz])
365 |
366 | 	// advance the head data offset past the newly written item
367 | q.headDataItemOffset = q.headDataItemOffset + int64(sz)
368 |
369 | toAppendArrayIndex := q.headIndex
370 | toAppendIndexPageIndex := toAppendArrayIndex >> uint(q.options.IndexItemsPerPage)
371 |
372 | indexDB, err := q.indexFile.acquireDB(toAppendIndexPageIndex)
373 | if err != nil {
374 | return -1, err
375 | }
376 | // calc index offset
377 | toAppendIndexItemOffset := Mod(toAppendArrayIndex, q.options.IndexItemsPerPage) << defaultItemLenBits
378 | 	// build the 32-byte index item: data page index (8B), data offset (4B), data length (4B), timestamp (8B), reserved (8B)
379 | b := new(bytes.Buffer)
380 | binary.Write(b, binary.BigEndian, int64(toAppendDataPageIndex))
381 | binary.Write(b, binary.BigEndian, int32(toAppendDataItemOffset))
382 | binary.Write(b, binary.BigEndian, int32(sz))
383 | binary.Write(b, binary.BigEndian, int64(time.Now().Unix()))
384 | binary.Write(b, binary.BigEndian, int64(0))
385 |
386 | bb := b.Bytes()
387 | copy(indexDB.data[toAppendIndexItemOffset:toAppendIndexItemOffset+defaultIndexItemLen], bb[:defaultIndexItemLen])
388 |
389 | 	// advance the head index to the next slot
390 | q.headIndex = q.headIndex + 1
391 |
392 | // update meta data
393 | b = new(bytes.Buffer)
394 | binary.Write(b, binary.BigEndian, q.headIndex)
395 | binary.Write(b, binary.BigEndian, q.tailIndex)
396 |
397 | bb = b.Bytes()
398 |
399 | sz = len(bb)
400 | copy(q.metaFile.data[:sz], bb[:])
401 |
402 | go q.changeSubscribeStatus(true)
403 | return toAppendArrayIndex, nil
404 | }
405 |
406 | // Dequeue retrieves and removes the item at the front of the queue
407 | func (q *FileQueue) Dequeue() (int64, []byte, error) {
408 |
409 | if q.IsEmpty() {
410 | return -1, nil, nil
411 | }
412 |
413 | // check and update queue front index info
414 | index, err := q.updateQueueFrontIndex()
415 | if err != nil {
416 | return -1, nil, err
417 | }
418 | bb, err := q.peek(index)
419 | return index, bb, err
420 | }
421 |
422 | // Peek retrieves the item at the front of the queue without removing it;
423 | // if an item exists, it returns the index and the item data
424 | func (q *FileQueue) Peek() (int64, []byte, error) {
425 | if q.IsEmpty() {
426 | return -1, nil, nil
427 | }
428 | q.lock.RLock()
429 | defer q.lock.RUnlock()
430 |
431 | index := q.frontIndex
432 | bb, err := q.peek(index)
433 | return index, bb, err
434 | }
435 |
436 | // PeekAll retrieves all items from the front of the queue and
437 | // returns the array of data together with the array of indexes
438 | func (q *FileQueue) PeekAll() ([][]byte, []int64, error) {
439 | if q.IsEmpty() {
440 | return nil, nil, nil
441 | }
442 |
443 | q.lock.RLock()
444 | defer q.lock.RUnlock()
445 | index := q.frontIndex
446 |
447 | return q.peekAll(index, q.Size())
448 | }
449 |
450 | // PeekPagination peeks data from the queue page by page.
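   | //
   | // For example, with 5 items queued, PeekPagination(2, 2) returns the third and
   | // fourth items; page is 1-based and a pagesize of 0 falls back to Default_Page_Size.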
451 | func (q *FileQueue) PeekPagination(page, pagesize uint64) ([][]byte, []int64, error) {
452 | return q.peekPagination(q.frontIndex, q.Size(), page, pagesize)
453 | }
454 |
455 | // peekPagination peeks data from the queue page by page, starting at the given front index.
456 | func (q *FileQueue) peekPagination(frontindex int64, size int64, page, pagesize uint64) ([][]byte, []int64, error) {
457 | if page == 0 {
458 | page = 1
459 | }
460 | if pagesize == 0 {
461 | pagesize = Default_Page_Size
462 | }
463 |
464 | begin := (page - 1) * pagesize
465 | end := begin + pagesize
466 |
467 | if begin > uint64(size) { // no data return
468 | return [][]byte{}, []int64{}, nil
469 | }
470 |
471 | if end > uint64(size) {
472 | end = uint64(size)
473 | pagesize = end - begin
474 | }
475 |
476 | // fix the offset
477 | begin = begin + uint64(frontindex)
478 | end = end + uint64(frontindex)
479 |
480 | result := make([][]byte, pagesize)
481 | indexs := make([]int64, pagesize)
482 |
483 | 	index := 0
484 | for i := begin; i < end; i++ {
485 | bb, err := q.peek(int64(i))
486 | if err != nil {
487 | return nil, nil, err
488 | }
489 | result[index] = bb
490 | indexs[index] = int64(i)
491 | index++
492 | }
493 | return result, indexs, nil
494 | }
495 |
496 | // Skip advances the front index past the next count items
497 | func (q *FileQueue) Skip(count int64) error {
498 | if q.IsEmpty() {
499 | return nil
500 | }
501 |
502 | for i := 0; i < int(count); i++ {
503 | // check and update queue front index info
504 | _, err := q.updateQueueFrontIndex()
505 | if err != nil {
506 | return err
507 | }
508 |
509 | if q.IsEmpty() {
510 | return nil
511 | }
512 | }
513 | return nil
514 | }
515 |
516 | // peek item from the queue
517 | func (q *FileQueue) peek(index int64) ([]byte, error) {
518 | // get the queue message from the index
519 | err := q.validateIndex(index)
520 | if err != nil {
521 | return nil, err
522 | }
523 |
524 | bb, err := q.getIndexItemArray(index)
525 | if err != nil {
526 | return nil, err
527 | }
528 | dataPageIndex := BytesToInt(bb[0:8])
529 | dataItemOffset := BytesToInt32(bb[8:12])
530 | dataItemLength := BytesToInt32(bb[12:16])
531 |
532 | dataDB, err := q.dataFile.acquireDB(dataPageIndex)
533 | if err != nil {
534 | return nil, err
535 | }
536 |
537 | ret := make([]byte, dataItemLength)
538 | copy(ret, dataDB.data[dataItemOffset:])
539 | return ret, nil
540 | }
541 |
542 | // peek all items from the queue
543 | func (q *FileQueue) peekAll(index int64, size int64) ([][]byte, []int64, error) {
544 | result := make([][]byte, size)
545 | indexs := make([]int64, size)
546 | for i := 0; i < int(size); i++ {
547 | bb, err := q.peek(index)
548 | if err != nil {
549 | return nil, nil, err
550 | }
551 | result[i] = bb
552 | indexs[i] = index
553 | index++
554 | }
555 | return result, indexs, nil
556 | }
557 |
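   | // validateIndex checks that index lies inside the readable range between
   | // tailIndex and headIndex, including the case where headIndex has wrapped around.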
558 | func (q *FileQueue) validateIndex(index int64) error {
559 | if q.tailIndex <= q.headIndex {
560 | if index < q.tailIndex || index > q.headIndex {
561 | return ErrIndexOutOfBoundTH
562 | }
563 | } else {
564 | if index < q.tailIndex && index >= q.headIndex {
565 | return ErrIndexOutOfBoundTH
566 | }
567 | }
568 |
569 | return nil
570 | }
571 |
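   | // updateQueueFrontIndex advances the front index by one (wrapping at MaxInt64),
   | // persists it to the front index file and returns the previous front index.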
572 | func (q *FileQueue) updateQueueFrontIndex() (int64, error) {
573 | q.queueFrontWriteLock.Lock()
574 | defer q.queueFrontWriteLock.Unlock()
575 |
576 | queueFrontIndex := q.frontIndex
577 | nextQueueFrontIndex := queueFrontIndex
578 |
579 | if nextQueueFrontIndex == MaxInt64 {
580 | nextQueueFrontIndex = 0
581 | } else {
582 | nextQueueFrontIndex++
583 | }
584 | q.frontIndex = nextQueueFrontIndex
585 |
586 | bb := IntToBytes(q.frontIndex)
587 | for idx, b := range bb {
588 | q.frontFile.data[idx] = b
589 |
590 | }
591 |
592 | return queueFrontIndex, nil
593 | }
594 |
595 | func (q *FileQueue) initFrontFile() error {
596 | // create index file
597 | q.frontFile = &DB{
598 | path: q.path + "/" + FrontFileName + "/" + GetFileName(filePrefix, fileSuffix, 0),
599 | InitialMmapSize: defaultFrontPageSize,
600 | opened: true,
601 | }
602 |
603 | err := q.frontFile.Open(defaultFileMode)
604 | if err != nil {
605 | return err
606 | }
607 | q.frontIndex = BytesToInt(q.frontFile.data[:defaultFrontPageSize])
608 | 	Assert(q.frontIndex >= 0, "front index can not be a negative number. value is %v", q.frontIndex)
609 | return nil
610 | }
611 |
612 | func (q *FileQueue) initMetaFile() error {
613 | // create index file
614 | q.metaFile = &DB{
615 | path: q.path + "/" + MetaFileName + "/" + GetFileName(filePrefix, fileSuffix, 0),
616 | InitialMmapSize: defaultMetaPageSize,
617 | opened: true,
618 | }
619 |
620 | err := q.metaFile.Open(defaultFileMode)
621 | if err != nil {
622 | return err
623 | }
624 |
625 | q.headIndex = BytesToInt(q.metaFile.data[:8])
626 | 	q.tailIndex = BytesToInt(q.metaFile.data[8:16]) // tail index is stored in bytes 8..15, right after the 8-byte head index
627 |
628 | 	Assert(q.headIndex >= 0, "head index can not be a negative number. value is %v", q.headIndex)
629 | 	Assert(q.tailIndex >= 0, "tail index can not be a negative number. value is %v", q.tailIndex)
630 | return nil
631 | }
632 |
633 | func (q *FileQueue) initDataPageIndex() error {
634 | if q.IsEmpty() {
635 | q.headDataPageIndex = 0
636 | q.headDataItemOffset = 0
637 | return nil
638 | }
639 | // get from index file
640 | previousIndex := q.headIndex - 1
641 |
642 | bb, err := q.getIndexItemArray(previousIndex)
643 | if err != nil {
644 | return err
645 | }
646 | previousDataPageIndex := BytesToInt(bb[:8])
647 | previousDataItemOffset := BytesToInt32(bb[8:12])
648 | 	previousDataItemLength := BytesToInt32(bb[12:16])
649 |
650 | 	q.headDataPageIndex = previousDataPageIndex
651 | 	q.headDataItemOffset = int64(previousDataItemOffset + previousDataItemLength)
652 |
653 | return nil
654 |
655 | }
656 |
657 | func (q *FileQueue) getIndexItemArray(index int64) ([]byte, error) {
658 | // calc index page no
659 | previousIndexPageIndex := index >> uint(q.options.IndexItemsPerPage)
660 |
661 | indexDB, err := q.indexFile.acquireDB(previousIndexPageIndex)
662 | if err != nil {
663 | return nil, err
664 | }
665 | 	// calc index item offset position
666 | previousIndexPageOffset := Mod(index, q.options.IndexItemsPerPage) << defaultItemLenBits
667 |
668 | bb := indexDB.data[previousIndexPageOffset : previousIndexPageOffset+defaultIndexItemLen]
669 |
670 | return bb, nil
671 | }
672 |
673 | func (q *FileQueue) initDirs() error {
674 | indexFilePath := q.path + "/" + IndexFileName
675 | err := os.MkdirAll(indexFilePath, os.ModeDir)
676 | if err != nil {
677 | return err
678 | }
679 |
680 | dataFilePath := q.path + "/" + DataFileName
681 | err = os.MkdirAll(dataFilePath, os.ModeDir)
682 | if err != nil {
683 | return err
684 | }
685 |
686 | metaFilePath := q.path + "/" + MetaFileName
687 | err = os.MkdirAll(metaFilePath, os.ModeDir)
688 | if err != nil {
689 | return err
690 | }
691 |
692 | frontFilePath := q.path + "/" + FrontFileName
693 | err = os.MkdirAll(frontFilePath, os.ModeDir)
694 | if err != nil {
695 | return err
696 | }
697 |
698 | return nil
699 | }
700 |
701 | // Close closes the file queue and releases all resources
702 | func (q *FileQueue) Close() error {
703 | q.lock.Lock()
704 | defer q.lock.Unlock()
705 |
706 | 	// stop the auto gc goroutine if it is running
707 | if q.autoGCQuit != nil {
708 | q.autoGCQuit <- 1
709 | }
710 |
711 | q.FreeSubscribe()
712 |
713 | // close front index file
714 | if q.frontFile != nil {
715 | q.frontFile.Close()
716 | }
717 |
718 | if q.metaFile != nil {
719 | q.metaFile.Close()
720 | }
721 |
722 | if q.indexFile != nil {
723 | q.indexFile.Close()
724 | }
725 |
726 | if q.dataFile != nil {
727 | q.dataFile.Close()
728 | }
729 |
730 | q.opened = false
731 | return nil
732 | }
733 |
734 | // Gc deletes all consumed data files to free disk space.
735 | //
736 | // BigQueue persists enqueued data in disk files, and these files remain on disk even after
737 | // their data has been dequeued, so your application is responsible for periodically calling
738 | // this method to delete the used data files and free disk space.
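   | //
   | // Illustrative sketch (q is an opened *FileQueue; the interval is an arbitrary choice):
   | //
   | //	for range time.Tick(time.Minute) {
   | //		if err := q.Gc(); err != nil {
   | //			log.Println("gc failed:", err)
   | //		}
   | //	}
   | //
   | // Alternatively, set Options.AutoGCBySeconds to have the queue run Gc periodically on its own.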
739 | func (q *FileQueue) Gc() error {
740 | q.gcLock.Lock()
741 | defer q.gcLock.Unlock()
742 | frontIndex := q.frontIndex
743 |
744 | if frontIndex == 0 {
745 | return nil
746 | }
747 |
748 | frontIndex--
749 |
750 | err := q.validateIndex(frontIndex)
751 | if err != nil {
752 | return err
753 | }
754 |
755 | indexPageIndex := frontIndex >> uint(q.options.IndexItemsPerPage)
756 | bb, err := q.getIndexItemArray(frontIndex)
757 | if err != nil {
758 | return err
759 | }
760 |
761 | dataPageIndex := BytesToInt(bb[:8])
762 | if indexPageIndex > 0 {
763 | q.indexFile.removeBeforeIndex(indexPageIndex)
764 | }
765 |
766 | if dataPageIndex > 0 {
767 | q.dataFile.removeBeforeIndex(dataPageIndex)
768 | }
769 |
770 | q.tailIndex = frontIndex
771 |
772 | return nil
773 | }
774 |
775 | // Subscribe registers a callback function that is invoked for every message in the queue
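   | //
   | // Sketch (q is an opened *FileQueue); the callback receives each item as it becomes available:
   | //
   | //	q.Subscribe(func(index int64, data []byte, err error) {
   | //		log.Println(index, string(data), err)
   | //	})
   | //	defer q.FreeSubscribe()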
776 | func (q *FileQueue) Subscribe(fn func(int64, []byte, error)) error {
777 | if q.enqueueChan == nil {
778 | return ErrSubscribeFailedNoOpenErr
779 | }
780 |
781 | if q.subscriber != nil {
782 | return ErrSubscribeExistErr
783 | }
784 | q.subscriber = fn
785 | go q.doLoopSubscribe()
786 | return nil
787 | }
788 |
789 | // FreeSubscribe removes the subscriber and stops the subscribe loop
790 | func (q *FileQueue) FreeSubscribe() {
791 | q.subscriber = nil
792 | go q.changeSubscribeStatusForce(false)
793 | }
794 |
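   | // changeSubscribeStatus signals the subscribe loop only when no signal is
   | // already pending, so Enqueue does not block on the notification channel.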
795 | func (q *FileQueue) changeSubscribeStatus(s bool) {
796 | if len(q.enqueueChan) == 0 {
797 | q.changeSubscribeStatusForce(s)
798 | }
799 | }
800 |
801 | func (q *FileQueue) changeSubscribeStatusForce(s bool) {
802 | q.enqueueChan <- s
803 | }
804 |
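   | // doLoopSubscribe drains the queue, invoking the subscriber for every item,
   | // then waits on enqueueChan for new data or for FreeSubscribe to stop the loop.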
805 | func (q *FileQueue) doLoopSubscribe() {
806 | for {
807 | for {
808 | index, bb, err := q.Dequeue()
809 | if bb == nil {
810 | break // queue is empty
811 | }
812 | if q.subscriber != nil {
813 | q.subscriber(index, bb, err)
814 | }
815 | }
816 |
817 | loop := <-q.enqueueChan
818 |
819 | if !loop {
820 | break
821 | }
822 | }
823 | }
824 |
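   | // autoGC starts a background goroutine that runs Gc every
   | // Options.AutoGCBySeconds seconds until Close signals autoGCQuit.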
825 | func (q *FileQueue) autoGC() {
826 | 	ticker := time.NewTicker(time.Second * time.Duration(q.options.AutoGCBySeconds))
   | 	// initialize the quit channel so Close can stop this goroutine
   | 	q.autoGCQuit = make(chan int)
827 | 	go func() {
828 | for {
829 | select {
830 | case <-ticker.C:
831 | q.Gc()
832 | case <-q.autoGCQuit:
833 | ticker.Stop()
834 | goto exit
835 | }
836 | }
837 | exit:
838 | 		log.Println("auto gc goroutine exited")
839 | }()
840 | }
841 |
--------------------------------------------------------------------------------
/filequeue_test.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "sync"
7 | "testing"
8 | "time"
9 |
10 | . "github.com/smartystreets/goconvey/convey"
11 | )
12 |
13 | // TestFileQueue_OpenError to test Open function when required parameters are missing.
14 | func TestFileQueue_OpenError(t *testing.T) {
15 |
16 | Convey("TestFileQueue_OpenError", t, func() {
17 | path := Tempfile()
18 | clearFiles(path, "testqueue")
19 | defer clearFiles(path, "testqueue")
20 | Convey("Open with empty path and name", func() {
21 | var queue = &FileQueue{}
22 | //var queue = new(FileQueue)
23 | err := queue.Open("", "", nil)
24 | So(err, ShouldNotBeNil)
25 | So(err, ShouldBeError, "parameter 'dir' can not be blank")
26 | defer queue.Close()
27 | defer clearFiles(path, "testqueue")
28 | })
29 |
30 | Convey("Open with empty name", func() {
31 | var queue = &FileQueue{}
32 | err := queue.Open(path, "", nil)
33 | So(err, ShouldNotBeNil)
34 | So(err, ShouldBeError, "parameter 'queueName' can not be blank")
35 | defer queue.Close()
36 | })
37 |
38 | })
39 |
40 | }
41 |
42 | // TestFileQueue_Open to test opening a queue without error and to check the initial size
43 | func TestFileQueue_Open(t *testing.T) {
44 | path := Tempfile()
45 | clearFiles(path, "testqueue")
46 | defer clearFiles(path, "testqueue")
47 |
48 | Convey("TestFileQueue_Open", t, func() {
49 | var queue = new(FileQueue)
50 |
51 | err := queue.Open(path, "testqueue", nil)
52 |
53 | if err != nil {
54 | t.Error(err)
55 | }
56 | defer queue.Close()
57 | defer clearFiles(path, "testqueue")
58 |
59 | sz := queue.Size()
60 | SoMsg(fmt.Sprintf("Init queue size must be zero, but now is %d", sz), sz, ShouldEqual, 0)
61 |
62 | empty := queue.IsEmpty()
63 | SoMsg("Init queue must be empty, but now is not empty", empty, ShouldBeTrue)
64 | })
65 |
66 | }
67 |
68 | // TestFileQueue_OpenTwice to test that opening an already opened queue returns an error
69 | func TestFileQueue_OpenTwice(t *testing.T) {
70 | path := Tempfile()
71 | clearFiles(path, "testqueue")
72 | defer clearFiles(path, "testqueue")
73 |
74 | Convey("TestFileQueue_OpenTwice", t, func() {
75 | var queue = new(FileQueue)
76 |
77 | err := queue.Open(path, "testqueue", nil)
78 | So(err, ShouldBeNil)
79 |
80 | defer queue.Close()
81 | defer clearFiles(path, "testqueue")
82 |
83 | // open again will return error
84 | Convey("Already open", func() {
85 | err = queue.Open(path, "testqueue", nil)
86 | So(err, ShouldNotBeNil)
87 | So(err, ShouldBeError, "FileQueue already opened")
88 | })
89 |
90 | sz := queue.Size()
91 | SoMsg(fmt.Sprintf("Init queue size must be zero, but now is %d", sz), sz, ShouldEqual, 0)
92 |
93 | empty := queue.IsEmpty()
94 | SoMsg("Init queue must be empty, but now is not empty", empty, ShouldBeTrue)
95 | })
96 |
97 | }
98 |
99 | // TestFileQueue_Enqueue to test enqueue function
100 | func TestFileQueue_Enqueue(t *testing.T) {
101 | path := Tempfile()
102 | clearFiles(path, "testqueue")
103 |
104 | var queue = new(FileQueue)
105 | Convey("TestFileQueue_Enqueue", t, func() {
106 | err := queue.Open(path, "testqueue", nil)
107 | So(err, ShouldBeNil)
108 | enqueue(queue, []byte("hello xiemalin中文"), 10, t)
109 | })
110 | defer queue.Close()
111 | defer clearFiles(path, "testqueue")
112 |
113 | }
114 |
115 | // TestFileQueue_DequeueEmpty to test dequeue item from an empty queue
116 | func TestFileQueue_DequeueEmpty(t *testing.T) {
117 | path := Tempfile()
118 | clearFiles(path, "testqueue")
119 | defer clearFiles(path, "testqueue")
120 |
121 | Convey("TestFileQueue_DequeueEmpty", t, func() {
122 |
123 | var queue = new(FileQueue)
124 |
125 | err := queue.Open(path, "testqueue", nil)
126 |
127 | So(err, ShouldBeNil)
128 | dequeueEmpty(queue, t)
129 | defer queue.Close()
130 | })
131 |
132 | }
133 |
134 | // TestFileQueue_EnqueueDequeue to test enqueue and dequeue
135 | func TestFileQueue_EnqueueDequeue(t *testing.T) {
136 | path := Tempfile()
137 | clearFiles(path, "testqueue")
138 | defer clearFiles(path, "testqueue")
139 |
140 | Convey("TestFileQueue_EnqueueDequeue", t, func() {
141 |
142 | var queue = new(FileQueue)
143 | err := queue.Open(path, "testqueue", nil)
144 | So(err, ShouldBeNil)
145 | defer queue.Close()
146 | enqueue(queue, []byte("hello xiemalin中文"), 10, t)
147 | dequeue(queue, []byte("hello xiemalin中文"), 10, t)
148 | 		// check that no more messages are available
149 | dequeueEmpty(queue, t)
150 | })
151 |
152 | }
153 |
154 | // TestFileQueue_Skip to test skip function
155 | func TestFileQueue_Skip(t *testing.T) {
156 | path := Tempfile()
157 | clearFiles(path, "testqueue")
158 | defer clearFiles(path, "testqueue")
159 |
160 | 	Convey("TestFileQueue_Skip", t, func() {
161 | var queue = new(FileQueue)
162 |
163 | err := queue.Open(path, "testqueue", nil)
164 |
165 | So(err, ShouldBeNil)
166 | defer queue.Close()
167 |
168 | enqueue(queue, []byte("hello xiemalin中文"), 10, t)
169 |
170 | queue.Skip(5)
171 |
172 | dequeue(queue, []byte("hello xiemalin中文"), 5, t)
173 | 		// check that no more messages are available
174 | dequeueEmpty(queue, t)
175 | })
176 |
177 | }
178 |
179 | // TestFileQueue_Peek to test peek item function
180 | func TestFileQueue_Peek(t *testing.T) {
181 | path := Tempfile()
182 | clearFiles(path, "testqueue")
183 | defer clearFiles(path, "testqueue")
184 |
185 | 	Convey("TestFileQueue_Peek", t, func() {
186 | var queue = new(FileQueue)
187 |
188 | err := queue.Open(path, "testqueue", nil)
189 |
190 | So(err, ShouldBeNil)
191 | defer queue.Close()
192 |
193 | // peek to an empty queue
194 | index, bb, err := queue.Peek()
195 | So(err, ShouldBeNil)
196 | SoMsg(fmt.Sprintf("Error peek to an empty queue should return index of '-1', but actually is %d", index), index, ShouldEqual, -1)
197 | So(bb, ShouldBeNil)
198 |
199 | enqueue(queue, []byte("hello xiemalin中文"), 10, t)
200 |
201 | index, bb, err = queue.Peek()
202 | index2, bb2, err2 := queue.Peek()
203 | So(index, ShouldEqual, index2)
204 | So(bb, ShouldResemble, bb2)
205 | So(err, ShouldResemble, err2)
206 |
207 | })
208 |
209 | }
210 |
211 | // TestFileQueue_Gc to test gc func after enqueue and dequeue process
212 | func TestFileQueue_Gc(t *testing.T) {
213 | path := Tempfile()
214 | clearFiles(path, "testqueue")
215 | defer clearFiles(path, "testqueue")
216 |
217 | Convey("TestFileQueue_Gc", t, func() {
218 | var queue = new(FileQueue)
219 | // use custom options
220 | var options = &Options{
221 | DataPageSize: 128,
222 | IndexItemsPerPage: 17,
223 | }
224 |
225 | err := queue.Open(path, "testqueue", options)
226 |
227 | So(err, ShouldBeNil)
228 | defer queue.Close()
229 |
230 | enqueue(queue, []byte("hello xiemalin中文"), 500, t)
231 | dequeue(queue, []byte("hello xiemalin中文"), 500, t)
232 | queue.Gc()
233 | })
234 |
235 | }
236 |
237 | // TestFileQueue_AutoGc to test the automatic gc function during the enqueue and dequeue process
238 | func TestFileQueue_AutoGc(t *testing.T) {
239 | path := Tempfile()
240 | clearFiles(path, "testqueue")
241 | defer clearFiles(path, "testqueue")
242 |
243 | 	Convey("TestFileQueue_AutoGc", t, func() {
244 | var queue = new(FileQueue)
245 | // use custom options
246 | var options = &Options{
247 | DataPageSize: 128,
248 | IndexItemsPerPage: 17,
249 | AutoGCBySeconds: 1,
250 | }
251 |
252 | err := queue.Open(path, "testqueue", options)
253 |
254 | So(err, ShouldBeNil)
255 | defer queue.Close()
256 |
257 | doEnqueue(queue, []byte("hello xiemalin中文"), 500, t)
258 | dequeue(queue, []byte("hello xiemalin中文"), 500, t)
259 |
260 | time.Sleep(2 * time.Second)
261 | })
262 | }
263 |
264 | // TestFileQueue_Subscribe to test subscribe function
265 | func TestFileQueue_Subscribe(t *testing.T) {
266 | path := Tempfile()
267 | clearFiles(path, "testqueue")
268 | defer clearFiles(path, "testqueue")
269 |
270 | Convey("TestFileQueue_Subscribe", t, func() {
271 | i := 0
272 | var queue = new(FileQueue)
273 |
274 | err := queue.Subscribe(func(index int64, bb []byte, err error) {
275 | i++
276 | })
277 | 		// should fail because the queue has not been opened yet
278 | So(err, ShouldBeError, ErrSubscribeFailedNoOpenErr)
279 |
280 | err = queue.Open(path, "testqueue", nil)
281 |
282 | queue.Subscribe(func(index int64, bb []byte, err error) {
283 | i++
284 | })
285 |
286 | So(err, ShouldBeNil)
287 | defer queue.Close()
288 |
289 | sz := 10
290 |
291 | doEnqueue(queue, []byte("hello xiemalin中文"), sz, t)
292 |
293 | time.Sleep(time.Duration(2) * time.Second)
294 |
295 | So(i, ShouldEqual, sz)
296 |
297 | queue.FreeSubscribe()
298 | })
299 |
300 | }
301 |
302 | // TestFileQueue_FreeSubscribe to test free subscribe function
303 | func TestFileQueue_FreeSubscribe(t *testing.T) {
304 | path := Tempfile()
305 | clearFiles(path, "testqueue")
306 | defer clearFiles(path, "testqueue")
307 |
308 | Convey("TestFileQueue_FreeSubscribe", t, func() {
309 |
310 | i := 0
311 | var queue = new(FileQueue)
312 |
313 | err := queue.Open(path, "testqueue", nil)
314 |
315 | queue.Subscribe(func(index int64, bb []byte, err error) {
316 | i++
317 | })
318 |
319 | So(err, ShouldBeNil)
320 | defer queue.Close()
321 |
322 | queue.FreeSubscribe()
323 | sz := 10
324 | 		// no longer receives subscribe callbacks
325 | enqueue(queue, []byte("hello xiemalin中文"), sz, t)
326 |
327 | SoMsg(fmt.Sprintf("subscribe count should be 0, but actually is %d", i), i, ShouldEqual, 0)
328 |
329 | })
330 |
331 | }
332 |
333 | // TestFileQueue_FreeSubscribe_MidCycle to test free subscribe function in the middle of cycle
334 | func TestFileQueue_FreeSubscribe_MidCycle(t *testing.T) {
335 | path := Tempfile()
336 | clearFiles(path, "testqueue")
337 | defer clearFiles(path, "testqueue")
338 |
339 | Convey("TestFileQueue_FreeSubscribe_MidCycle", t, func() {
340 | i := 0
341 | var queue = new(FileQueue)
342 |
343 | err := queue.Open(path, "testqueue", nil)
344 |
345 | var wg sync.WaitGroup
346 | wg.Add(5)
347 |
348 | queue.Subscribe(func(index int64, bb []byte, err error) {
349 | defer wg.Done()
350 | i++
351 | if i == 5 {
352 | queue.FreeSubscribe()
353 | }
354 | })
355 |
356 | if err != nil {
357 | fmt.Println(err)
358 | }
359 | defer queue.Close()
360 |
361 | sz := 10
362 | doEnqueue(queue, []byte("hello xiemalin中文"), sz, t)
363 |
364 | wg.Wait()
365 |
366 | So(queue.Size(), ShouldBeLessThanOrEqualTo, 5)
367 | })
368 |
369 | }
370 |
371 | // TestFileQueue_PeekAll
372 | func TestFileQueue_PeekAll(t *testing.T) {
373 | path := Tempfile()
374 | clearFiles(path, "testqueue")
375 | defer clearFiles(path, "testqueue")
376 |
377 | 	Convey("TestFileQueue_PeekAll", t, func() {
378 |
379 | var queue = new(FileQueue)
380 |
381 | err := queue.Open(path, "testqueue", nil)
382 | So(err, ShouldBeNil)
383 | defer queue.Close()
384 |
385 | sz := 10
386 | 		// enqueue items so there is something to peek
387 | enqueue(queue, []byte("hello xiemalin中文"), sz, t)
388 |
389 | r, indexs, err := queue.PeekAll()
390 | So(err, ShouldBeNil)
391 | So(len(r), ShouldEqual, 10)
392 | So(len(indexs), ShouldEqual, 10)
393 |
394 | })
395 |
396 | }
397 |
398 | // TestFileQueue_Status
399 | func TestFileQueue_Status(t *testing.T) {
400 | Convey("Test empty queue status result", t, func() {
401 | path := Tempfile()
402 | clearFiles(path, "testqueue")
403 | var queue = new(FileQueue)
404 |
405 | err := queue.Open(path, "testqueue", nil)
406 | So(err, ShouldBeNil)
407 | defer queue.Close()
408 | defer clearFiles(path, "testqueue")
409 |
410 | qFileStatus := queue.Status()
411 |
412 | So(qFileStatus, ShouldNotBeNil)
413 | So(qFileStatus.FrontIndex, ShouldEqual, 0)
414 | So(qFileStatus.HeadIndex, ShouldEqual, 0)
415 | So(qFileStatus.TailIndex, ShouldEqual, 0)
416 | So(qFileStatus.HeadDataPageIndex, ShouldEqual, 0)
417 | So(qFileStatus.HeadDataItemOffset, ShouldEqual, 0)
418 |
419 | So(len(qFileStatus.IndexFileList), ShouldEqual, 1)
420 | So(len(qFileStatus.DataFileList), ShouldEqual, 0)
421 | So(qFileStatus.MetaFileInfo, ShouldNotBeNil)
422 | So(qFileStatus.FrontFileInfo, ShouldNotBeNil)
423 |
424 | })
425 |
426 | Convey("Test non-empty queue status result", t, func() {
427 | path := Tempfile()
428 |
429 | var queue = new(FileQueue)
430 |
431 | err := queue.Open(path, "testqueue", nil)
432 | if err != nil {
433 | t.Error(err)
434 | }
435 | defer queue.Close()
436 | defer clearFiles(path, "testqueue")
437 |
438 | data := []byte("hello xmatthew")
439 | dataLen := len(data)
440 |
441 | queue.Enqueue(data)
442 | queue.Dequeue()
443 |
444 | qFileStatus := queue.Status()
445 |
446 | So(qFileStatus, ShouldNotBeNil)
447 | So(qFileStatus.FrontIndex, ShouldEqual, 1)
448 | So(qFileStatus.HeadIndex, ShouldEqual, 1)
449 | So(qFileStatus.TailIndex, ShouldEqual, 0)
450 | So(qFileStatus.HeadDataPageIndex, ShouldEqual, 0)
451 | So(qFileStatus.HeadDataItemOffset, ShouldEqual, dataLen)
452 |
453 | So(len(qFileStatus.IndexFileList), ShouldEqual, 1)
454 |
455 | fileInfo := qFileStatus.IndexFileList[0]
456 | So(fileInfo.CanGC, ShouldBeFalse)
457 | So(fileInfo.FileIndex, ShouldEqual, 0)
458 |
459 | So(len(qFileStatus.DataFileList), ShouldEqual, 1)
460 | 		fileInfo = qFileStatus.DataFileList[0]
461 | So(fileInfo.CanGC, ShouldBeFalse)
462 | So(fileInfo.FileIndex, ShouldEqual, 0)
463 | So(qFileStatus.MetaFileInfo, ShouldNotBeNil)
464 | So(qFileStatus.FrontFileInfo, ShouldNotBeNil)
465 |
466 | // after gc
467 | queue.Enqueue(data)
468 | queue.Dequeue()
469 | queue.Gc()
470 | qFileStatus = queue.Status()
471 | So(qFileStatus.TailIndex, ShouldEqual, 1)
472 |
473 | })
474 |
475 | }
476 |
477 | // TestFileQueue_PeekPagination
478 | func TestFileQueue_PeekPagination(t *testing.T) {
479 | Convey("Test PeekPagination", t, func() {
480 |
481 | path := Tempfile()
482 | clearFiles(path, "testqueue")
483 |
484 | var queue = new(FileQueue)
485 |
486 | err := queue.Open(path, "testqueue", nil)
487 | if err != nil {
488 | t.Error(err)
489 | }
490 | defer queue.Close()
491 | defer clearFiles(path, "testqueue")
492 | Convey("test PeekPagination on empty queue", func() {
493 | data, indexs, err := queue.PeekPagination(0, 0)
494 | So(err, ShouldBeNil)
495 | So(data, ShouldBeEmpty)
496 | So(indexs, ShouldBeEmpty)
497 |
498 | data, indexs, err = queue.PeekPagination(1, 1)
499 | So(err, ShouldBeNil)
500 | So(data, ShouldBeEmpty)
501 | So(indexs, ShouldBeEmpty)
502 | })
503 |
504 | 		Convey("test PeekPagination on items smaller than pagesize", func() {
505 | for i := 0; i < 5; i++ { // add value
506 | _, err := queue.Enqueue([]byte("hello matthew " + strconv.Itoa(i)))
507 | So(err, ShouldBeNil)
508 | }
509 |
510 | data, indexs, err := queue.PeekPagination(0, 0)
511 | So(err, ShouldBeNil)
512 | So(len(data), ShouldEqual, 5)
513 | So(string(data[4]), ShouldEqual, "hello matthew 4")
514 | So(len(indexs), ShouldEqual, 5)
515 |
516 | data, indexs, err = queue.PeekPagination(1, 10)
517 | So(err, ShouldBeNil)
518 | So(len(data), ShouldEqual, 5)
519 | So(string(data[4]), ShouldEqual, "hello matthew 4")
520 | So(len(indexs), ShouldEqual, 5)
521 |
522 | 			data, indexs, err = queue.PeekPagination(2, 10) // page beyond available items
523 | So(err, ShouldBeNil)
524 | So(data, ShouldBeEmpty)
525 | So(indexs, ShouldBeEmpty)
526 |
527 | data, indexs, err = queue.PeekPagination(2, 2)
528 | So(err, ShouldBeNil)
529 | So(len(data), ShouldEqual, 2)
530 | So(string(data[1]), ShouldEqual, "hello matthew 3")
531 | So(len(indexs), ShouldEqual, 2)
532 | })
533 |
534 | })
535 | }
536 |
537 | func TestMultiGoroutinesEnqueueDequeue(t *testing.T) {
538 | path := Tempfile()
539 | clearFiles(path, "testqueue")
540 | defer clearFiles(path, "testqueue")
541 |
542 | enqueueResult := make(chan int, 10)
543 | dequeueResult := make(chan int, 10)
544 |
545 | Convey("TestMultiGoroutinesEnqueueDequeue", t, func() {
546 | var queue = new(FileQueue)
547 | err := queue.Open(path, "testqueue", nil)
548 | So(err, ShouldBeNil)
549 | defer queue.Close()
550 |
551 | 		syncEnqueue := func(q Queue) {
552 | q.Enqueue([]byte{1, 2})
553 | enqueueResult <- 0
554 | }
555 | for i := 0; i < 10; i++ {
556 | 			go syncEnqueue(queue)
557 | }
558 |
559 | 		syncDequeue := func(q Queue) {
560 | q.Dequeue()
561 | dequeueResult <- 0
562 | }
563 | for i := 0; i < 10; i++ {
564 | 			go syncDequeue(queue)
565 | }
566 |
567 | enqueueSize := 0
568 | dequeueSize := 0
569 | for {
570 | select {
571 | case <-enqueueResult:
572 | enqueueSize++
573 | case <-dequeueResult:
574 | dequeueSize++
575 | }
576 |
577 | if enqueueSize == 10 && dequeueSize == 10 {
578 | return
579 | }
580 | }
581 |
582 | })
583 | }
584 |
585 | // Tempfile returns the temporary directory path used by the tests.
586 | func Tempfile() string {
587 | return "./bin/temp"
588 | }
589 |
590 | func enqueue(queue Queue, content []byte, size int, t *testing.T) {
591 | doEnqueue(queue, content, size, t)
592 |
593 | sz := queue.Size()
594 | So(sz, ShouldEqual, size)
595 | }
596 |
597 | func doEnqueue(queue Queue, content []byte, size int, t *testing.T) {
598 | for i := 0; i < size; i++ {
599 | idx, err := queue.Enqueue(content)
600 | So(err, ShouldBeNil)
601 |
602 | So(idx, ShouldEqual, i)
603 | }
604 | }
605 |
606 | func dequeue(queue Queue, expectContent []byte, expectSize int, t *testing.T) {
607 | count := 0
608 | 	// dequeue expectSize items and verify their content
609 | for i := 0; i < expectSize; i++ {
610 | idx, bb, err := queue.Dequeue()
611 |
612 | So(err, ShouldBeNil)
613 | So(idx, ShouldNotEqual, -1)
614 | count++
615 | So(expectContent, ShouldResemble, bb)
616 | }
617 |
618 | So(count, ShouldEqual, expectSize)
619 |
620 | }
621 |
622 | func dequeueEmpty(queue Queue, t *testing.T) {
623 | idx, _, err := queue.Dequeue()
624 |
625 | So(err, ShouldBeNil)
626 | So(idx, ShouldEqual, -1)
627 | }
628 |
629 | func clearFiles(path string, queueName string) {
630 | RemoveFiles(path + "/" + queueName + "/" + DataFileName)
631 | RemoveFiles(path + "/" + queueName + "/" + FrontFileName)
632 | RemoveFiles(path + "/" + queueName + "/" + IndexFileName)
633 | RemoveFiles(path + "/" + queueName + "/" + MetaFileName)
634 | }
635 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/jhunters/bigqueue
2 |
3 | go 1.13
4 |
5 | require github.com/smartystreets/goconvey v1.7.2
6 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
2 | github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
3 | github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
4 | github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
5 | github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
6 | github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
7 | github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
8 | github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
9 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
10 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
11 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
12 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
13 | golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
14 |
--------------------------------------------------------------------------------
/mmap.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "sync"
7 | )
8 |
9 | // maxMapSize represents the largest mmap size supported by Bolt.
10 | const maxMapSize = 0x7FFFFFFF // 2GB
11 |
12 | // maxAllocSize is the size used when creating array pointers.
13 | const maxAllocSize = 0xFFFFFFF
14 |
15 | // Are unaligned load/stores broken on this arch?
16 | var brokenUnaligned = false
17 |
18 | // DB represents a single memory-mapped data file on disk.
19 | // All reads and writes go directly through the mmap'ed byte array exposed as data.
20 | // Open() must be called before the mapped data can be accessed.
21 | type DB struct {
22 | // If you want to read the entire database fast, you can set MmapFlag to
23 | // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
24 | MmapFlags int
25 |
26 | path string
27 | file *os.File
28 | 	dataref []byte // original mmap'ed byte slice, retained for munmap
29 |
30 | data *[maxMapSize]byte
31 |
32 | InitialMmapSize int
33 |
34 | opened bool
35 |
36 | ops struct {
37 | writeAt func(b []byte, off int64) (n int, err error)
38 | }
39 |
40 | mmaplock sync.RWMutex // Protects mmap access during remapping.
41 | }
42 |
43 | // init pre-allocates a new data file by writing a zero-filled buffer of InitialMmapSize bytes and syncing it to disk.
44 | func (db *DB) init() error {
45 |
46 | buf := make([]byte, db.InitialMmapSize)
47 |
48 | // Write the buffer to our data file.
49 | if _, err := db.ops.writeAt(buf, 0); err != nil {
50 | return err
51 | }
52 | if err := fdatasync(db); err != nil {
53 | return err
54 | }
55 |
56 | return nil
57 | }
58 |
59 | // Open open target db file
60 | func (db *DB) Open(mode os.FileMode) error {
61 |
62 | flag := os.O_RDWR
63 | // Open data file and separate sync handler for metadata writes.
64 | var err error
65 |
66 | if db.file, err = os.OpenFile(db.Path(), flag|os.O_CREATE, mode); err != nil {
67 | _ = db.close()
68 | return err
69 | }
70 |
71 | // Default values for test hooks
72 | db.ops.writeAt = db.file.WriteAt
73 |
74 | // Initialize the database if it doesn't exist.
75 | if info, err := db.file.Stat(); err != nil {
76 | return err
77 | } else if info.Size() == 0 {
78 | 		// Pre-allocate new files to the initial mmap size.
79 | if err := db.init(); err != nil {
80 | return err
81 | }
82 | }
83 |
84 | // Memory map the data file.
85 | if err := db.mmap(db.InitialMmapSize); err != nil {
86 | _ = db.close()
87 | return err
88 | }
89 |
90 | 	// The file is now memory-mapped and ready for use.
91 | return nil
92 | }
93 |
94 | // mmap memory-maps the underlying data file.
95 | // minsz is the minimum size that the new mmap can be.
96 | func (db *DB) mmap(minsz int) error {
97 | db.mmaplock.Lock()
98 | defer db.mmaplock.Unlock()
99 |
100 | info, err := db.file.Stat()
101 | if err != nil {
102 | return fmt.Errorf("mmap stat error: %s", err)
103 | }
104 |
105 | // Ensure the size is at least the minimum size.
106 | var size = int(info.Size())
107 | if size < minsz {
108 | size = minsz
109 | }
110 |
111 | // Unmap existing data before continuing.
112 | if err := db.munmap(); err != nil {
113 | return err
114 | }
115 |
116 | // Memory-map the data file as a byte slice.
117 | if err := mmap(db, size); err != nil {
118 | return err
119 | }
120 |
121 | return nil
122 | }
123 |
124 | // munmap unmaps the data file from memory.
125 | func (db *DB) munmap() error {
126 | if err := munmap(db); err != nil {
127 | 		return fmt.Errorf("unmap error: %s", err)
128 | }
129 | return nil
130 | }
131 |
132 | // Path returns the path to currently open database file.
133 | func (db *DB) Path() string {
134 | return db.path
135 | }
136 |
137 | // GoString returns the Go string representation of the database.
138 | func (db *DB) GoString() string {
139 | return fmt.Sprintf("bigqueue.DB{path:%q}", db.path)
140 | }
141 |
142 | // Close releases all resources.
143 | func (db *DB) Close() error {
144 | db.mmaplock.RLock()
145 | defer db.mmaplock.RUnlock()
146 |
147 | return db.close()
148 | }
149 |
150 | func (db *DB) close() error {
151 | if !db.opened {
152 | return nil
153 | }
154 |
155 | db.opened = false
156 |
157 | fdatasync(db)
158 |
159 | // Clear ops.
160 | db.ops.writeAt = nil
161 |
162 | // Close the mmap.
163 | if err := db.munmap(); err != nil {
164 | return err
165 | }
166 |
167 | // Close file handles.
168 | if db.file != nil {
169 | // Close the file descriptor.
170 | if err := db.file.Close(); err != nil {
171 | return fmt.Errorf("db file close: %s", err)
172 | }
173 | db.file = nil
174 | }
175 |
176 | db.path = ""
177 | return nil
178 | }
179 |
--------------------------------------------------------------------------------
/mmap_darwin.go:
--------------------------------------------------------------------------------
1 | // +build darwin
2 |
3 | package bigqueue
4 |
5 | import (
6 | "fmt"
7 | "syscall"
8 | "unsafe"
9 | )
10 |
11 | // fdatasync flushes written data to a file descriptor.
12 | func fdatasync(db *DB) error {
13 | return syscall.Fsync(int(db.file.Fd()))
14 | }
15 |
16 | // mmap memory maps a DB's data file.
17 | func mmap(db *DB, sz int) error {
18 | // Map the data file to memory.
19 | b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_WRITE|syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
20 | if err != nil {
21 | return err
22 | }
23 |
24 | // Advise the kernel that the mmap is accessed randomly.
25 | if err := madvise(b, syscall.MADV_RANDOM); err != nil {
26 | return fmt.Errorf("madvise: %s", err)
27 | }
28 |
29 | // Save the original byte slice and convert to a byte array pointer.
30 | db.dataref = b
31 | db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
32 | return nil
33 | }
34 |
35 | // munmap unmaps a DB's data file from memory.
36 | func munmap(db *DB) error {
37 | // Ignore the unmap if we have no mapped data.
38 | if db.dataref == nil {
39 | return nil
40 | }
41 |
42 | // Unmap using the original byte slice.
43 | err := syscall.Munmap(db.dataref)
44 | db.data = nil
45 | return err
46 | }
47 |
48 | // NOTE: This function is copied from stdlib because it is not available on darwin.
49 | func madvise(b []byte, advice int) (err error) {
50 | _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
51 | if e1 != 0 {
52 | err = e1
53 | }
54 | return
55 | }
56 |
--------------------------------------------------------------------------------
/mmap_linux.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | import (
4 | "fmt"
5 | "syscall"
6 | "unsafe"
7 | )
8 |
9 | // fdatasync flushes written data to a file descriptor.
10 | func fdatasync(db *DB) error {
11 | return syscall.Fdatasync(int(db.file.Fd()))
12 | }
13 |
14 | // mmap memory maps a DB's data file.
15 | func mmap(db *DB, sz int) error {
16 | // Map the data file to memory.
17 | b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_WRITE|syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
18 | if err != nil {
19 | return err
20 | }
21 |
22 | // Advise the kernel that the mmap is accessed randomly.
23 | if err := madvise(b, syscall.MADV_RANDOM); err != nil {
24 | return fmt.Errorf("madvise: %s", err)
25 | }
26 |
27 | // Save the original byte slice and convert to a byte array pointer.
28 | db.dataref = b
29 | db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
30 | return nil
31 | }
32 |
33 | // munmap unmaps a DB's data file from memory.
34 | func munmap(db *DB) error {
35 | // Ignore the unmap if we have no mapped data.
36 | if db.dataref == nil {
37 | return nil
38 | }
39 |
40 | // Unmap using the original byte slice.
41 | err := syscall.Munmap(db.dataref)
42 | db.data = nil
43 | return err
44 | }
45 |
46 | // NOTE: madvise is issued through a raw syscall here, mirroring the darwin implementation.
47 | func madvise(b []byte, advice int) (err error) {
48 | _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
49 | if e1 != 0 {
50 | err = e1
51 | }
52 | return
53 | }
54 |
--------------------------------------------------------------------------------
/mmap_test.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | import (
4 | "os"
5 | "testing"
6 |
7 | . "github.com/smartystreets/goconvey/convey"
8 | )
9 |
10 | func Test_MmapWriteAt(t *testing.T) {
11 | path := Tempfile() + "/test.dat"
12 | defer os.Remove(path)
13 |
14 | Convey("Test_MmapWriteAt", t, func() {
15 | db := &DB{
16 | path: path,
17 | InitialMmapSize: 128 * 1024,
18 | opened: true,
19 | }
20 |
21 | err := db.Open(0666)
22 | So(err, ShouldBeNil)
23 |
24 | defer db.Close()
25 |
26 | s := "hello xiemalin"
27 | db.file.Write([]byte(s))
28 |
29 | v := db.data[:len(s)]
30 | So(s, ShouldEqual, string(v))
31 | })
32 |
33 | }
34 |
35 | func Test_MmapWriteAt1(t *testing.T) {
36 | path := Tempfile() + "/test.dat"
37 | defer os.Remove(path)
38 |
39 | Convey("Test_MmapWriteAt1", t, func() {
40 | db := &DB{
41 | path: path,
42 | InitialMmapSize: 128 * 1024,
43 | opened: true,
44 | }
45 |
46 | err := db.Open(0666)
47 | So(err, ShouldBeNil)
48 |
49 | s := "hello xiemalin"
50 | bb := []byte(s)
51 | // for i := 0; i < len(bb); i++ {
52 | // db.data[i] = bb[i]
53 | // }
54 | copy(db.data[:len(bb)], bb)
55 |
56 | db.Close()
57 |
58 | db = &DB{
59 | path: path,
60 | InitialMmapSize: 128 * 1024,
61 | opened: true,
62 | }
63 | 		// re-open the same file and verify the data persisted
64 | err = db.Open(0666)
65 | So(err, ShouldBeNil)
66 | defer db.Close()
67 |
68 | s = "hello xiemalin"
69 | v := db.data[:len(s)]
70 | So(s, ShouldEqual, string(v))
71 | })
72 |
73 | }
74 |
--------------------------------------------------------------------------------
/mmap_windows.go:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: Malin Xie
3 | * @Description:
4 | * @Date: 2020-08-28 20:06:46
5 | */
6 | package bigqueue
7 |
8 | import (
9 | "os"
10 | "syscall"
11 | "unsafe"
12 | )
13 |
14 | // LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
15 | var (
16 | modkernel32 = syscall.NewLazyDLL("kernel32.dll")
17 | procLockFileEx = modkernel32.NewProc("LockFileEx")
18 | procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
19 | )
20 |
21 | const (
22 | // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
23 | flagLockExclusive = 2
24 | flagLockFailImmediately = 1
25 |
26 | // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
27 | errLockViolation syscall.Errno = 0x21
28 | )
29 |
30 | // fdatasync flushes written data to a file descriptor.
31 | func fdatasync(db *DB) error {
32 | return db.file.Sync()
33 | }
34 |
35 | // mmap memory maps a DB's data file.
36 | // Based on: https://github.com/edsrzf/mmap-go
37 | func mmap(db *DB, sz int) error {
38 | // Open a file mapping handle.
39 | 	sizehi := uint32(sz >> 32)
40 | 	sizelo := uint32(sz & 0xffffffff)
41 | 	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READWRITE, sizehi, sizelo, nil)
42 | if h == 0 {
43 | return os.NewSyscallError("CreateFileMapping", errno)
44 | }
45 |
46 | // Create the memory map.
47 | addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_WRITE, 0, 0, uintptr(sz))
48 | if addr == 0 {
49 | return os.NewSyscallError("MapViewOfFile", errno)
50 | }
51 |
52 | // Close mapping handle.
53 | if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
54 | return os.NewSyscallError("CloseHandle", err)
55 | }
56 |
57 | // Convert to a byte array.
58 | db.data = (*[maxMapSize]byte)(unsafe.Pointer(addr))
59 | return nil
60 | }
61 |
62 | // munmap unmaps a DB's data file from memory.
63 | // Based on: https://github.com/edsrzf/mmap-go
64 | func munmap(db *DB) error {
65 | if db.data == nil {
66 | return nil
67 | }
68 |
69 | addr := (uintptr)(unsafe.Pointer(&db.data[0]))
70 | if err := syscall.UnmapViewOfFile(addr); err != nil {
71 | return os.NewSyscallError("UnmapViewOfFile", err)
72 | }
73 | return nil
74 | }
75 |
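CreateFileMapping takes the maximum mapping size as two 32-bit words, high then low. A tiny worked example (illustration only, no repo code involved) of the split used above:

```go
package main

import "fmt"

func main() {
	sz := int64(5 << 30) // 5 GiB

	sizehi := uint32(sz >> 32)        // high 32 bits -> 1
	sizelo := uint32(sz & 0xffffffff) // low 32 bits  -> 1 GiB

	fmt.Println(sizehi, sizelo)                        // 1 1073741824
	fmt.Println(int64(sizehi)<<32+int64(sizelo) == sz) // true: the split is lossless
}
```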
--------------------------------------------------------------------------------
/mmapfactory.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "os"
7 | "strconv"
8 | "strings"
9 | "sync"
10 | )
11 |
12 | // DBFactory is used to manage multiple data files by index number
13 | type DBFactory struct {
14 | lockMap map[int64]*sync.Mutex
15 |
16 | // DB mapping with file index no
17 | dbMap map[int64]*DB
18 |
19 | filePrefix string
20 |
21 | fileSuffix string
22 |
23 | lock sync.Mutex
24 |
25 | filePath string
26 |
27 | InitialMmapSize int
28 | }
29 |
30 | func (f *DBFactory) acquireDB(index int64) (*DB, error) {
31 | 	// acquire the map lock and check the cached DB first
32 | f.lock.Lock()
33 | db := f.dbMap[index]
34 | if db != nil {
35 | f.lock.Unlock()
36 | return db, nil
37 | }
38 |
39 | lock := f.lockMap[index]
40 | if lock == nil {
41 | lock = &(sync.Mutex{})
42 | f.lockMap[index] = lock
43 | }
44 | defer func() {
45 | delete(f.lockMap, index)
46 | }()
47 | f.lock.Unlock()
48 |
49 | // lock by index
50 | lock.Lock()
51 | defer lock.Unlock()
52 |
53 | db = &DB{
54 | path: f.getFilePath(index),
55 | InitialMmapSize: f.InitialMmapSize,
56 | opened: true,
57 | }
58 |
59 | err := db.Open(defaultFileMode)
60 | if err != nil {
61 | return nil, err
62 | }
63 | f.dbMap[index] = db
64 | return db, nil
65 | }
66 |
67 | func (f *DBFactory) getFilePath(index int64) string {
68 | return f.filePath + "/" + GetFileName(f.filePrefix, f.fileSuffix, index)
69 | }
70 |
71 | // Close all data files
72 | func (f *DBFactory) Close() error {
73 | if f.dbMap != nil {
74 | for k, v := range f.dbMap {
75 | err := v.Close()
76 | if err != nil {
77 | log.Println("Close DB from map failed. ", k, err)
78 | }
79 | }
80 | }
81 |
82 | 	// reset to empty maps
83 | f.dbMap = make(map[int64]*DB)
84 | f.lockMap = make(map[int64]*sync.Mutex)
85 |
86 | return nil
87 |
88 | }
89 |
90 | func (f *DBFactory) removeBeforeIndex(index int64) error {
91 |
92 | f.lock.Lock()
93 | defer f.lock.Unlock()
94 |
95 | for idx, db := range f.dbMap {
96 | 		if idx < index {
97 | 			log.Println("gc: deleting data file, index no=", idx)
98 |
99 | db.Close()
100 | os.Remove(f.getFilePath(idx))
101 | delete(f.dbMap, idx)
102 | }
103 | }
104 |
105 | 	// double check on disk: remove any leftover data files below the index that are not in the map
106 | files, err := GetFiles(f.filePath)
107 | if err != nil {
108 | return err
109 | }
110 | for i := files.Front(); i != nil; i = i.Next() {
111 | fn := fmt.Sprintf("%v", i.Value)
112 | if strings.HasSuffix(fn, f.fileSuffix) {
113 | fin := f.getFileIndex(fn)
114 | 			if fin >= 0 && fin < index {
115 | 				log.Println("gc: deleting data file, index no=", fin)
116 | os.Remove(f.getFilePath(fin))
117 | }
118 | }
119 |
120 | }
121 |
122 | return nil
123 | }
124 |
125 | func (f *DBFactory) getFileIndex(fn string) int64 {
126 | beginIndex := strings.LastIndex(fn, "-")
127 | beginIndex = beginIndex + 1
128 |
129 | endIndex := strings.LastIndex(fn, f.fileSuffix)
130 |
131 | sIndex, err := strconv.Atoi(fn[beginIndex:endIndex])
132 | if err != nil {
133 | return -1
134 | }
135 |
136 | return int64(sIndex)
137 | }
138 |
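For reference, a hedged sketch (not part of this repo; it assumes the module path github.com/jhunters/bigqueue) of the file-name round trip that getFilePath and getFileIndex rely on, using the exported GetFileName helper from utils.go and the same LastIndex/Atoi parsing as getFileIndex:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/jhunters/bigqueue"
)

func main() {
	prefix, suffix := "page-", ".dat"

	// index -> file name, as DBFactory.getFilePath does
	name := bigqueue.GetFileName(prefix, suffix, 12)
	fmt.Println(name) // page-12.dat

	// file name -> index, mirroring DBFactory.getFileIndex
	begin := strings.LastIndex(name, "-") + 1
	end := strings.LastIndex(name, suffix)
	idx, err := strconv.Atoi(name[begin:end])
	if err != nil {
		idx = -1 // same sentinel getFileIndex returns on a parse failure
	}
	fmt.Println(idx) // 12
}
```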
--------------------------------------------------------------------------------
/options.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | // Options the options struct
4 | type Options struct {
5 |
6 | // size in bytes of a data page
7 | DataPageSize int
8 |
9 | 	// size in bytes of an index page
10 | indexPageSize int
11 |
12 | 	// the number of index items per page is 1 << IndexItemsPerPage
13 | IndexItemsPerPage int
14 |
15 | itemsPerPage int
16 |
17 | 	// if the value is greater than 0, auto gc is enabled and runs repeatedly at the specified interval in seconds.
18 | AutoGCBySeconds int
19 | }
20 |
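A hedged example of filling in the exported options (it assumes the module path github.com/jhunters/bigqueue; the concrete values are illustrative, not the library defaults):

```go
package main

import (
	"fmt"

	"github.com/jhunters/bigqueue"
)

func main() {
	// Illustrative values only; tune DataPageSize and IndexItemsPerPage for your workload.
	opts := &bigqueue.Options{
		DataPageSize:      32 * 1024 * 1024, // 32 MB per data page
		IndexItemsPerPage: 17,               // 1 << 17 index items per index page
		AutoGCBySeconds:   60,               // run gc every 60 seconds; 0 disables auto gc
	}
	fmt.Printf("%+v\n", *opts)
}
```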
--------------------------------------------------------------------------------
/queue.go:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: Malin Xie
3 | * @Description:
4 | * @Date: 2020-08-28 20:06:46
5 | */
6 | package bigqueue
7 |
8 | // Queue interface defines all necessary queue operations
9 | type Queue interface {
10 | // Determines whether a queue is empty
11 | 	// return true if empty, false otherwise
12 | IsEmpty() bool
13 |
14 | 	// return the available queue size
15 | Size() int64
16 |
17 | 	// Append an item to the queue and return its index no
18 | 	// if any error occurs, a non-nil error is returned
19 | Enqueue(data []byte) (int64, error)
20 |
21 | EnqueueAsync(data []byte, fn func(int64, error))
22 |
23 | Dequeue() (int64, []byte, error)
24 |
25 | Peek() (int64, []byte, error)
26 |
27 | 	// Skip the target number of items on the dequeue side
28 | Skip(count int64) error
29 |
30 | Close() error
31 |
32 | // Delete all used data files to free disk space.
33 | Gc() error
34 |
35 | 	// Set an asynchronous subscribe callback
36 | Subscribe(fn func(int64, []byte, error)) error
37 |
38 | 	// Free the asynchronous subscribe callback
39 | FreeSubscribe()
40 | }
41 |
42 | // IOQueue interface defines a queue backed by file I/O
43 | type IOQueue interface {
44 | Queue
45 |
46 | // Open queue from file io info
47 | Open(dir string, queueName string, options *Options) error
48 | }
49 |
50 | // RemoteQueue interface defines a queue backed by a remote server
51 | type RemoteQueue interface {
52 | Queue
53 |
54 | // Open queue from remote server
55 | Open(serverUrl string, queueName string)
56 | }
57 |
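A hedged end-to-end sketch of the IOQueue contract. It assumes (as the repo layout suggests) that FileQueue in filequeue.go is the concrete implementation, that a nil *Options falls back to the defaults, and that the module path is github.com/jhunters/bigqueue:

```go
package main

import (
	"fmt"
	"log"

	"github.com/jhunters/bigqueue"
)

func main() {
	var queue bigqueue.IOQueue = new(bigqueue.FileQueue)

	// Open (or create) the queue files under ./bin/temp/demoqueue; nil selects default options.
	if err := queue.Open("./bin/temp", "demoqueue", nil); err != nil {
		log.Fatal(err)
	}
	defer queue.Close()

	idx, err := queue.Enqueue([]byte("hello bigqueue"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("enqueued at index", idx)

	idx, data, err := queue.Dequeue()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("dequeued index=%d data=%s\n", idx, data)
}
```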
--------------------------------------------------------------------------------
/utils.go:
--------------------------------------------------------------------------------
1 | package bigqueue
2 |
3 | import (
4 | "bytes"
5 | "container/list"
6 | "encoding/binary"
7 | "fmt"
8 | "io/ioutil"
9 | "os"
10 | "runtime/debug"
11 | "strconv"
12 | "strings"
13 | )
14 |
15 | // Assert will panic with a given formatted message if the given condition is false.
16 | func Assert(condition bool, message string, v ...interface{}) {
17 | if !condition {
18 | panic(fmt.Sprintf("assertion failed: "+message, v...))
19 | }
20 | }
21 |
22 | // Warn prints a log message to os.Stderr
23 | func Warn(v ...interface{}) {
24 | fmt.Fprintln(os.Stderr, v...)
25 | }
26 |
27 | // Warnf prints a formatted log message to os.Stderr
28 | func Warnf(msg string, v ...interface{}) {
29 | fmt.Fprintf(os.Stderr, msg+"\n", v...)
30 | }
31 |
32 | // Printstack prints the call stack trace to os.Stderr
33 | func Printstack() {
34 | stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
35 | fmt.Fprintln(os.Stderr, stack)
36 | }
37 |
38 | // PathExists checks whether the target path exists,
39 | // returning true if it exists and false otherwise
40 | func PathExists(path string) (bool, error) {
41 | _, err := os.Stat(path)
42 | if err == nil {
43 | return true, nil
44 | }
45 | if os.IsNotExist(err) {
46 | return false, nil
47 | }
48 | return false, err
49 | }
50 |
51 | // GetFileName returns the file name joined from prefix, index and suffix
52 | func GetFileName(prefix string, suffix string, index int64) string {
53 | return prefix + strconv.Itoa(int(index)) + suffix
54 | }
55 |
56 | // IntToBytes converts an int64 to a big-endian byte array
57 | func IntToBytes(n int64) []byte {
58 | x := int64(n)
59 | bytesBuffer := bytes.NewBuffer([]byte{})
60 | binary.Write(bytesBuffer, binary.BigEndian, x)
61 | return bytesBuffer.Bytes()
62 | }
63 |
64 | // BytesToInt converts a big-endian byte array to an int64
65 | func BytesToInt(b []byte) int64 {
66 | bytesBuffer := bytes.NewBuffer(b)
67 |
68 | var x int64
69 | binary.Read(bytesBuffer, binary.BigEndian, &x)
70 |
71 | return int64(x)
72 | }
73 |
74 | // BytesToInt32 converts a big-endian byte array to an int32
75 | func BytesToInt32(b []byte) int32 {
76 | bytesBuffer := bytes.NewBuffer(b)
77 |
78 | var x int32
79 | binary.Read(bytesBuffer, binary.BigEndian, &x)
80 |
81 | return int32(x)
82 | }
83 |
84 | // Mod returns val modulo 2^bits, i.e. val % (1 << bits)
85 | func Mod(val int64, bits int) int64 {
86 | return val - ((val >> uint(bits)) << uint(bits))
87 | }
88 |
89 | // GetFiles returns all files in the given directory, not including any subdirectories
90 | func GetFiles(pathname string) (*list.List, error) {
91 |
92 | files := list.New()
93 | rd, err := ioutil.ReadDir(pathname)
94 | for _, fi := range rd {
95 | if fi.IsDir() {
96 | continue
97 | } else {
98 | files.PushBack(fi.Name())
99 | }
100 | }
101 | return files, err
102 | }
103 |
104 | // RemoveFiles removes all files in the given directory, not including any subdirectories
105 | func RemoveFiles(pathname string) error {
106 | list, err := GetFiles(pathname)
107 | if err != nil {
108 | return err
109 | }
110 | for i := list.Front(); i != nil; i = i.Next() {
111 | fn := fmt.Sprintf("%v", i.Value)
112 | err = os.Remove(pathname + "/" + fn)
113 | if err != nil {
114 | return err
115 | }
116 | }
117 | // os.RemoveAll(pathname)
118 | return nil
119 | }
120 |
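A short usage sketch of the byte-conversion and modulo helpers above (illustration only; assumes the module path github.com/jhunters/bigqueue):

```go
package main

import (
	"fmt"

	"github.com/jhunters/bigqueue"
)

func main() {
	// int64 <-> big-endian bytes round trip
	b := bigqueue.IntToBytes(1027)
	fmt.Println(len(b), bigqueue.BytesToInt(b)) // 8 1027

	// Mod(val, bits) is val % (1 << bits); e.g. with 1<<7 = 128 items per page,
	// item 1027 lands at offset 3 on its page (1027 = 8*128 + 3).
	fmt.Println(bigqueue.Mod(1027, 7)) // 3
}
```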
--------------------------------------------------------------------------------