├── .travis.yml ├── LICENSE ├── README.md └── queue ├── fstorage_engine.go ├── fstorage_gc.go ├── fstorage_io.go ├── fstorage_restore.go ├── fstorage_test.go ├── internal ├── logging │ └── logging.go └── mmap │ ├── mmap.go │ ├── mmap_test.go │ ├── mmap_unix.go │ ├── mmap_windows.go │ ├── msync_netbsd.go │ └── msync_unix.go ├── queue.go ├── queue_error.go ├── queue_inprocessing.go ├── queue_logging.go ├── queue_memory.go ├── queue_memory_test.go ├── queue_options.go ├── queue_test.go ├── queue_tools.go └── worker.go /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | sudo: false 3 | go: 4 | - tip 5 | before_install: 6 | - go get github.com/mattn/goveralls 7 | 8 | script: 9 | - $HOME/gopath/bin/goveralls -service=travis-ci -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Go Report Card](https://goreportcard.com/badge/github.com/sybrexsys/RapidMQ)](https://goreportcard.com/report/github.com/sybrexsys/RapidMQ) 2 | [![Build Status](https://travis-ci.org/sybrexsys/RapidMQ.svg?branch=master)](https://travis-ci.org/sybrexsys/RapidMQ) 3 | [![Coverage Status](https://coveralls.io/repos/github/sybrexsys/RapidMQ/badge.svg?branch=master)](https://coveralls.io/github/sybrexsys/RapidMQ?branch=master) 4 | [![GoDoc](https://godoc.org/github.com/sybrexsys/RapidMQ?status.png)](https://godoc.org/github.com/sybrexsys/RapidMQ) 5 | 6 | 7 | RapidMQ 8 | ======= 9 | RapidMQ is a pure, extremely productive, lightweight and reliable library for managing of the local messages queue in the [Go programming language](http:golang.org). 
10 | 11 | Installation 12 | ----------- 13 | 14 | go get github.com/sybrexsys/RapidMQ/queue 15 | 16 | Requirements 17 | ----------- 18 | 19 | * Need at least `go1.4` or newer. 20 | 21 | Usage 22 | ----------- 23 | 24 | ***Queue*** 25 | 26 | Base structure in the base is Queue 27 | Queue is created with that function: 28 | 29 | ``` 30 | func CreateQueue(Name, StoragePath string, Log Logging, Factory WorkerFactory, Options *Options) (*Queue, error) 31 | ``` 32 | 33 | |Parameters | Type | Description 34 | |:----------------- |:-------------|:---------------------- 35 | |Name |string | Queue name. Used for logging only 36 | |StoragePath |string | Path to the disk storages' files 37 | |Log |Logging | Interface is used to logging of the queue's events. If equal to nil, logging is ensent. Description bellow 38 | |Factory |WorkerFactory | Interface for abstract factory of the workers. Description bellow 39 | |Options |*Options | Options of the queue 40 | 41 | ``` 42 | func (q *Queue) Insert(buf []byte) bool 43 | ``` 44 | Appends the message into the queue. In depends of the timeout's option either is trying to write message to the disk or is trying to process this message in the memory and writing to the disk only if timeout is expired shortly. Returns false if aren't processing / writing of the message in the during of the timeout or has some problems with writing to disk 45 | 46 | ``` 47 | func (q *Queue) Process(worker WorkerID, isOk bool) 48 | ``` 49 | That function must be called from the worker of the message. In depends of the `isOk` parameter either messages are deleting from the queue or are marking as faulty and again processing after some timeout 50 | 51 | ``` 52 | func (q *Queue) Count() uint64 53 | ``` 54 | Returns the count of the messages in the queue 55 | 56 | ``` 57 | func (q *Queue) Close() 58 | ``` 59 | Stops the handler of the messages, saves the messages located in the memory into the disk, closes all opened files. 
60 | 61 | ***Message*** 62 | 63 | Description of the structure that will be sent to worker 64 | 65 | ``` 66 | type Message struct { 67 | ID StorageIdx 68 | Buffer []byte 69 | } 70 | ``` 71 | 72 | |Member | Type | Description 73 | |:----------------- |:-------------|:---------------------- 74 | | ID | StorageIdx | ID of the message 75 | | Buffer |[]byte | Buffer with content of the message 76 | 77 | 78 | 79 | 80 | ***WorkerFactory*** 81 | 82 | Worker factory is a structure that create workers for processing messages 83 | Your factory must support next interface: 84 | ``` 85 | type WorkerFactory interface { 86 | CreateWorker() Worker 87 | NeedTimeoutProcessing() bool 88 | } 89 | ``` 90 | 91 | ``` 92 | CreateWorker() Worker 93 | ``` 94 | Creates new worker for this factory with unique ID 95 | 96 | ``` 97 | NeedTimeoutProcessing() bool 98 | ``` 99 | Returns true if possible used some messages in one action (for example, collect large SQL script from lot of the small messages) 100 | 101 | 102 | 103 | ***Worker*** 104 | 105 | If you are using of your worker, he must support next interface 106 | ``` 107 | type Worker interface { 108 | ProcessMessage(*Queue, *Message, chan Worker) 109 | ProcessTimeout(*Queue, chan Worker) 110 | GetID() WorkerID 111 | Close() 112 | } 113 | ``` 114 | 115 | ``` 116 | ProcessMessage(*Queue, *Message, chan Worker) 117 | ``` 118 | Processes message that is stored in `*Message`. 
119 | After it the worker must call function `(*Queue).Process` with his unique identifier and with result of the processing, also must be pushed himself into chanal `Worker` 120 | 121 | ``` 122 | ProcessTimeout(*Queue, chan Worker) 123 | ``` 124 | Processing of the event when available messages is absent 125 | After it the worker must call function `(*Queue).Process` with his unique identifier and with result of the processing, also must send himself into chanal `Worker` 126 | 127 | ``` 128 | GetID() WorkerID 129 | ``` 130 | Returns unique identifier of the worker 131 | 132 | ``` 133 | Close() 134 | ``` 135 | Close is called when queue is finishing work with worker. Here you can close connection to database or etc. 136 | 137 | 138 | ***Logging*** 139 | 140 | If you are using of your logging system, it must support next interface 141 | 142 | ``` 143 | type Logging interface { 144 | Trace(msg string, a ...interface{}) 145 | Info(msg string, a ...interface{}) 146 | Warning(msg string, a ...interface{}) 147 | Error(msg string, a ...interface{}) 148 | } 149 | ``` 150 | 151 | 152 | 153 | Author 154 | ------ 155 | ***Vadim Shakun:*** [vadim.shakun@gmail.com](mailto:vadim.shakun@gmail.com) 156 | 157 | License 158 | ------- 159 | RapidMQ is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. 
160 | -------------------------------------------------------------------------------- /queue/fstorage_engine.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "hash/crc32" 7 | "io" 8 | "io/ioutil" 9 | "os" 10 | "reflect" 11 | "sync" 12 | "sync/atomic" 13 | "time" 14 | "unsafe" 15 | 16 | "github.com/sybrexsys/RapidMQ/queue/internal/mmap" 17 | ) 18 | 19 | var startTime = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) 20 | 21 | // InvalidIdx id Invalid index description 22 | const InvalidIdx StorageIdx = 0xFFFFFFFFFFFFFFFF 23 | 24 | var magicNumberDataPrefix = uint32(0x67452301) 25 | var magicNumberDataSuffix = uint32(0xEFCDAB89) 26 | 27 | type fileAccess struct { 28 | *sync.Mutex 29 | Handle *os.File 30 | } 31 | 32 | //StorageIdx is unique identifier of the message in the memory or on the disk 33 | type StorageIdx uint64 34 | 35 | const ( 36 | stateEnable = iota 37 | stateInProcess 38 | stateFree 39 | ) 40 | 41 | const ( 42 | putRecordAsNew = iota 43 | putRecordAsInProcess 44 | putRecordAsProcessedWithError 45 | ) 46 | 47 | type storagePutter interface { 48 | put(buffer []byte, option int) (StorageIdx, error) 49 | UnlockRecord(Idx StorageIdx) error 50 | FreeRecord(Idx StorageIdx) error 51 | } 52 | 53 | var dot = [1]byte{0} 54 | 55 | // availableRecordInfo is Structure returned by GetNext function for receive information about available record 56 | type availableRecordInfo struct { 57 | Idx StorageIdx 58 | ID StorageIdx 59 | FileInfo *fileAccess 60 | 61 | FileOffset uint32 62 | Length int32 63 | } 64 | 65 | type indexFileHeader struct { 66 | MagicNumber uint64 67 | TotalRecord uint64 68 | TotalFree uint64 69 | MinIndex StorageIdx 70 | CRC int32 71 | IndexRecordSize int32 72 | } 73 | 74 | type indexRecord struct { 75 | ID StorageIdx 76 | FileIndex StorageIdx 77 | LastAction time.Duration 78 | State int32 79 | FileOffset uint32 80 | Length int32 81 | TryCount int32 
82 | } 83 | 84 | var ( 85 | lastRecord = indexRecord{ 86 | FileIndex: InvalidIdx, 87 | FileOffset: 0xFFFFFFFF, 88 | Length: -1, 89 | State: -1, 90 | TryCount: -1, 91 | } 92 | 93 | emptyRecord = indexRecord{ 94 | FileIndex: InvalidIdx, 95 | FileOffset: 0xFFFFFFFF, 96 | Length: -1, 97 | State: -1, 98 | TryCount: -1, 99 | } 100 | ) 101 | 102 | // fileStorage is struct for present disk storage of the data 103 | type fileStorage struct { 104 | folder string // folder where locatd files of the storage 105 | log Logging // log file for store information about actions 106 | name string //name of then storage. At current time used for save to log file only 107 | options *StorageOptions // storage options 108 | readMutex sync.RWMutex // mutex for work with file handles for output from the sorage 109 | readFiles map[StorageIdx]*fileAccess // map for store read handles of the storage 110 | writeFiles *filequeue // list of the opened handles for writing 111 | idxFile *os.File 112 | mmapinfo mmap.MMap 113 | mmapsize int64 114 | idxMutex sync.Mutex 115 | idx []indexRecord 116 | operations uint32 117 | freeCounts map[StorageIdx]int64 118 | startIdx uint64 119 | lastTimeCheckDeletedFiles time.Duration 120 | timeout time.Duration 121 | notify newMessageNotificator 122 | immediatlyRelease int32 123 | *indexFileHeader 124 | } 125 | 126 | // newID appends new item into storage, test size of the mapped file and increase this value if not enough 127 | func (fs *fileStorage) newID() (StorageIdx, error) { 128 | if fs.TotalRecord == uint64(len(fs.idx)-1) { 129 | // Check possibility to move index for processed elements of the index 130 | if ok, err := fs.checkFreeIndexRecords(true); err == nil { 131 | if !ok { 132 | fs.mmapinfo.Unmap() 133 | indexFileSize, err := fs.calculateNextSize(fs.mmapsize + 1) 134 | fs.idxFile.Seek(indexFileSize-1, 0) 135 | fs.idxFile.Write(dot[0:1]) 136 | fs.mmapinfo, err = mmap.MapRegion(fs.idxFile, int(indexFileSize), mmap.RDWR, 0, 0) 137 | if err != nil { 138 | 
err1 := fs.idxFile.Close() 139 | if err1 != nil { 140 | return InvalidIdx, err1 141 | } 142 | return InvalidIdx, err 143 | } 144 | fs.mmapsize = indexFileSize 145 | fs.setMMapInfo() 146 | } 147 | } else { 148 | return InvalidIdx, err 149 | } 150 | } 151 | idx := StorageIdx(fs.TotalRecord) + fs.MinIndex 152 | fs.idx[fs.TotalRecord] = emptyRecord 153 | fs.TotalRecord++ 154 | fs.idx[fs.TotalRecord] = lastRecord 155 | return idx, nil 156 | } 157 | 158 | // RecordsSize returns size of the all available records 159 | func (fs *fileStorage) RecordsSize() uint64 { 160 | var total uint64 161 | fs.idxMutex.Lock() 162 | defer fs.idxMutex.Unlock() 163 | for i := fs.startIdx; i < fs.TotalRecord; i++ { 164 | if fs.idx[i].State != stateFree { 165 | total += uint64(fs.idx[i].Length) 166 | } 167 | } 168 | return total 169 | } 170 | 171 | // Count returns count of the all available records 172 | func (fs *fileStorage) Count() uint64 { 173 | fs.idxMutex.Lock() 174 | defer fs.idxMutex.Unlock() 175 | return uint64(fs.TotalRecord) - fs.TotalFree 176 | } 177 | 178 | // description is used in logging system for detect source of the processing message 179 | func (fs *fileStorage) description() string { 180 | return "disk storage" 181 | } 182 | 183 | //GetNext returns information about next available record and mark this record as InProgress 184 | //Possible unmark this record as Enabled with UnlockRecord function or 185 | // mark record as free with FreeRecord function 186 | func (fs *fileStorage) getNext() (*availableRecordInfo, error) { 187 | firstSkiped := InvalidIdx 188 | lastFree := InvalidIdx 189 | NextDuration := time.Duration(0x7FFFFFFFFFFFFFFF) 190 | fs.idxMutex.Lock() 191 | defer fs.idxMutex.Unlock() 192 | checkStartIdx := true 193 | for i := fs.startIdx; i < fs.TotalRecord; i++ { 194 | if checkStartIdx && fs.idx[i].State != stateFree { 195 | fs.startIdx = i 196 | checkStartIdx = false 197 | } 198 | if fs.idx[i].State == stateFree && firstSkiped != InvalidIdx { 199 | lastFree = 
StorageIdx(i) 200 | } 201 | if fs.idx[i].State == stateEnable { 202 | if fs.options.SkipReturnedRecords && fs.idx[i].TryCount > 0 { 203 | //Check record to overdue timeout to current time 204 | CurrentTime := time.Since(startTime) 205 | TmpDuration := fs.idx[i].LastAction + time.Duration(fs.options.SkipDelayPerTry)* 206 | time.Duration(fs.idx[i].TryCount)*time.Millisecond 207 | if TmpDuration >= CurrentTime { 208 | if TmpDuration < NextDuration { 209 | NextDuration = TmpDuration 210 | } 211 | if firstSkiped == InvalidIdx { 212 | firstSkiped = StorageIdx(i) 213 | } 214 | continue 215 | } 216 | } 217 | if lastFree != InvalidIdx { 218 | fs.idx[lastFree] = fs.idx[firstSkiped] 219 | fs.idx[firstSkiped].State = stateFree 220 | } 221 | tmp := &availableRecordInfo{ 222 | Idx: fs.MinIndex + StorageIdx(i), 223 | FileOffset: fs.idx[i].FileOffset, 224 | Length: fs.idx[i].Length, 225 | ID: fs.idx[i].ID, 226 | } 227 | FileIdx := fs.idx[i].FileIndex 228 | file, err := fs.getReadHandle(tmp.ID, FileIdx) 229 | if err != nil { 230 | return nil, err 231 | } 232 | fs.idx[i].State = stateInProcess 233 | tmp.FileInfo = file 234 | fs.checkFlush() 235 | return tmp, nil 236 | } 237 | } 238 | // Check to messages with timeout and return nearest time of the ability of the recordd 239 | if NextDuration != 0x7FFFFFFFFFFFFFFF { 240 | return nil, &queueError{ 241 | ErrorType: errorInDelay, 242 | NextAvailable: NextDuration - time.Since(startTime), 243 | } 244 | } 245 | // return error {No more} and timestamp of the checking 246 | return nil, &queueError{ 247 | ErrorType: errorNoMore, 248 | NextAvailable: time.Since(startTime), 249 | } 250 | } 251 | 252 | func (fs *fileStorage) getReadHandle(recID, fileIdx StorageIdx) (*fileAccess, error) { 253 | // get open file handle for read record 254 | fs.readMutex.RLock() 255 | file, ok := fs.readFiles[fileIdx] 256 | fs.readMutex.RUnlock() 257 | var err error 258 | if !ok { 259 | file = &fileAccess{} 260 | file.Handle, err = os.Open(fs.folder + 
dataFileNameByID(fileIdx)) 261 | if err != nil { 262 | fs.log.Error("[QFS:%s:%d] Cannot open datafile {%d} Error:[%s]", fs.name, recID, fileIdx, err.Error()) 263 | return nil, err 264 | } 265 | fs.log.Trace("[QFS:%s:%d] Was opened datafile {%d}", fs.name, recID, fileIdx) 266 | file.Mutex = &sync.Mutex{} 267 | fs.readMutex.Lock() 268 | 269 | // Check count of the opened files and if exceed remove one open handle from the map 270 | if int16(len(fs.readFiles)) > fs.options.MaxOneTimeOpenedFiles { 271 | for k, vak := range fs.readFiles { 272 | vak.Lock() 273 | vak.Handle.Close() 274 | vak.Unlock() 275 | delete(fs.readFiles, k) 276 | break 277 | } 278 | } 279 | fs.readFiles[fileIdx] = file 280 | fs.readMutex.Unlock() 281 | } 282 | return file, nil 283 | } 284 | 285 | // Get returns next available record from the storage 286 | func (fs *fileStorage) Get() (*QueueItem, error) { 287 | var buf []byte 288 | 289 | for { 290 | ai, err := fs.getNext() 291 | if err != nil { 292 | return nil, err 293 | } 294 | // if is not valid message we remove it from queue 295 | if buf = fs.getValidRecord(ai.FileInfo, ai.ID, ai.FileOffset, ai.Length); buf == nil { 296 | err = fs.freeRecord(ai.Idx) 297 | if err != nil { 298 | return nil, err 299 | } 300 | continue 301 | } 302 | stream, err := bufToStream(buf) 303 | if err != nil { 304 | return nil, err 305 | } 306 | tmp := &QueueItem{ 307 | Stream: stream, 308 | ID: ai.ID, 309 | idx: ai.Idx, 310 | storage: fs, 311 | } 312 | return tmp, nil 313 | } 314 | } 315 | 316 | // UnlockRecord unmarks record with index as enabled for next select 317 | func (fs *fileStorage) UnlockRecord(Idx StorageIdx) error { 318 | fs.idxMutex.Lock() 319 | defer fs.idxMutex.Unlock() 320 | if Idx < fs.MinIndex || Idx-fs.MinIndex >= StorageIdx(fs.TotalRecord) { 321 | return errors.New("out of bound") 322 | } 323 | i := uint64(Idx - fs.MinIndex) 324 | if fs.idx[i].State == stateInProcess { 325 | fs.idx[i].State = stateEnable 326 | fs.idx[i].TryCount++ 327 | 
fs.idx[i].LastAction = time.Since(startTime) 328 | fs.checkFlush() 329 | } 330 | if fs.notify != nil { 331 | fs.notify.newMessageNotification() 332 | } 333 | return nil 334 | } 335 | 336 | // UnlockRecord unmarks record with index as enabled for next select 337 | func (fs *fileStorage) checkUnusedFiles() { 338 | for FileIdx, count := range fs.freeCounts { 339 | if count == 0 && fs.writeFiles.canClear(FileIdx) { 340 | fs.log.Trace("[QFS:%s] Delete datafile {%d} because he does not contain any data", fs.name, FileIdx) 341 | fs.readMutex.Lock() 342 | file, ok := fs.readFiles[FileIdx] 343 | if ok { 344 | err := file.Handle.Close() 345 | if err != nil { 346 | fs.log.Error("Try close file handle with errors %s ", err.Error()) 347 | } 348 | file.Handle = nil 349 | delete(fs.readFiles, FileIdx) 350 | } 351 | fs.readMutex.Unlock() 352 | os.Remove(fs.folder + dataFileNameByID(FileIdx)) 353 | delete(fs.freeCounts, FileIdx) 354 | } 355 | } 356 | } 357 | 358 | // freeRecord marks record as processed and removes unused file with bodies of the messages 359 | func (fs *fileStorage) freeRecord(Idx StorageIdx) error { 360 | if Idx < fs.MinIndex || Idx-fs.MinIndex >= StorageIdx(fs.TotalRecord) { 361 | return errors.New("out of bound") 362 | } 363 | localidx := Idx - fs.MinIndex 364 | if fs.idx[localidx].State != stateFree { 365 | fs.idx[localidx].State = stateFree 366 | FileIdx := fs.idx[localidx].FileIndex 367 | fs.log.Trace("[QFS:%s] Deleted from datafile {%d} count %d ", fs.name, FileIdx, fs.freeCounts[FileIdx]) 368 | fs.freeCounts[FileIdx]-- 369 | if fs.freeCounts[FileIdx] == 0 && fs.writeFiles.canClear(FileIdx) { 370 | fs.log.Trace("[QFS:%s] Delete datafile {%d} because he does not contain any data", fs.name, FileIdx) 371 | fs.readMutex.Lock() 372 | file, ok := fs.readFiles[FileIdx] 373 | if ok { 374 | file.Handle.Close() 375 | file.Handle = nil 376 | delete(fs.readFiles, FileIdx) 377 | } 378 | fs.readMutex.Unlock() 379 | os.Remove(fs.folder + dataFileNameByID(FileIdx)) 380 | 
delete(fs.freeCounts, FileIdx) 381 | } else { 382 | Now := time.Since(startTime) 383 | if fs.lastTimeCheckDeletedFiles+30*time.Second < Now { 384 | fs.checkUnusedFiles() 385 | fs.lastTimeCheckDeletedFiles = Now 386 | } 387 | } 388 | 389 | fs.TotalFree++ 390 | } 391 | return nil 392 | } 393 | 394 | // FreeRecord marks record as free and in the future this record will never returns via GetNext function 395 | func (fs *fileStorage) FreeRecord(Idx StorageIdx) error { 396 | fs.idxMutex.Lock() 397 | defer fs.idxMutex.Unlock() 398 | return fs.freeRecord(Idx) 399 | } 400 | 401 | // setMMapInfo sets index slice ond header of the index file to memopry mapped file 402 | func (fs *fileStorage) setMMapInfo() { 403 | fs.indexFileHeader = (*indexFileHeader)(unsafe.Pointer(&fs.mmapinfo[0])) //#nosec 404 | head := (*reflect.SliceHeader)(unsafe.Pointer(&fs.idx)) //#nosec 405 | sof := unsafe.Sizeof(*fs.indexFileHeader) //#nosec 406 | head.Data = uintptr(unsafe.Pointer(&fs.mmapinfo[sof])) //#nosec 407 | var ir indexRecord 408 | head.Len = int((uintptr(fs.mmapsize) - sof) / unsafe.Sizeof(ir)) //#nosec 409 | head.Cap = head.Len 410 | } 411 | 412 | // calculateNextSize calculates new size of index file with increase by twice until 1 Gb size 413 | // After this event increments by 1 Gb 414 | func (fs *fileStorage) calculateNextSize(CurrentSize int64) (int64, error) { 415 | if CurrentSize == 0 { 416 | CurrentSize = int64(os.Getpagesize()) 417 | } 418 | for i := uint(15); i <= 30; i++ { 419 | if CurrentSize <= 1<= fs.options.FlushOperations { 538 | fs.flush() 539 | fs.operations = 0 540 | } 541 | } 542 | 543 | // checkValidRecord checks validity of the record 544 | func (fs *fileStorage) getValidRecord(file *fileAccess, index StorageIdx, offset uint32, length int32) []byte { 545 | var buff [16]byte 546 | file.Lock() 547 | defer file.Unlock() 548 | 549 | file.Handle.Seek(int64(offset), 0) 550 | io.ReadFull(file.Handle, buff[:]) 551 | z := binary.LittleEndian.Uint32(buff[:]) 552 | if z != 
magicNumberDataPrefix { 553 | return nil 554 | } 555 | z64 := binary.LittleEndian.Uint64(buff[4:]) 556 | if StorageIdx(z64) != index { 557 | return nil 558 | } 559 | z = binary.LittleEndian.Uint32(buff[12:]) 560 | if z != uint32(length) { 561 | return nil 562 | } 563 | outbuf := make([]byte, z) 564 | io.ReadFull(file.Handle, outbuf) 565 | io.ReadFull(file.Handle, buff[:8]) 566 | if fs.options.CheckCRCOnRead { 567 | crc := crc32.ChecksumIEEE(outbuf) 568 | z = binary.LittleEndian.Uint32(buff[:]) 569 | if crc != z { 570 | return nil 571 | } 572 | } 573 | z = binary.LittleEndian.Uint32(buff[4:]) 574 | if z != magicNumberDataSuffix { 575 | return nil 576 | } 577 | return outbuf 578 | } 579 | 580 | // checkFreeIndexRecords checks count of the free records in the top of the index table and removes such records 581 | func (fs *fileStorage) checkFreeIndexRecords(IsIncrementIndex bool) (bool, error) { 582 | var Frees uint64 583 | for i := uint64(0); i < fs.TotalRecord; i++ { 584 | if fs.idx[i].State == stateFree { 585 | Frees++ 586 | } else { 587 | break 588 | } 589 | } 590 | percents := uint8(100 * Frees / uint64(len(fs.idx))) 591 | if (IsIncrementIndex && percents < fs.options.PercentFreeForRecalculateOnIncrementIndexFile) || 592 | percents < fs.options.PercentFreeForRecalculateOnExit { 593 | return false, nil 594 | } 595 | fs.flush() 596 | copy(fs.idx[0:], fs.idx[Frees:fs.TotalRecord]) 597 | fs.TotalRecord -= Frees 598 | fs.TotalFree -= Frees 599 | fs.log.Trace("[QFS:%s] Total free count %d", fs.name, fs.TotalFree) 600 | fs.MinIndex += StorageIdx(Frees) 601 | fs.startIdx = 0 602 | fs.flush() 603 | needSize := int64(unsafe.Sizeof(fs.idx[0]))*int64(fs.TotalRecord)*120/100 + int64(unsafe.Sizeof(*fs.indexFileHeader)) 604 | testSize, _ := fs.calculateNextSize(needSize) 605 | if testSize >= fs.mmapsize { 606 | return true, nil 607 | } 608 | fs.mmapinfo.Unmap() 609 | fs.idxFile.Truncate(testSize) 610 | var err error 611 | fs.mmapinfo, err = mmap.MapRegion(fs.idxFile, 
int(testSize), mmap.RDWR, 0, 0) 612 | if err != nil { 613 | fs.idxFile.Close() 614 | return true, err 615 | } 616 | fs.mmapsize = testSize 617 | fs.setMMapInfo() 618 | return true, nil 619 | } 620 | 621 | //put appends one message to storage and marks state in depend of the option 622 | 623 | func (fs *fileStorage) put(buffer []byte, option int) (StorageIdx, error) { 624 | var ( 625 | tmp indexRecord 626 | offset int64 627 | ) 628 | atomic.StoreInt32(&fs.immediatlyRelease, 1) 629 | fs.idxMutex.Lock() 630 | Idx, err := fs.newID() 631 | if err != nil { 632 | return InvalidIdx, err 633 | } 634 | fs.idxMutex.Unlock() 635 | fs.log.Trace("[QFS:%s:%d] Resuest write handle", fs.name, Idx) 636 | file, err := fs.writeFiles.getHandle(uint32(len(buffer)), Idx, fs.timeout) 637 | if err != nil { 638 | fs.log.Error("[QFS:%s%d] Cannot receive write handle Error: %s", fs.name, Idx, err.Error()) 639 | return InvalidIdx, err 640 | } 641 | defer fs.writeFiles.putHandle(file, err) 642 | offset, err = file.Seek(0, 2) 643 | if err != nil { 644 | fs.log.Error("[QFS:%s:%d] Cannot append to datafile {%d} Error: %s", fs.name, Idx, file.FileIdx, err.Error()) 645 | return InvalidIdx, err 646 | } 647 | err = saveDataFileData(file.File, Idx, buffer) 648 | if err != nil { 649 | fs.log.Error("[QFS:%s:%d] Cannot append to datafile {%d} Error: %s", fs.name, Idx, file.FileIdx, err.Error()) 650 | return InvalidIdx, err 651 | } 652 | fs.log.Trace("[QFS:%s:%d] Appended to datafile {%d} ", fs.name, Idx, file.FileIdx) 653 | 654 | tmp = indexRecord{ 655 | FileIndex: file.FileIdx, 656 | FileOffset: uint32(offset), 657 | Length: int32(len(buffer)), 658 | ID: Idx, 659 | } 660 | switch option { 661 | case putRecordAsNew: 662 | case putRecordAsProcessedWithError: 663 | tmp.TryCount = 1 664 | tmp.LastAction = time.Since(startTime) 665 | case putRecordAsInProcess: 666 | tmp.State = stateInProcess 667 | } 668 | fs.idxMutex.Lock() 669 | intIdx := Idx - fs.MinIndex 670 | fs.idx[intIdx] = tmp 671 | 
fs.freeCounts[file.FileIdx]++ 672 | fs.log.Trace("[QFS:%s] Appended to datafile {%d} count %d ", fs.name, file.FileIdx, fs.freeCounts[file.FileIdx]) 673 | fs.checkFlush() 674 | fs.idxMutex.Unlock() 675 | return Idx, nil 676 | } 677 | 678 | // Put puts buffer to disk storage with setted timeout 679 | func (fs *fileStorage) Put(buffer []byte) (StorageIdx, error) { 680 | return fs.put(buffer, putRecordAsNew) 681 | } 682 | 683 | // createStorage creates new file storage 684 | func createStorage(StorageName, StorageLocation string, Log Logging, Options *StorageOptions, 685 | TimeOut time.Duration, Notity newMessageNotificator) (*fileStorage, error) { 686 | if Log == nil { 687 | z := nullLog(0) 688 | Log = z 689 | } 690 | Log.Info("[fileStorage][%s] is created...", StorageName) 691 | if Options == nil { 692 | Options = &DefaultStorageOptions 693 | } 694 | tmp := &fileStorage{ 695 | folder: normalizeFilePath(StorageLocation), 696 | name: StorageName, 697 | log: Log, 698 | options: Options, 699 | freeCounts: make(map[StorageIdx]int64), 700 | readFiles: make(map[StorageIdx]*fileAccess), 701 | lastTimeCheckDeletedFiles: time.Since(startTime), 702 | timeout: TimeOut, 703 | notify: Notity, 704 | } 705 | tmp.writeFiles = createIOQueue(tmp) 706 | err := tmp.loadIndexFile() 707 | if err != nil { 708 | tmp.log.Error("[QFS:%s] Cannot create:%s", StorageName, err.Error()) 709 | err = tmp.restoreIndexFile() 710 | if err != nil { 711 | tmp.log.Error("[QFS:%s] Cannot restore:%s", StorageName, err.Error()) 712 | return nil, err 713 | } 714 | } 715 | tmp.deleteUnusedFiles() 716 | Log.Info("[QFS:%s] was created successful...", StorageName) 717 | err = tmp.garbageCollect() 718 | if err != nil { 719 | return nil, err 720 | } 721 | return tmp, nil 722 | } 723 | 724 | // close closes all interhal opened handles 725 | func (fs *fileStorage) close() (err error) { 726 | if fs.mmapinfo != nil { 727 | err = fs.mmapinfo.Unmap() 728 | if err != nil { 729 | return err 730 | } 731 | err = 
fs.idxFile.Close() 732 | if err != nil { 733 | return err 734 | } 735 | fs.mmapinfo = nil 736 | fs.idxFile = nil 737 | } 738 | fs.readMutex.Lock() 739 | for _, k := range fs.readFiles { 740 | k.Lock() 741 | err = k.Handle.Close() 742 | if err != nil { 743 | return err 744 | } 745 | k.Unlock() 746 | } 747 | fs.readFiles = nil 748 | fs.readMutex.Unlock() 749 | fs.writeFiles.free() 750 | return nil 751 | } 752 | 753 | // Close closes file storage 754 | func (fs *fileStorage) Close() (err error) { 755 | atomic.StoreInt32(&fs.immediatlyRelease, 1) 756 | fs.idxMutex.Lock() 757 | fs.idxMutex.Unlock() 758 | fs.log.Info("[QFS:%s] is closed... Record count is %d", fs.name, fs.TotalRecord-fs.TotalFree) 759 | fs.checkFreeIndexRecords(false) 760 | err = fs.close() 761 | if err != nil { 762 | return err 763 | } 764 | fs.checkUnusedFiles() 765 | fs.log.Info("[QFS:%s] was closed successful...", fs.name) 766 | return nil 767 | } 768 | 769 | func (fs *fileStorage) info() { 770 | fs.idxMutex.Lock() 771 | defer fs.idxMutex.Unlock() 772 | Now := time.Since(startTime) 773 | fs.log.Info("[QFS:%s]\n %v", fs.name, fs.indexFileHeader) 774 | for i := uint64(0); i < fs.TotalRecord; i++ { 775 | s := "" 776 | if fs.idx[i].State == stateEnable && fs.idx[i].TryCount > 0 { 777 | d := fs.idx[i].LastAction + time.Duration(fs.idx[i].TryCount)*time.Duration(fs.options.SkipDelayPerTry) - Now 778 | s = "Available in:" + d.String() 779 | } 780 | fs.log.Info("[QFS:%s:%d] %v %s", fs.name, i+uint64(fs.MinIndex), fs.idx[i], s) 781 | } 782 | } 783 | -------------------------------------------------------------------------------- /queue/fstorage_gc.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "errors" 5 | "os" 6 | "sync/atomic" 7 | ) 8 | 9 | type calcsize struct { 10 | size int64 11 | realsize int64 12 | } 13 | 14 | type garbageCollectInfo struct { 15 | fileIndex StorageIdx 16 | recCount int64 17 | file *fileAccess 18 | } 19 | 20 | 
// actions: 21 | // 1. You must find all files where size of useful space is not more then 10 per cent 22 | // of total size of the file 23 | // Opened files is not processing 24 | 25 | // after processing delete files from opended for reading files list 26 | 27 | func (fs *fileStorage) garbageCollect() error { 28 | fs.idxMutex.Lock() 29 | defer func() { 30 | fs.idxMutex.Unlock() 31 | fs.log.Trace("[Q:%s] Garbage collection finished", fs.name) 32 | }() 33 | atomic.StoreInt32(&fs.immediatlyRelease, 0) 34 | if fs.TotalRecord == 0 { 35 | return nil 36 | } 37 | fs.log.Trace("[Q:%s] Garbage collection is started", fs.name) 38 | fs.writeFiles.clear() 39 | fs.checkUnusedFiles() 40 | needProcess := make(map[StorageIdx]*calcsize) 41 | for k, cnt := range fs.freeCounts { 42 | fileinfo, err := os.Stat(fs.folder + dataFileNameByID(k)) 43 | if err != nil { 44 | continue 45 | } 46 | rs := fileinfo.Size() 47 | if rs < fs.options.MaxDataFileSize/20 { 48 | continue 49 | } 50 | if cnt < 100 { 51 | continue 52 | } 53 | needProcess[k] = &calcsize{ 54 | size: 0, 55 | realsize: rs / 10, 56 | } 57 | } 58 | // Calculate list of the files where count of the valid records bellow then 10 % of size of file 59 | for i := uint64(0); i < fs.TotalRecord; i++ { 60 | if fs.idx[i].State != stateEnable { 61 | continue 62 | } 63 | fileidx := fs.idx[i].FileIndex 64 | cur, ok := needProcess[fileidx] 65 | if !ok { 66 | continue 67 | } 68 | cur.size += int64(fs.idx[i].Length) 69 | cur.size += 20 70 | if cur.realsize < cur.size { 71 | delete(needProcess, fileidx) 72 | } 73 | } 74 | if len(needProcess) == 0 { 75 | return nil 76 | } 77 | gi := &garbageCollectInfo{ 78 | fileIndex: InvalidIdx, 79 | recCount: 0, 80 | file: &fileAccess{}, 81 | } 82 | 83 | // Move valid records into new file 84 | for i := uint64(0); i < fs.TotalRecord; i++ { 85 | if fs.idx[i].State != stateEnable { 86 | continue 87 | } 88 | fileidx := fs.idx[i].FileIndex 89 | _, ok := needProcess[fileidx] 90 | if !ok { 91 | continue 92 | } 93 | 
if atomic.LoadInt32(&fs.immediatlyRelease) == 1 { 94 | break 95 | } 96 | if err := fs.moveOneRecord(i, gi); err != nil { 97 | return err 98 | } 99 | } 100 | write := &fileWrite{ 101 | File: gi.file.Handle, 102 | FileIdx: gi.fileIndex, 103 | } 104 | fs.writeFiles.putHandle(write, nil) 105 | fs.freeCounts[gi.fileIndex] = gi.recCount 106 | return nil 107 | } 108 | 109 | func (fs *fileStorage) getNextFreeIndex() StorageIdx { 110 | i := StorageIdx(0) 111 | for { 112 | if _, ok := fs.freeCounts[i]; !ok { 113 | return i 114 | } 115 | i++ 116 | } 117 | } 118 | 119 | func (fs *fileStorage) moveOneRecord(idx uint64, gi *garbageCollectInfo) error { 120 | nidx := StorageIdx(0) 121 | 122 | if gi.file.Handle == nil { 123 | nidx = fs.getNextFreeIndex() 124 | } else { 125 | sz, err := gi.file.Handle.Seek(0, 2) 126 | if err != nil { 127 | return err 128 | } 129 | if sz+int64(fs.idx[idx].Length) > fs.options.MaxDataFileSize { 130 | gi.file.Handle.Close() 131 | fs.freeCounts[gi.fileIndex] = gi.recCount 132 | nidx = fs.getNextFreeIndex() 133 | } 134 | } 135 | 136 | if nidx != 0 { 137 | gi.fileIndex = nidx 138 | datFileName := fs.folder + dataFileNameByID(nidx) 139 | f, err := os.Create(datFileName) 140 | if err != nil { 141 | fs.log.Error("[FSQ:%s] Cannot create datafile {%d} Error: %s", fs.name, idx, err.Error()) 142 | return err 143 | } 144 | fs.log.Trace("[FSQ:%s] Create datafile {%d}", fs.name, idx) 145 | saveDataFileHeader(f) 146 | gi.file.Handle = f 147 | gi.recCount = 0 148 | } 149 | 150 | oldFileIdx := fs.idx[idx].FileIndex 151 | fl, err := fs.getReadHandle(fs.idx[idx].ID, oldFileIdx) 152 | if err != nil { 153 | return err 154 | } 155 | data := fs.getValidRecord(fl, fs.idx[idx].ID, fs.idx[idx].FileOffset, fs.idx[idx].Length) 156 | if data == nil { 157 | return errors.New("empty data was found") 158 | } 159 | 160 | offset, err := gi.file.Handle.Seek(0, 2) 161 | if err != nil { 162 | fs.log.Error("[QFS:%s:%d] Cannot move to datafile {%d} Error: %s", fs.name, fs.idx[idx].ID, 
gi.fileIndex, err.Error()) 163 | return err 164 | } 165 | err = saveDataFileData(gi.file.Handle, fs.idx[idx].ID, data) 166 | if err != nil { 167 | fs.log.Error("[QFS:%s:%d] Cannot append to datafile {%d} Error: %s", fs.name, fs.idx[idx].ID, gi.fileIndex, err.Error()) 168 | return err 169 | } 170 | fs.idx[idx].FileIndex = gi.fileIndex 171 | fs.idx[idx].FileOffset = uint32(offset) 172 | gi.recCount++ 173 | oldfileCnt := fs.freeCounts[oldFileIdx] 174 | oldfileCnt-- 175 | fs.freeCounts[oldFileIdx] = oldfileCnt 176 | if oldfileCnt == 0 { 177 | delete(fs.freeCounts, oldFileIdx) 178 | m := fs.readFiles[oldFileIdx] 179 | m.Handle.Close() 180 | delete(fs.readFiles, oldFileIdx) 181 | os.Remove(fs.folder + dataFileNameByID(oldFileIdx)) 182 | } 183 | return nil 184 | } 185 | -------------------------------------------------------------------------------- /queue/fstorage_io.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "os" 5 | "runtime" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | const ( 11 | handleInAction = iota 12 | handleInChanel 13 | ) 14 | 15 | type fileWrite struct { 16 | *os.File 17 | FileIdx StorageIdx 18 | } 19 | 20 | type filequeue struct { 21 | sync.RWMutex 22 | memMutex sync.Mutex 23 | storage *fileStorage 24 | state map[StorageIdx]int 25 | toOut chan *fileWrite 26 | } 27 | 28 | func createIOQueue(storage *fileStorage) *filequeue { 29 | return &filequeue{ 30 | state: make(map[StorageIdx]int, 16), 31 | toOut: make(chan *fileWrite, storage.options.MaxOneTimeOpenedFiles), 32 | storage: storage, 33 | } 34 | } 35 | 36 | func (fq *filequeue) getHandle(recordSize uint32, idx StorageIdx, timeout time.Duration) (*fileWrite, error) { 37 | fq.RLock() 38 | defer fq.RUnlock() 39 | var to <-chan time.Time 40 | if timeout != 0 { 41 | to = time.NewTimer(timeout).C 42 | } 43 | for { 44 | select { 45 | case wrk := <-fq.toOut: 46 | if wrk.File == nil { 47 | continue 48 | } 49 | offset, err := wrk.Seek(0, 1) 50 | 
if err == nil && offset+int64(recordSize) < fq.storage.options.MaxDataFileSize { 51 | fq.memMutex.Lock() 52 | fq.state[wrk.FileIdx] = handleInAction 53 | fq.memMutex.Unlock() 54 | return wrk, nil 55 | } 56 | fq.storage.log.Trace("[FSQ:%s] Datafile {%d} is oversize. Will be closed...", fq.storage.name, idx) 57 | fq.memMutex.Lock() 58 | delete(fq.state, wrk.FileIdx) 59 | fq.memMutex.Unlock() 60 | wrk.Close() 61 | case <-to: 62 | return nil, &queueError{ErrorType: errorTimeOut} 63 | default: 64 | fq.memMutex.Lock() 65 | if len(fq.state) < int(fq.storage.options.MaxOneTimeOpenedFiles) { 66 | datFileName := fq.storage.folder + dataFileNameByID(idx) 67 | file, err := os.Create(datFileName) 68 | if err != nil { 69 | fq.storage.log.Error("[FSQ:%s] Cannot create datafile {%d} Error: %s", fq.storage.name, idx, err.Error()) 70 | return nil, err 71 | } 72 | fq.storage.log.Trace("[FSQ:%s] Create datafile {%d}", fq.storage.name, idx) 73 | saveDataFileHeader(file) 74 | tmp := &fileWrite{ 75 | File: file, 76 | FileIdx: idx, 77 | } 78 | fq.state[idx] = handleInAction 79 | fq.memMutex.Unlock() 80 | return tmp, nil 81 | } 82 | fq.memMutex.Unlock() 83 | runtime.Gosched() 84 | } 85 | } 86 | } 87 | 88 | func (fq *filequeue) putHandle(handle *fileWrite, err error) { 89 | fq.memMutex.Lock() 90 | defer fq.memMutex.Unlock() 91 | fq.storage.log.Trace("[FSQ:%s] Received {%d}", fq.storage.name, handle.FileIdx) 92 | if err == nil { 93 | fq.toOut <- handle 94 | } 95 | if err == nil && handle.File != nil { 96 | fq.state[handle.FileIdx] = handleInChanel 97 | } else { 98 | delete(fq.state, handle.FileIdx) 99 | if handle.File != nil { 100 | handle.Close() 101 | } 102 | } 103 | 104 | } 105 | 106 | func (fq *filequeue) canClear(idx StorageIdx) bool { 107 | fq.memMutex.Lock() 108 | defer fq.memMutex.Unlock() 109 | fq.storage.log.Trace("[FSQ:%s] Received request to delete {%d}", fq.storage.name, idx) 110 | if _, ok := fq.state[idx]; !ok { 111 | fq.storage.log.Error("[FSQ:%s] Not information about this 
file {%d}", fq.storage.name, idx) 112 | return true 113 | } 114 | return false 115 | } 116 | 117 | func (fq *filequeue) free() { 118 | fq.memMutex.Lock() 119 | defer fq.memMutex.Unlock() 120 | close(fq.toOut) 121 | for wrk := range fq.toOut { 122 | wrk.Close() 123 | } 124 | fq.state = nil 125 | } 126 | 127 | func (fq *filequeue) clear() { 128 | fq.memMutex.Lock() 129 | defer fq.memMutex.Unlock() 130 | close(fq.toOut) 131 | for handle := range fq.toOut { 132 | handle.Close() 133 | } 134 | fq.state = make(map[StorageIdx]int, 16) 135 | fq.toOut = make(chan *fileWrite, cap(fq.toOut)) 136 | } 137 | -------------------------------------------------------------------------------- /queue/fstorage_restore.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "hash/crc32" 7 | "io" 8 | "io/ioutil" 9 | "os" 10 | ) 11 | 12 | func (fs *fileStorage) restoreStorageFile(FileIdx StorageIdx) (err error) { 13 | var buf [16]byte 14 | OneRecordProcessed := false 15 | Handle, err := os.Open(fs.folder + dataFileNameByID(FileIdx)) 16 | if err != nil { 17 | return err 18 | } 19 | defer func() { 20 | err = Handle.Close() 21 | }() 22 | _, err = io.ReadFull(Handle, buf[:8]) 23 | if err != nil { 24 | return err 25 | } 26 | if binary.LittleEndian.Uint64(buf[:]) != uint64(magicNumberDataValue) { 27 | return errors.New("not found magic header") 28 | } 29 | for { 30 | offset, err := Handle.Seek(0, 1) 31 | if err != nil { 32 | break 33 | } 34 | _, err = io.ReadFull(Handle, buf[:]) 35 | if err != nil { 36 | break 37 | } 38 | z := binary.LittleEndian.Uint32(buf[:]) 39 | if z != magicNumberDataPrefix { 40 | break 41 | } 42 | ID := StorageIdx(binary.LittleEndian.Uint64(buf[4:])) 43 | length := binary.LittleEndian.Uint32(buf[12:]) 44 | outbuf := make([]byte, length) 45 | _, err = io.ReadFull(Handle, outbuf) 46 | if err != nil { 47 | break 48 | } 49 | _, err = io.ReadFull(Handle, buf[:8]) 50 | if err 
!= nil { 51 | break 52 | } 53 | crc := crc32.ChecksumIEEE(outbuf) 54 | z = binary.LittleEndian.Uint32(buf[:]) 55 | skip := crc != z 56 | z = binary.LittleEndian.Uint32(buf[4:]) 57 | if z != magicNumberDataSuffix { 58 | break 59 | } 60 | if skip { 61 | continue 62 | } 63 | idx, err := fs.newID() 64 | if err != nil { 65 | return nil 66 | } 67 | tmp := indexRecord{ 68 | FileIndex: FileIdx, 69 | FileOffset: uint32(offset), 70 | Length: int32(length), 71 | ID: ID, 72 | } 73 | fs.idx[idx] = tmp 74 | fs.freeCounts[FileIdx]++ 75 | OneRecordProcessed = true 76 | } 77 | if !OneRecordProcessed { 78 | return errors.New("not records was found") 79 | } 80 | return nil 81 | } 82 | 83 | // restoreIndexFile tries restore and repair index file of the storage 84 | func (fs *fileStorage) restoreIndexFile() error { 85 | IsOneFileProcessed := false 86 | if err := fs.prepareIndexFile(); err != nil { 87 | return err 88 | } 89 | path := fs.folder[:len(fs.folder)-1] 90 | listFiles, err := ioutil.ReadDir(path) 91 | if err == nil { 92 | for _, finfo := range listFiles { 93 | if finfo.IsDir() { 94 | continue 95 | } 96 | fname := finfo.Name() 97 | if fname == "index.dat" { 98 | continue 99 | } 100 | idx := checkValidFileDataName(fname) 101 | if idx == -1 { 102 | fn := fs.folder + fname 103 | fs.log.Info("Remove unknown file %s", fn) 104 | err = os.Remove(fn) 105 | if err != nil { 106 | return err 107 | } 108 | continue 109 | } 110 | err = fs.restoreStorageFile(StorageIdx(idx)) 111 | if err != nil { 112 | fn := fs.folder + fname 113 | fs.log.Info("Remove unknown file %s", fn) 114 | err = os.Remove(fn) 115 | if err != nil { 116 | return err 117 | } 118 | continue 119 | } 120 | IsOneFileProcessed = true 121 | } 122 | } 123 | if !IsOneFileProcessed { 124 | return errors.New("not found any valid storage file") 125 | } 126 | return nil 127 | } 128 | 129 | // deleteUnusedFiles deletes all files from folder which is not have linked inforation in the index file 130 | func (fs *fileStorage) 
deleteUnusedFiles() { 131 | path := fs.folder[:len(fs.folder)-1] 132 | listFiles, err := ioutil.ReadDir(path) 133 | if err == nil { 134 | for _, finfo := range listFiles { 135 | if finfo.IsDir() { 136 | continue 137 | } 138 | fname := finfo.Name() 139 | if fname == "index.dat" { 140 | continue 141 | } 142 | idx := checkValidFileDataName(fname) 143 | if idx == -1 { 144 | fn := fs.folder + fname 145 | fs.log.Info("Remove unknown file %s", fn) 146 | os.Remove(fn) 147 | continue 148 | } 149 | _, ok := fs.freeCounts[StorageIdx(idx)] 150 | if !ok { 151 | fn := fs.folder + fname 152 | fs.log.Info("Remove unknown file %s", fn) 153 | os.Remove(fn) 154 | } 155 | } 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /queue/fstorage_test.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "os" 8 | "runtime" 9 | "sync" 10 | "testing" 11 | "time" 12 | ) 13 | 14 | func clearTestFolder() { 15 | os.RemoveAll(TestFolder) 16 | os.MkdirAll(TestFolder, 040777) 17 | os.MkdirAll(logFolder, 040777) 18 | } 19 | 20 | var TestStrings = [10]string{ 21 | "one", 22 | "Two", 23 | "Three", 24 | "Four", 25 | "Five", 26 | "Six", 27 | "Seven", 28 | "Eight", 29 | "None", 30 | "Ten", 31 | } 32 | 33 | func readString(r io.Reader) string { 34 | if r == nil { 35 | return "" 36 | } 37 | buf := bufio.NewScanner(r) 38 | buf.Scan() 39 | return buf.Text() 40 | } 41 | 42 | func MySleep(ms time.Duration) { 43 | m := time.After(ms * time.Millisecond) 44 | <-m 45 | } 46 | 47 | func TestCreateNewStorage(t *testing.T) { 48 | clearTestFolder() 49 | fs, err := createStorage("Test", TestFolder, nil, nil, 0, nil) 50 | if err != nil { 51 | t.Fatalf("Cannot create storage: %s", err) 52 | } 53 | err = fs.Close() 54 | if err != nil { 55 | t.Fatalf("Cannot close storage: %s", err) 56 | } 57 | } 58 | 59 | func TestDecreaseSizeOfIndexFile(t *testing.T) { 60 | 61 | 
clearTestFolder() 62 | fs, err := createStorage("Test", TestFolder, nil, nil, 0, nil) 63 | if err != nil { 64 | t.Fatalf("Cannot create storage: %s", err) 65 | } 66 | var buffer [50000]byte 67 | for i := 0; i < 2000; i++ { 68 | fs.Put(buffer[:]) 69 | } 70 | i := 0 71 | var z *QueueItem 72 | for i = 0; i < 2000; i++ { 73 | z, err = fs.Get() 74 | if err != nil { 75 | t.Fatalf("Error reading from storage: %s", err) 76 | } 77 | fs.FreeRecord(z.idx) 78 | } 79 | err = fs.Close() 80 | if err != nil { 81 | t.Fatalf("Cannot close storage: %s", err) 82 | } 83 | stat, err1 := os.Stat(TestFolder + "index.dat") 84 | if err1 != nil { 85 | t.Fatalf("Index file was not found") 86 | } 87 | if stat.Size() > 1<<15 { 88 | t.Fatalf("Index file is very large") 89 | } 90 | 91 | } 92 | 93 | func TestFillStorage(t *testing.T) { 94 | var buffer [50000]byte 95 | clearTestFolder() 96 | options := DefaultStorageOptions 97 | options.MaxDataFileSize = 301000 98 | fs, err := createStorage("Test", TestFolder, nil, &options, 0, nil) 99 | if err != nil { 100 | t.Fatalf("Cannot create storage: %s", err) 101 | } 102 | for i := 0; i < 7; i++ { 103 | fs.Put(buffer[:]) 104 | } 105 | err = fs.Close() 106 | if err != nil { 107 | t.Fatalf("Cannot close storage: %s", err) 108 | } 109 | _, err = os.Stat(TestFolder + "index.dat") 110 | if err != nil { 111 | t.Fatalf("Not found index file: %s", err) 112 | } 113 | _, err = os.Stat(TestFolder + dataFileNameByID(0)) 114 | if err != nil { 115 | t.Fatalf("Not found first data file: %s", err) 116 | } 117 | _, err = os.Stat(TestFolder + dataFileNameByID(6)) 118 | if err != nil { 119 | t.Fatalf("Not found second data file: %s", err) 120 | } 121 | } 122 | 123 | func TestFillIncrementIndex(t *testing.T) { 124 | var buffer [100]byte 125 | clearTestFolder() 126 | options := DefaultStorageOptions 127 | options.MaxDataFileSize = 301000 128 | fs, err := createStorage("Test", TestFolder, nil, &options, 0, nil) 129 | if err != nil { 130 | t.Fatalf("Cannot create storage: %s", 
err) 131 | } 132 | stat, _ := os.Stat(TestFolder + "index.dat") 133 | reccount := int64(0x8000) 134 | if stat.Size() != reccount { 135 | t.Fatalf("Invalid size of the index file") 136 | } 137 | for i := 0; i < 1000; i++ { 138 | fs.Put(buffer[:]) 139 | } 140 | if stat.Size() != 0x8000 { 141 | t.Fatalf("Invalid size of the index file") 142 | } 143 | for i := 0; i < 1000; i++ { 144 | fs.Put(buffer[:]) 145 | } 146 | err = fs.Close() 147 | if err != nil { 148 | t.Fatalf("Cannot close storage: %s", err) 149 | } 150 | stat, err = os.Stat(TestFolder + "index.dat") 151 | if err != nil { 152 | t.Fatalf("Not found index file: %s", err) 153 | } 154 | if stat.Size() < 0xffff { 155 | t.Fatalf("Not incremented index file") 156 | } 157 | 158 | } 159 | 160 | func TestFillingData(t *testing.T) { 161 | 162 | var z *QueueItem 163 | 164 | clearTestFolder() 165 | options := DefaultStorageOptions 166 | fs, err := createStorage("Test", TestFolder, nil, nil, 0, nil) 167 | if err != nil { 168 | t.Fatalf("Cannot create storage: %s", err) 169 | } 170 | for i := 0; i < 500; i++ { 171 | fs.Put([]byte("Test of the work with all structures")) 172 | } 173 | for i := 0; i < 500; i++ { 174 | z, err = fs.Get() 175 | if err != nil { 176 | t.Fatalf("Error reading from storage: %s", err) 177 | } 178 | fs.FreeRecord(z.idx) 179 | } 180 | for i := 0; i < 1000; i++ { 181 | fs.Put([]byte("Test of the work with all structures")) 182 | } 183 | for _, k := range TestStrings { 184 | fs.Put([]byte(k)) 185 | } 186 | for i := 0; i < 1000; i++ { 187 | z, err = fs.Get() 188 | if err != nil { 189 | t.Fatalf("Error reading from storage: %s", err) 190 | } 191 | fs.FreeRecord(z.idx) 192 | } 193 | if fs.Count() != 10 { 194 | t.Fatalf("Not release full storage: ") 195 | } 196 | odd := 0 197 | for z, err = fs.Get(); err == nil; z, err = fs.Get() { 198 | if TestStrings[odd] != readString(z.Stream) { 199 | t.Fatalf("Invalid value of the storage\n") 200 | } 201 | odd++ 202 | } 203 | if err != nil { 204 | if _, ok := 
err.(*queueError); !ok { 205 | t.Fatalf("Error reading from storage: %s", err) 206 | } 207 | } 208 | fs.info() 209 | err = fs.Close() 210 | if err != nil { 211 | t.Fatalf("Cannot close storage: %s", err) 212 | } 213 | fs, err = createStorage("Test", TestFolder, nil, &options, 0, nil) 214 | if err != nil { 215 | t.Fatalf("Cannot create storage: %s", err) 216 | } 217 | odd = 0 218 | for z, _ := fs.Get(); err == nil; z, err = fs.Get() { 219 | if TestStrings[odd] != readString(z.Stream) { 220 | t.Fatalf("Invalid value of the storage\n") 221 | } 222 | err = fs.FreeRecord(z.idx) 223 | if err != nil { 224 | t.Fatalf("Error free record: %s", err) 225 | } 226 | odd++ 227 | } 228 | if err != nil { 229 | if _, ok := err.(*queueError); !ok { 230 | t.Fatalf("Error reading from storage: %s", err) 231 | } 232 | } 233 | for i := 0; i < 500; i++ { 234 | fs.Put([]byte("Test of the work with all structures")) 235 | } 236 | for i := 0; i < 500; i++ { 237 | z, err = fs.Get() 238 | if err != nil { 239 | t.Fatalf("Error reading from storage: %s", err) 240 | } 241 | fs.FreeRecord(z.idx) 242 | } 243 | if fs.Count() != 0 { 244 | t.Fatalf("Not release full storage: ") 245 | 246 | } 247 | err = fs.Close() 248 | if err != nil { 249 | t.Fatalf("Cannot close storage: %s", err) 250 | } 251 | } 252 | 253 | func TestWorkWithReleaseRecord(t *testing.T) { 254 | 255 | var z *QueueItem 256 | clearTestFolder() 257 | fs, err := createStorage("Test", TestFolder, nil, nil, 0, nil) 258 | if err != nil { 259 | t.Fatalf("Cannot create storage: %s", err) 260 | } 261 | for _, k := range TestStrings { 262 | fs.Put([]byte(k)) 263 | } 264 | z, err = fs.Get() 265 | fs.UnlockRecord(z.idx) 266 | z, err = fs.Get() 267 | testString := readString(z.Stream) 268 | if TestStrings[1] != testString { 269 | t.Fatalf("Invalid value in the storage. 
Returned: %s Must return second elementh because first is busy yet", testString) 270 | } 271 | MySleep(500) 272 | z, err = fs.Get() 273 | zz := readString(z.Stream) 274 | if TestStrings[0] != zz { 275 | t.Fatalf("Invalid value in the storage : %s. Must return first element because timeout expired ", zz) 276 | } 277 | err = fs.Close() 278 | if err != nil { 279 | t.Fatalf("Cannot close storage: %s", err) 280 | } 281 | } 282 | 283 | func TestWorkStorageSize(t *testing.T) { 284 | clearTestFolder() 285 | fs, err := createStorage("Test", TestFolder, nil, nil, 0, nil) 286 | if err != nil { 287 | t.Fatalf("Cannot create storage: %s", err) 288 | } 289 | for _, k := range TestStrings { 290 | fs.Put([]byte(k)) 291 | } 292 | if fs.Count() != 10 { 293 | t.Fatalf("Not release full storage: ") 294 | } 295 | r := 0 296 | for _, k := range TestStrings { 297 | r += len([]byte(k)) 298 | } 299 | if uint64(r) != fs.RecordsSize() { 300 | t.Fatalf("Invalid value of the storage\n") 301 | } 302 | fs.info() 303 | err = fs.Close() 304 | if err != nil { 305 | t.Fatalf("Cannot close storage: %s", err) 306 | } 307 | } 308 | 309 | func TestFillDeleteUnusedDataFiles(t *testing.T) { 310 | var buffer [2000]byte 311 | clearTestFolder() 312 | zzz := 99 313 | options := DefaultStorageOptions 314 | options.MaxDataFileSize = 200000 315 | options.CheckCRCOnRead = true 316 | fs, err := createStorage("Test", TestFolder, nil, &options, 0, nil) 317 | if err != nil { 318 | t.Fatalf("Cannot create storage: %s", err) 319 | } 320 | for i := 0; i < zzz; i++ { 321 | fs.Put(buffer[:]) 322 | } 323 | for i := 0; i < zzz-2; i++ { 324 | z, err := fs.Get() 325 | if err != nil { 326 | t.Fatalf("Error reading from storage: %s", err) 327 | } 328 | err = fs.FreeRecord(z.idx) 329 | if err != nil { 330 | t.Fatalf("Error free record: %s", err) 331 | } 332 | } 333 | z, err := fs.Get() 334 | if err != nil { 335 | t.Fatalf("Error reading from storage: %s", err) 336 | } 337 | err = fs.FreeRecord(z.idx) 338 | if err != nil { 339 | 
t.Fatalf("Error free record: %s", err) 340 | } 341 | z, err = fs.Get() 342 | if err != nil { 343 | t.Fatalf("Error reading from storage: %s", err) 344 | } 345 | fs.FreeRecord(z.idx) 346 | err = fs.Close() 347 | if err != nil { 348 | t.Fatalf("Cannot close storage: %s", err) 349 | } 350 | _, err = os.Stat(TestFolder + "stg00000.dat") 351 | if err == nil { 352 | t.Fatalf("File with unused data was not delete") 353 | } 354 | } 355 | 356 | func TestErrors(t *testing.T) { 357 | 358 | var z *QueueItem 359 | clearTestFolder() 360 | fs, err := createStorage("Test", TestFolder, nil, nil, 0, nil) 361 | if err != nil { 362 | t.Fatalf("Cannot create storage: %s", err) 363 | } 364 | for zq, k := range TestStrings { 365 | if zq > 0 { 366 | break 367 | } 368 | fs.Put([]byte(k)) 369 | } 370 | z, err = fs.Get() 371 | if err != nil { 372 | t.Fatalf("Cannot receive item. Err: %v", err) 373 | } 374 | fs.UnlockRecord(z.idx) 375 | z, err = fs.Get() 376 | if err != nil { 377 | if _, ok := err.(*queueError); !ok { 378 | t.Fatalf("Must create filestorage error, but receive %v", err) 379 | } 380 | } else { 381 | t.Fatalf("Must create filestorage error") 382 | } 383 | MySleep(500) 384 | fs.Get() 385 | _, err = fs.Get() 386 | if err != nil { 387 | if _, ok := err.(*queueError); !ok { 388 | t.Fatalf("Must create filestorage error, but receive %v", err) 389 | } 390 | } else { 391 | t.Fatalf("Must create filestorage error") 392 | } 393 | err = fs.Close() 394 | if err != nil { 395 | t.Fatalf("Cannot close storage: %s", err) 396 | } 397 | } 398 | 399 | func TestParralels(t *testing.T) { 400 | 401 | clearTestFolder() 402 | var tmp [0x3fff]byte 403 | fs, err := createStorage("Test", TestFolder, nil, nil, 0, nil) 404 | if err != nil { 405 | t.Fatalf("Cannot create storage: %s", err) 406 | } 407 | var wg sync.WaitGroup 408 | for i := int64(0); i < 20; i++ { 409 | wg.Add(1) 410 | go func(id int64) { 411 | defer wg.Done() 412 | for i := 0; i < 100; i++ { 413 | fs.Put(tmp[:]) 414 | 
fs.Put([]byte(fmt.Sprintf("Routine:%d Iteration %d", id, i))) 415 | } 416 | }(i) 417 | } 418 | wg.Wait() 419 | for i := 0; i < 5; i++ { 420 | wg.Add(1) 421 | go func(id int) { 422 | defer wg.Done() 423 | for z, err := fs.Get(); err == nil; z, err = fs.Get() { 424 | fs.FreeRecord(z.idx) 425 | } 426 | }(i) 427 | } 428 | wg.Wait() 429 | err = fs.Close() 430 | if err != nil { 431 | t.Fatalf("Cannot close storage: %s", err) 432 | } 433 | } 434 | 435 | func TestWorkStorageCheckMoveErroredMessages(t *testing.T) { 436 | clearTestFolder() 437 | fs, err := createStorage("Test", TestFolder, nil, nil, 0, nil) 438 | if err != nil { 439 | t.Fatalf("Cannot create storage: %s", err) 440 | } 441 | 442 | _, err = fs.Put(make([]byte, 1000)) 443 | if err != nil { 444 | t.Fatalf("Cannot put to storage: %s", err) 445 | } 446 | _, err = fs.Put(make([]byte, 1000)) 447 | if err != nil { 448 | t.Fatalf("Cannot put to storage: %s", err) 449 | } 450 | _, err = fs.Put(make([]byte, 1000)) 451 | if err != nil { 452 | t.Fatalf("Cannot put to storage: %s", err) 453 | } 454 | a, err := fs.Get() 455 | if err != nil { 456 | t.Fatalf("Cannot get from storage: %s", err) 457 | } 458 | b, err := fs.Get() 459 | if err != nil { 460 | t.Fatalf("Cannot get from storage: %s", err) 461 | } 462 | err = fs.UnlockRecord(a.idx) 463 | if err != nil { 464 | t.Fatalf("Cannot unlock record: %s", err) 465 | } 466 | err = fs.FreeRecord(b.idx) 467 | if err != nil { 468 | t.Fatalf("Cannot free record: %s", err) 469 | } 470 | _, err = fs.Get() 471 | if err != nil { 472 | t.Fatal("Must not be error. C element must be returned") 473 | } 474 | _, err = fs.Get() 475 | if err == nil { 476 | t.Fatal("Must be error. 'Will available when' must be") 477 | } 478 | e := err.Error() 479 | myerr, ok := err.(*queueError) 480 | if !ok || myerr.ErrorType != errorInDelay { 481 | t.Fatalf("Must be internal error. 'Will available when' must be. 
Found:%s", e) 482 | } 483 | time.Sleep(myerr.NextAvailable) 484 | a, err = fs.Get() 485 | if err != nil { 486 | t.Fatal("Must be return valid value") 487 | } 488 | if a.idx != 1 { 489 | t.Fatalf("Must be return index value 1, returned: %d", a.idx) 490 | } 491 | err = fs.Close() 492 | if err != nil { 493 | t.Fatalf("Cannot close storage: %s", err) 494 | } 495 | } 496 | 497 | func TestWorkStorageRestore(t *testing.T) { 498 | clearTestFolder() 499 | fs, err := createStorage("Test", TestFolder, nil, nil, 0, nil) 500 | if err != nil { 501 | t.Fatalf("Cannot create storage: %s", err) 502 | } 503 | for _, k := range TestStrings { 504 | fs.Put([]byte(k)) 505 | } 506 | err = fs.Close() 507 | if err != nil { 508 | t.Fatalf("Cannot close storage: %s", err) 509 | } 510 | os.Remove(TestFolder + "index.dat") 511 | Handle, _ := os.Create(TestFolder + dataFileNameByID(100)) 512 | Handle.Close() 513 | Handle, _ = os.Create(TestFolder + "test" + dataFileNameByID(100)) 514 | Handle.Close() 515 | fs, err = createStorage("Test", TestFolder, nil, nil, 0, nil) 516 | if err != nil { 517 | t.Fatalf("Cannot create storage: %s", err) 518 | } 519 | if fs.Count() != uint64(len(TestStrings)) { 520 | t.Fatalf("Put %d messages. Restored: %d", len(TestStrings), fs.Count()) 521 | } 522 | err = fs.Close() 523 | if err != nil { 524 | t.Fatalf("Cannot close storage: %s", err) 525 | } 526 | Handle, _ = os.Create(TestFolder + dataFileNameByID(100)) 527 | Handle.Close() 528 | Handle, _ = os.Create(TestFolder + "test" + dataFileNameByID(100)) 529 | Handle.Close() 530 | 531 | fs, err = createStorage("Test", TestFolder, nil, nil, 0, nil) 532 | if err != nil { 533 | t.Fatalf("Cannot create storage: %s", err) 534 | } 535 | if fs.Count() != uint64(len(TestStrings)) { 536 | t.Fatalf("Put %d messages. 
Restored: %d", len(TestStrings), fs.Count()) 537 | } 538 | err = fs.Close() 539 | if err != nil { 540 | t.Fatalf("Cannot close storage: %s", err) 541 | } 542 | } 543 | 544 | var TestFolder string 545 | var logFolder string 546 | 547 | func init() { 548 | tmp := normalizeFilePath(os.TempDir()) 549 | if runtime.GOOS == "windows" { 550 | TestFolder = tmp + "queue\\" 551 | logFolder = tmp + "queue\\log\\" 552 | } else { 553 | TestFolder = tmp + "queue/" 554 | logFolder = tmp + "queue/log/" 555 | } 556 | 557 | } 558 | -------------------------------------------------------------------------------- /queue/internal/logging/logging.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "path" 8 | "runtime" 9 | "strconv" 10 | "sync/atomic" 11 | "time" 12 | ) 13 | 14 | const ( 15 | logComMessage = iota 16 | logComClose 17 | ) 18 | 19 | const ( 20 | logInfoTypeError = iota 21 | logInfoTypeWarning 22 | logInfoTypeInfo 23 | logInfoTypeTrace 24 | ) 25 | 26 | type logMessage struct { 27 | command int 28 | message string 29 | } 30 | 31 | //Logger is structure for internal use 32 | type Logger struct { 33 | chanal chan logMessage 34 | isOuted chan struct{} 35 | file *os.File 36 | canwrite int32 37 | fileName string 38 | maxFileSize int64 39 | level byte 40 | } 41 | 42 | func addPrefix(infotype int, msg string, a ...interface{}) string { 43 | var m string 44 | switch infotype { 45 | case logInfoTypeError: 46 | m = " [E] " + msg 47 | case logInfoTypeWarning: 48 | m = " [W] " + msg 49 | case logInfoTypeTrace: 50 | m = " [T] " + msg 51 | default: 52 | m = " [I] " + msg 53 | } 54 | return fmt.Sprintf(m, a...) 
55 | } 56 | 57 | func intToStrWithZero(Num, count int) string { 58 | const zeros string = "000000000000000000000000000000" 59 | str := strconv.Itoa(Num) 60 | return zeros[1:count-len(str)+1] + str 61 | } 62 | 63 | func (logger *Logger) startRotation() { 64 | for { 65 | //time.Sleep(100) 66 | data, ok := <-logger.chanal 67 | if !ok || data.command == logComClose { 68 | break 69 | } 70 | logger.saveMessage(data.message) 71 | } 72 | logger.isOuted <- struct{}{} 73 | } 74 | 75 | func (logger *Logger) saveHeader() { 76 | writeStrToFile(logger.file, "") 77 | writeStrToFile(logger.file, "----- Started -----") 78 | writeStrToFile(logger.file, fmt.Sprintf("NumCPU:[%d] OS:[%s] Arch:[%s]", runtime.NumCPU(), runtime.GOOS, runtime.GOARCH)) 79 | writeStrToFile(logger.file, "") 80 | writeStrToFile(logger.file, "-------------------") 81 | } 82 | 83 | func writeStrToFile(file *os.File, message string) error { 84 | str := time.Now().Format("2006-01-02 15:04:05.000") + message + "\n" 85 | buffer := []byte(str) 86 | if _, err := file.Write(buffer); err != nil { 87 | return err 88 | } 89 | return nil 90 | } 91 | 92 | func (logger *Logger) saveMessage(msg string) error { 93 | pos, err := logger.file.Seek(0, 2) 94 | if err != nil { 95 | return err 96 | } 97 | if pos > logger.maxFileSize { 98 | logger.file.Close() 99 | ext := path.Ext(logger.fileName) 100 | bakfile := logger.fileName[0:len(logger.fileName)-len(ext)] + ".bak" 101 | if err = os.Remove(bakfile); err == nil || os.IsNotExist(err) { 102 | os.Rename(logger.fileName, bakfile) 103 | } 104 | logger.file, err = os.OpenFile(logger.fileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644) 105 | if err != nil { 106 | atomic.StoreInt32(&logger.canwrite, 0) 107 | return err 108 | } 109 | logger.saveHeader() 110 | } 111 | 112 | if err = writeStrToFile(logger.file, msg); err != nil { 113 | atomic.StoreInt32(&logger.canwrite, 0) 114 | return err 115 | } 116 | return nil 117 | } 118 | 119 | //CreateLog is created new logging system 120 | func 
CreateLog(fileName string, maxFileSize int64, level byte) (logger *Logger, err error) {
	logger = new(Logger)
	logger.fileName = fileName
	logger.maxFileSize = maxFileSize
	logger.level = level
	// canwrite and err start at their zero values; the redundant explicit
	// assignments were removed.
	logger.chanal = make(chan logMessage, 1000)
	logger.isOuted = make(chan struct{})
	logger.file, err = os.OpenFile(logger.fileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		return nil, err
	}
	atomic.StoreInt32(&logger.canwrite, 1)
	logger.saveHeader()
	go logger.startRotation()
	return
}

// infoout formats the message with a level prefix and queues it for the
// writer goroutine. A nil receiver falls back to the standard library
// logger; once writing has been disabled after an I/O error
// (canwrite == 0) messages are silently dropped.
func (logger *Logger) infoout(infotype, id int, msg string, a ...interface{}) {
	m := addPrefix(infotype, msg, a...)
	if logger == nil {
		log.Println(m)
		return
	}
	// LoadInt32 expresses the atomic read directly; the original
	// AddInt32(&x, 0) was an obscure way to achieve the same thing.
	if atomic.LoadInt32(&logger.canwrite) == 0 {
		return
	}
	logger.chanal <- logMessage{
		command: logComMessage,
		message: m,
	}
}

// Trace outputs the message into log with Trace level
func (logger *Logger) Trace(msg string, a ...interface{}) {
	logger.infoout(logInfoTypeTrace, 0, msg, a...)
}

// Warning outputs the message into log with Warning level
func (logger *Logger) Warning(msg string, a ...interface{}) {
	logger.infoout(logInfoTypeWarning, 0, msg, a...)
}

// Error outputs the message into log with Error level
func (logger *Logger) Error(msg string, a ...interface{}) {
	logger.infoout(logInfoTypeError, 0, msg, a...)
}

// Info outputs the message into log with Info level
func (logger *Logger) Info(msg string, a ...interface{}) {
	logger.infoout(logInfoTypeInfo, 0, msg, a...)
172 | } 173 | 174 | // Close closes all opened handles and stop logging 175 | func (logger *Logger) Close() { 176 | 177 | logger.chanal <- logMessage{ 178 | command: logComClose, 179 | message: "", 180 | } 181 | <-logger.isOuted 182 | logger.file.Close() 183 | 184 | } 185 | -------------------------------------------------------------------------------- /queue/internal/mmap/mmap.go: -------------------------------------------------------------------------------- 1 | // Copyright 2011 Evan Shaw. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | // This file defines the common package interface and contains a little bit of 6 | // factored out logic. 7 | 8 | // Package mmap allows mapping files into memory. It tries to provide a simple, reasonably portable interface, 9 | // but doesn't go out of its way to abstract away every little platform detail. 10 | // This specifically means: 11 | // * forked processes may or may not inherit mappings 12 | // * a file's timestamp may or may not be updated by writes through mappings 13 | // * specifying a size larger than the file's actual size can increase the file's size 14 | // * If the mapped file is being modified by another process while your program's running, don't expect consistent results between platforms 15 | package mmap 16 | 17 | import ( 18 | "errors" 19 | "os" 20 | "reflect" 21 | "unsafe" 22 | ) 23 | 24 | const ( 25 | // RDONLY maps the memory read-only. 26 | // Attempts to write to the MMap object will result in undefined behavior. 27 | RDONLY = 0 28 | // RDWR maps the memory as read-write. Writes to the MMap object will update the 29 | // underlying file. 30 | RDWR = 1 << iota 31 | // COPY maps the memory as copy-on-write. Writes to the MMap object will affect 32 | // memory, but the underlying file will remain unchanged. 33 | COPY 34 | // EXEC if is set, the mapped memory is marked as executable. 
35 | EXEC 36 | ) 37 | 38 | const ( 39 | // ANON If the flag is set, the mapped memory will not be backed by a file. 40 | ANON = 1 << iota 41 | ) 42 | 43 | // MMap represents a file mapped into memory. 44 | type MMap []byte 45 | 46 | // Map maps an entire file into memory. 47 | // If ANON is set in flags, f is ignored. 48 | func Map(f *os.File, prot, flags int) (MMap, error) { 49 | return MapRegion(f, -1, prot, flags, 0) 50 | } 51 | 52 | // MapRegion maps part of a file into memory. 53 | // The offset parameter must be a multiple of the system's page size. 54 | // If length < 0, the entire file will be mapped. 55 | // If ANON is set in flags, f is ignored. 56 | func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) { 57 | var fd uintptr 58 | if flags&ANON == 0 { 59 | fd = uintptr(f.Fd()) 60 | if length < 0 { 61 | fi, err := f.Stat() 62 | if err != nil { 63 | return nil, err 64 | } 65 | length = int(fi.Size()) 66 | } 67 | } else { 68 | if length <= 0 { 69 | return nil, errors.New("anonymous mapping requires non-zero length") 70 | } 71 | fd = ^uintptr(0) 72 | } 73 | return mmap(length, uintptr(prot), uintptr(flags), fd, offset) 74 | } 75 | 76 | func (m *MMap) header() *reflect.SliceHeader { 77 | return (*reflect.SliceHeader)(unsafe.Pointer(m)) 78 | } 79 | 80 | // Lock keeps the mapped region in physical memory, ensuring that it will not be 81 | // swapped out. 82 | func (m MMap) Lock() error { 83 | dh := m.header() 84 | return lock(dh.Data, uintptr(dh.Len)) 85 | } 86 | 87 | // Unlock reverses the effect of Lock, allowing the mapped region to potentially 88 | // be swapped out. 89 | // If m is already unlocked, aan error will result. 90 | func (m MMap) Unlock() error { 91 | dh := m.header() 92 | return unlock(dh.Data, uintptr(dh.Len)) 93 | } 94 | 95 | // Flush synchronizes the mapping's contents to the file's contents on disk. 
96 | func (m MMap) Flush() error { 97 | dh := m.header() 98 | return flush(dh.Data, uintptr(dh.Len)) 99 | } 100 | 101 | // Unmap deletes the memory mapped region, flushes any remaining changes, and sets 102 | // m to nil. 103 | // Trying to read or write any remaining references to m after Unmap is called will 104 | // result in undefined behavior. 105 | // Unmap should only be called on the slice value that was originally returned from 106 | // a call to Map. Calling Unmap on a derived slice may cause errors. 107 | func (m *MMap) Unmap() error { 108 | dh := m.header() 109 | err := unmap(dh.Data, uintptr(dh.Len)) 110 | *m = nil 111 | return err 112 | } 113 | -------------------------------------------------------------------------------- /queue/internal/mmap/mmap_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2011 Evan Shaw. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | // These tests are adapted from gommap: http://labix.org/gommap 6 | // Copyright (c) 2010, Gustavo Niemeyer 7 | 8 | package mmap 9 | 10 | import ( 11 | "bytes" 12 | "io/ioutil" 13 | "os" 14 | "path/filepath" 15 | "testing" 16 | ) 17 | 18 | var testData = []byte("0123456789ABCDEF") 19 | var testPath = filepath.Join(os.TempDir(), "testdata") 20 | 21 | func init() { 22 | f := openFile(os.O_RDWR | os.O_CREATE | os.O_TRUNC) 23 | f.Write(testData) 24 | f.Close() 25 | } 26 | 27 | func openFile(flags int) *os.File { 28 | f, err := os.OpenFile(testPath, flags, 0644) 29 | if err != nil { 30 | panic(err.Error()) 31 | } 32 | return f 33 | } 34 | 35 | func TestUnmap(t *testing.T) { 36 | f := openFile(os.O_RDONLY) 37 | defer f.Close() 38 | mmap, err := Map(f, RDONLY, 0) 39 | if err != nil { 40 | t.Errorf("error mapping: %s", err) 41 | } 42 | if err := mmap.Unmap(); err != nil { 43 | t.Errorf("error unmapping: %s", err) 44 | } 45 | } 46 | 47 | func TestReadWrite(t *testing.T) { 48 | f := openFile(os.O_RDWR) 49 | defer f.Close() 50 | mmap, err := Map(f, RDWR, 0) 51 | if err != nil { 52 | t.Errorf("error mapping: %s", err) 53 | } 54 | defer mmap.Unmap() 55 | if !bytes.Equal(testData, mmap) { 56 | t.Errorf("mmap != testData: %q, %q", mmap, testData) 57 | } 58 | 59 | mmap[9] = 'X' 60 | mmap.Flush() 61 | 62 | fileData, err := ioutil.ReadAll(f) 63 | if err != nil { 64 | t.Errorf("error reading file: %s", err) 65 | } 66 | if !bytes.Equal(fileData, []byte("012345678XABCDEF")) { 67 | t.Errorf("file wasn't modified") 68 | } 69 | 70 | // leave things how we found them 71 | mmap[9] = '9' 72 | mmap.Flush() 73 | } 74 | 75 | func TestProtFlagsAndErr(t *testing.T) { 76 | f := openFile(os.O_RDONLY) 77 | defer f.Close() 78 | if _, err := Map(f, RDWR, 0); err == nil { 79 | t.Errorf("expected error") 80 | } 81 | } 82 | 83 | func TestFlags(t *testing.T) { 84 | f := openFile(os.O_RDWR) 85 | defer f.Close() 86 | mmap, err := Map(f, COPY, 0) 87 | if err != nil { 88 | t.Errorf("error mapping: 
%s", err) 89 | } 90 | defer mmap.Unmap() 91 | 92 | mmap[9] = 'X' 93 | mmap.Flush() 94 | 95 | fileData, err := ioutil.ReadAll(f) 96 | if err != nil { 97 | t.Errorf("error reading file: %s", err) 98 | } 99 | if !bytes.Equal(fileData, testData) { 100 | t.Errorf("file was modified") 101 | } 102 | } 103 | 104 | // Test that we can map files from non-0 offsets 105 | // The page size on most Unixes is 4KB, but on Windows it's 64KB 106 | func TestNonZeroOffset(t *testing.T) { 107 | const pageSize = 65536 108 | 109 | // Create a 2-page sized file 110 | bigFilePath := filepath.Join(os.TempDir(), "nonzero") 111 | fileobj, err := os.OpenFile(bigFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) 112 | if err != nil { 113 | panic(err.Error()) 114 | } 115 | 116 | bigData := make([]byte, 2*pageSize, 2*pageSize) 117 | fileobj.Write(bigData) 118 | fileobj.Close() 119 | 120 | // Map the first page by itself 121 | fileobj, err = os.OpenFile(bigFilePath, os.O_RDONLY, 0) 122 | if err != nil { 123 | panic(err.Error()) 124 | } 125 | m, err := MapRegion(fileobj, pageSize, RDONLY, 0, 0) 126 | if err != nil { 127 | t.Errorf("error mapping file: %s", err) 128 | } 129 | m.Unmap() 130 | fileobj.Close() 131 | 132 | // Map the second page by itself 133 | fileobj, err = os.OpenFile(bigFilePath, os.O_RDONLY, 0) 134 | if err != nil { 135 | panic(err.Error()) 136 | } 137 | m, err = MapRegion(fileobj, pageSize, RDONLY, 0, pageSize) 138 | if err != nil { 139 | t.Errorf("error mapping file: %s", err) 140 | } 141 | m.Unmap() 142 | fileobj.Close() 143 | } 144 | -------------------------------------------------------------------------------- /queue/internal/mmap/mmap_unix.go: -------------------------------------------------------------------------------- 1 | // Copyright 2011 Evan Shaw. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | // +build darwin dragonfly freebsd linux openbsd solaris netbsd 6 | 7 | package mmap 8 | 9 | import ( 10 | "syscall" 11 | ) 12 | 13 | func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) { 14 | flags := syscall.MAP_SHARED 15 | prot := syscall.PROT_READ 16 | switch { 17 | case inprot© != 0: 18 | prot |= syscall.PROT_WRITE 19 | flags = syscall.MAP_PRIVATE 20 | case inprot&RDWR != 0: 21 | prot |= syscall.PROT_WRITE 22 | } 23 | if inprot&EXEC != 0 { 24 | prot |= syscall.PROT_EXEC 25 | } 26 | if inflags&ANON != 0 { 27 | flags |= syscall.MAP_ANON 28 | } 29 | 30 | b, err := syscall.Mmap(int(fd), off, len, prot, flags) 31 | if err != nil { 32 | return nil, err 33 | } 34 | return b, nil 35 | } 36 | 37 | func flush(addr, len uintptr) error { 38 | _, _, errno := syscall.Syscall(_SYS_MSYNC, addr, len, _MS_SYNC) 39 | if errno != 0 { 40 | return syscall.Errno(errno) 41 | } 42 | return nil 43 | } 44 | 45 | func lock(addr, len uintptr) error { 46 | _, _, errno := syscall.Syscall(syscall.SYS_MLOCK, addr, len, 0) 47 | if errno != 0 { 48 | return syscall.Errno(errno) 49 | } 50 | return nil 51 | } 52 | 53 | func unlock(addr, len uintptr) error { 54 | _, _, errno := syscall.Syscall(syscall.SYS_MUNLOCK, addr, len, 0) 55 | if errno != 0 { 56 | return syscall.Errno(errno) 57 | } 58 | return nil 59 | } 60 | 61 | func unmap(addr, len uintptr) error { 62 | _, _, errno := syscall.Syscall(syscall.SYS_MUNMAP, addr, len, 0) 63 | if errno != 0 { 64 | return syscall.Errno(errno) 65 | } 66 | return nil 67 | } 68 | -------------------------------------------------------------------------------- /queue/internal/mmap/mmap_windows.go: -------------------------------------------------------------------------------- 1 | // Copyright 2011 Evan Shaw. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | package mmap 6 | 7 | import ( 8 | "errors" 9 | "os" 10 | "sync" 11 | "syscall" 12 | ) 13 | 14 | // mmap on Windows is a two-step process. 15 | // First, we call CreateFileMapping to get a handle. 16 | // Then, we call MapviewToFile to get an actual pointer into memory. 17 | // Because we want to emulate a POSIX-style mmap, we don't want to expose 18 | // the handle -- only the pointer. We also want to return only a byte slice, 19 | // not a struct, so it's convenient to manipulate. 20 | 21 | // We keep this map so that we can get back the original handle from the memory address. 22 | 23 | type addrinfo struct { 24 | file syscall.Handle 25 | mapview syscall.Handle 26 | } 27 | 28 | var handleLock sync.Mutex 29 | var handleMap = map[uintptr]*addrinfo{} 30 | 31 | func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) { 32 | flProtect := uint32(syscall.PAGE_READONLY) 33 | dwDesiredAccess := uint32(syscall.FILE_MAP_READ) 34 | switch { 35 | case prot© != 0: 36 | flProtect = syscall.PAGE_WRITECOPY 37 | dwDesiredAccess = syscall.FILE_MAP_COPY 38 | case prot&RDWR != 0: 39 | flProtect = syscall.PAGE_READWRITE 40 | dwDesiredAccess = syscall.FILE_MAP_WRITE 41 | } 42 | if prot&EXEC != 0 { 43 | flProtect <<= 4 44 | dwDesiredAccess |= syscall.FILE_MAP_EXECUTE 45 | } 46 | 47 | // The maximum size is the area of the file, starting from 0, 48 | // that we wish to allow to be mappable. It is the sum of 49 | // the length the user requested, plus the offset where that length 50 | // is starting from. This does not map the data into memory. 51 | maxSizeHigh := uint32((off + int64(len)) >> 32) 52 | maxSizeLow := uint32((off + int64(len)) & 0xFFFFFFFF) 53 | // TODO: Do we need to set some security attributes? It might help portability. 
54 | h, errno := syscall.CreateFileMapping(syscall.Handle(hfile), nil, flProtect, maxSizeHigh, maxSizeLow, nil) 55 | if h == 0 { 56 | return nil, os.NewSyscallError("CreateFileMapping", errno) 57 | } 58 | 59 | // Actually map a view of the data into memory. The view's size 60 | // is the length the user requested. 61 | fileOffsetHigh := uint32(off >> 32) 62 | fileOffsetLow := uint32(off & 0xFFFFFFFF) 63 | addr, errno := syscall.MapViewOfFile(h, dwDesiredAccess, fileOffsetHigh, fileOffsetLow, uintptr(len)) 64 | if addr == 0 { 65 | return nil, os.NewSyscallError("MapViewOfFile", errno) 66 | } 67 | handleLock.Lock() 68 | handleMap[addr] = &addrinfo{ 69 | file: syscall.Handle(hfile), 70 | mapview: h, 71 | } 72 | handleLock.Unlock() 73 | 74 | m := MMap{} 75 | dh := m.header() 76 | dh.Data = addr 77 | dh.Len = len 78 | dh.Cap = dh.Len 79 | 80 | return m, nil 81 | } 82 | 83 | func flush(addr, len uintptr) error { 84 | errno := syscall.FlushViewOfFile(addr, len) 85 | if errno != nil { 86 | return os.NewSyscallError("FlushViewOfFile", errno) 87 | } 88 | 89 | handleLock.Lock() 90 | defer handleLock.Unlock() 91 | handle, ok := handleMap[addr] 92 | if !ok { 93 | // should be impossible; we would've errored above 94 | return errors.New("unknown base address") 95 | } 96 | 97 | errno = syscall.FlushFileBuffers(handle.file) 98 | return os.NewSyscallError("FlushFileBuffers", errno) 99 | } 100 | 101 | func lock(addr, len uintptr) error { 102 | errno := syscall.VirtualLock(addr, len) 103 | return os.NewSyscallError("VirtualLock", errno) 104 | } 105 | 106 | func unlock(addr, len uintptr) error { 107 | errno := syscall.VirtualUnlock(addr, len) 108 | return os.NewSyscallError("VirtualUnlock", errno) 109 | } 110 | 111 | func unmap(addr, len uintptr) (err error) { 112 | err = flush(addr, len) 113 | if err != nil { 114 | return err 115 | } 116 | // Lock the UnmapViewOfFile along with the handleMap deletion. 
117 | // As soon as we unmap the view, the OS is free to give the 118 | // same addr to another new map. We don't want another goroutine 119 | // to insert and remove the same addr into handleMap while 120 | // we're trying to remove our old addr/handle pair. 121 | handleLock.Lock() 122 | defer handleLock.Unlock() 123 | err = syscall.UnmapViewOfFile(addr) 124 | if err != nil { 125 | return err 126 | } 127 | 128 | handle, ok := handleMap[addr] 129 | if !ok { 130 | // should be impossible; we would've errored above 131 | return errors.New("unknown base address") 132 | } 133 | delete(handleMap, addr) 134 | 135 | e := syscall.CloseHandle(syscall.Handle(handle.mapview)) 136 | return os.NewSyscallError("CloseHandle", e) 137 | } 138 | -------------------------------------------------------------------------------- /queue/internal/mmap/msync_netbsd.go: -------------------------------------------------------------------------------- 1 | // Copyright 2011 Evan Shaw. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package mmap 6 | 7 | const _SYS_MSYNC = 277 8 | const _MS_SYNC = 0x04 9 | -------------------------------------------------------------------------------- /queue/internal/mmap/msync_unix.go: -------------------------------------------------------------------------------- 1 | // Copyright 2011 Evan Shaw. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | // +build darwin dragonfly freebsd linux openbsd solaris 6 | 7 | package mmap 8 | 9 | import ( 10 | "syscall" 11 | ) 12 | 13 | const _SYS_MSYNC = syscall.SYS_MSYNC 14 | const _MS_SYNC = syscall.MS_SYNC 15 | -------------------------------------------------------------------------------- /queue/queue.go: -------------------------------------------------------------------------------- 1 | //Package queue contain methods and structures for managing of the message queue 2 | package queue 3 | 4 | //TODO: for empty list skip size of theindex file 5 | 6 | import ( 7 | "encoding/binary" 8 | "io" 9 | "runtime" 10 | "sync/atomic" 11 | "time" 12 | ) 13 | 14 | // QueueItem is elementh of the queue 15 | type QueueItem struct { // nolint 16 | idx StorageIdx 17 | ID StorageIdx 18 | Stream io.ReadSeeker 19 | storage storageProcessing 20 | } 21 | 22 | //Queue is a base structure for managing of the messages 23 | type Queue struct { 24 | name string 25 | options *Options 26 | workers chan Worker 27 | tmpworkers chan Worker 28 | log Logging 29 | newMessage chan struct{} 30 | stopEvent chan struct{} 31 | stopedHandle chan struct{} 32 | storage *fileStorage 33 | memory *queueMemory 34 | factory WorkerFactory 35 | inProcess *inProcessingPerWorker 36 | total int32 37 | totalWorkers int32 38 | lastTimeGC time.Duration 39 | } 40 | 41 | type newMessageNotificator interface { 42 | newMessageNotification() 43 | } 44 | 45 | //CreateQueue is function than creates and inits internal states : 46 | func CreateQueue(Name, StoragePath string, Log Logging, Factory WorkerFactory, Options *Options) (*Queue, error) { 47 | if Factory == nil { 48 | Factory = &nullWorkerFactory{} 49 | } 50 | if Options == nil { 51 | Options = &DefaultQueueOptions 52 | } 53 | if Log == nil { 54 | z := nullLog(0) 55 | Log = z 56 | } 57 | Log.Info("[Q:%s] is created...", Name) 58 | 59 | tmp := &Queue{ 60 | total: 0, 61 | workers: make(chan Worker, Options.MaximumWorkersCount), 62 | stopedHandle: make(chan struct{}), 
63 | newMessage: make(chan struct{}, 1), 64 | log: Log, 65 | options: Options, 66 | factory: Factory, 67 | name: Name, 68 | stopEvent: make(chan struct{}), 69 | lastTimeGC: time.Since(startTime), 70 | } 71 | 72 | fs, err := createStorage(Name, StoragePath, Log, Options.StorageOptions, Options.InputTimeOut, tmp) 73 | if err != nil { 74 | Log.Error("[Q:%s] cannot store storage...", Name) 75 | return nil, err 76 | } 77 | 78 | tmp.storage = fs 79 | tmp.memory = createMemoryQueue(Name, Options.MaximumMessagesInQueue, Options.MaximumQueueMessagesSize, 80 | fs, Log, Options.InputTimeOut, tmp) 81 | 82 | if Factory.NeedTimeoutProcessing() { 83 | tmp.tmpworkers = make(chan Worker, Options.MaximumWorkersCount) 84 | } 85 | 86 | if Factory.CanCreateWorkers() { 87 | for i := uint16(0); i < Options.MinimunWorkersCount; i++ { 88 | newWorker, err := Factory.CreateWorker() 89 | if err != nil { 90 | tmp.log.Trace("[Q:%s] New worker was not created. Error:%s", tmp.name, err.Error()) 91 | } else { 92 | tmp.workers <- newWorker 93 | tmp.log.Trace("[Q:%s] [W:%d] New worker was created.", tmp.name, newWorker.GetID()) 94 | } 95 | } 96 | tmp.totalWorkers = int32(len(tmp.workers)) 97 | } else { 98 | tmp.totalWorkers = 0 99 | } 100 | tmp.inProcess = createInProcessing(tmp, Options.MinimunWorkersCount, Options.MaximumMessagesPerWorker) 101 | 102 | Log.Trace("[Q:%s] Initial count of the workers is %d", Name, len(tmp.workers)) 103 | Log.Info("[Q:%s] Was created successful", Name) 104 | 105 | if tmp.storage.Count() != 0 { 106 | Log.Trace("[Q:%s] Found unprocessed messages in storage...", Name) 107 | } 108 | Log.Info("[Q:%s] Start main loop", Name) 109 | go tmp.loop() 110 | return tmp, nil 111 | } 112 | 113 | func (q *Queue) newMessageNotification() { 114 | select { 115 | case q.newMessage <- struct{}{}: 116 | default: 117 | } 118 | } 119 | 120 | func (q *Queue) getOneItemFromStorage() (*QueueItem, error) { 121 | MemData, err := q.memory.Get() 122 | if err == nil { 123 | stream, err := 
bufToStream(MemData.buf) 124 | if err != nil { 125 | return nil, err 126 | } 127 | fw := &QueueItem{ 128 | idx: MemData.idx, 129 | ID: MemData.idx, 130 | Stream: stream, 131 | storage: q.memory, 132 | } 133 | return fw, nil 134 | } 135 | return q.storage.Get() 136 | } 137 | 138 | // Process must be called from the worker of the message. In depends 139 | // of the `isOk` parameter either messages are deleting from the queue 140 | // or are marking as faulty and again processing after some timeout 141 | func (q *Queue) process(worker WorkerID, isOk bool) { 142 | q.log.Trace("[Q:%s] Receiver answer from worker (%d) [%v]", q.name, worker, isOk) 143 | q.inProcess.processList(worker, isOk) 144 | } 145 | 146 | func (q *Queue) dropWorker(worker WorkerID) { 147 | q.log.Trace("[Q:%s] [W:%d] Dropping worker", q.name, worker) 148 | q.inProcess.processList(worker, false) 149 | atomic.AddInt32(&q.totalWorkers, -1) 150 | } 151 | 152 | //Count returns the count of the messages in the queue 153 | func (q *Queue) Count() uint64 { 154 | return q.storage.Count() + q.memory.Count() 155 | } 156 | 157 | // Insert appends the message into the queue. In depends of the timeout's option either is trying 158 | // to write message to the disk or is trying to process this message in the memory and writing to the 159 | // disk only if timeout is expired shortly. Returns false if aren't processing / writing of the message 160 | // in the during of the timeout or has some problems with writing to disk 161 | func (q *Queue) Insert(buf []byte) bool { 162 | if q.options.InputTimeOut == 0 { 163 | return q.insert(buf, nil) 164 | } 165 | var timeoutch <-chan time.Time 166 | ch := make(chan bool, 1) 167 | go q.insert(buf, ch) 168 | timeoutch = time.NewTimer(q.options.InputTimeOut << 1).C 169 | for { 170 | select { 171 | case answer := <-ch: 172 | return answer 173 | case <-timeoutch: 174 | return false 175 | } 176 | } 177 | } 178 | 179 | // InsertFile appends file to queue. 
After processing content of the file if result of the execution of the worker is 180 | // successful file will deleted. 181 | func (q *Queue) InsertFile(fileName string) bool { 182 | var prefix [8]byte 183 | binary.LittleEndian.PutUint64(prefix[:], magicNumberIsFile) 184 | buf := []byte(string(prefix[:]) + fileName) 185 | if q.options.InputTimeOut == 0 { 186 | return q.insert(buf, nil) 187 | } 188 | var timeoutch <-chan time.Time 189 | ch := make(chan bool, 1) 190 | go q.insert(buf, ch) 191 | timeoutch = time.NewTimer(q.options.InputTimeOut << 1).C 192 | for { 193 | select { 194 | case answer := <-ch: 195 | return answer 196 | case <-timeoutch: 197 | return false 198 | } 199 | } 200 | } 201 | 202 | func (q *Queue) insert(buf []byte, ch chan bool) bool { 203 | if ch == nil { 204 | ID, err := q.storage.Put(buf) 205 | if err == nil { 206 | q.log.Trace("[Q:%s:%d] Stored to file storage", q.name, ID) 207 | q.newMessageNotification() 208 | } else { 209 | q.log.Error("[Q:%s:%d] Storing to storage with error result [%s] ", q.name, ID, err.Error()) 210 | } 211 | return err == nil 212 | } 213 | if q.storage.Count() == 0 { 214 | if ID, err := q.memory.Put(buf, ch); err == nil { 215 | q.log.Trace("[Q:%s:%d] Stored to memory storage", q.name, ID) 216 | q.newMessageNotification() 217 | return true 218 | } 219 | } 220 | 221 | ID, err := q.storage.Put(buf) 222 | if err == nil { 223 | q.log.Trace("[Q:%s:%d] Stored to file storage", q.name, ID) 224 | q.newMessageNotification() 225 | } else { 226 | q.log.Error("[Q:%s:%d] Storing to storage with error result [%s] ", q.name, ID, err.Error()) 227 | } 228 | ch <- err == nil 229 | return err == nil 230 | } 231 | 232 | func (q *Queue) errorProcessing() { 233 | } 234 | 235 | func (q *Queue) timeoutProcess() { 236 | 237 | Timeout := time.NewTimer(30 * time.Second).C 238 | // wait until all Workers finished its work 239 | forloop: 240 | for { 241 | select { 242 | case <-Timeout: 243 | break forloop 244 | default: 245 | // If all Workers is in 
chanel then break this loop 246 | if len(q.workers) == int(q.totalWorkers) { 247 | break forloop 248 | } 249 | runtime.Gosched() 250 | } 251 | } 252 | 253 | if q.tmpworkers == nil { 254 | return 255 | } 256 | //for each worker we check on unfinished work and if found we send to this 257 | // Worker request to timeout processing 258 | // We using temporary chanel becase me must detect processing of the all Workers 259 | for len(q.workers) > 0 { 260 | worker := <-q.workers 261 | if q.inProcess.messagesInProcess(worker.GetID()) > 0 { 262 | go func() { 263 | worker := worker 264 | r := worker.ProcessTimeout() 265 | switch r { 266 | case ProcessedSuccessful: 267 | q.process(worker.GetID(), true) 268 | case ProcessedWithError: 269 | q.process(worker.GetID(), false) 270 | case ProcessedKillWorker: 271 | q.dropWorker(worker.GetID()) 272 | worker.Close() 273 | return 274 | default: 275 | } 276 | q.tmpworkers <- worker 277 | q.log.Trace("[Q:%s] [W:%d] Worker ready for work", q.name, worker.GetID()) 278 | }() 279 | } else { 280 | q.tmpworkers <- worker 281 | } 282 | } 283 | 284 | Timeout = time.NewTimer(30 * time.Second).C 285 | forloop2: 286 | for { 287 | select { 288 | case <-Timeout: 289 | break forloop2 290 | default: 291 | if len(q.tmpworkers) == int(q.totalWorkers) { 292 | break forloop2 293 | } 294 | runtime.Gosched() 295 | } 296 | } 297 | // Now all Warkers is processed and don't have any unprocessed messages 298 | // Because timeout and not present messages in the storage we decrement count of the Workers 299 | for len(q.tmpworkers) > int(q.options.MinimunWorkersCount) { 300 | worker := <-q.tmpworkers 301 | q.inProcess.delete(worker.GetID()) 302 | q.totalWorkers-- 303 | } 304 | q.workers, q.tmpworkers = q.tmpworkers, q.workers 305 | 306 | } 307 | 308 | func (q *Queue) loop() { 309 | var to <-chan time.Time 310 | MaxWorkers := q.options.MaximumWorkersCount 311 | AvailableWorker := q.workers 312 | Timer := time.NewTimer(time.Millisecond * 10000) 313 | AC := 0 314 | MC 
:= 1 315 | gofor: 316 | for { 317 | select { 318 | case <-q.stopEvent: 319 | break gofor 320 | case <-q.newMessage: 321 | if AvailableWorker == nil { 322 | // q.log.Trace("[Q:%s]New message was received or timeout expired. Start reading messages", q.name) 323 | AvailableWorker = q.workers 324 | } 325 | if q.factory.CanCreateWorkers() && len(q.workers) == 0 && uint16(q.totalWorkers) < MaxWorkers { 326 | tmp, err := q.factory.CreateWorker() 327 | if err == nil { 328 | q.workers <- tmp 329 | q.totalWorkers++ 330 | q.log.Trace("[Q:%s] [W:%d] New worker was created Current count is %d ", q.name, tmp.GetID(), q.totalWorkers) 331 | } else { 332 | q.log.Error("[Q:%s] New worker was created with error %s ", q.name, err.Error()) 333 | } 334 | } 335 | to = nil 336 | case worker := <-AvailableWorker: 337 | inProcessItem := q.inProcess.addToList(worker.GetID()) 338 | if inProcessItem == nil { 339 | go func() { 340 | worker := worker 341 | r := worker.ProcessTimeout() 342 | switch r { 343 | case ProcessedSuccessful: 344 | q.process(worker.GetID(), true) 345 | case ProcessedWithError: 346 | q.process(worker.GetID(), false) 347 | case ProcessedKillWorker: 348 | q.dropWorker(worker.GetID()) 349 | worker.Close() 350 | return 351 | default: 352 | } 353 | q.workers <- worker 354 | q.log.Trace("[Q:%s] [W:%d] Worker ready for work", q.name, worker.GetID()) 355 | }() 356 | continue 357 | } 358 | item, err := q.getOneItemFromStorage() 359 | if err == nil { 360 | inProcessItem[0] = item 361 | q.log.Trace("[Q:%s] [W:%d] [M:%d] Loaded from %s and sent to worker", q.name, worker.GetID(), item.ID, item.storage.description()) 362 | go func() { 363 | worker := worker 364 | r := worker.ProcessMessage(item) 365 | switch r { 366 | case ProcessedSuccessful: 367 | q.process(worker.GetID(), true) 368 | case ProcessedWithError: 369 | q.process(worker.GetID(), false) 370 | case ProcessedKillWorker: 371 | q.dropWorker(worker.GetID()) 372 | worker.Close() 373 | return 374 | default: 375 | } 376 | 
q.workers <- worker 377 | q.log.Trace("[Q:%s] [W:%d] Worker ready for work", q.name, worker.GetID()) 378 | }() 379 | MC = 1 380 | continue 381 | } 382 | q.inProcess.decrementList(worker.GetID()) 383 | AvailableWorker = nil 384 | q.workers <- worker 385 | myerr, ok := err.(*queueError) 386 | timer := time.Millisecond * 10000 387 | if ok { 388 | switch myerr.ErrorType { 389 | case errorInDelay: 390 | if myerr.NextAvailable < timer { 391 | timer = myerr.NextAvailable 392 | } 393 | q.log.Trace("[Q:%s] Next messages will available in %s", q.name, myerr.NextAvailable.String()) 394 | case errorNoMore: 395 | // q.log.Trace("[Q:%s] No mo available messages", q.name) 396 | } 397 | } else { 398 | q.log.Trace("[Q:%s] Received answer from storage %s", q.name, err.Error()) 399 | q.errorProcessing() 400 | } 401 | if q.tmpworkers != nil { 402 | q.timeoutProcess() 403 | } 404 | if !Timer.Stop() { 405 | select { 406 | case <-Timer.C: 407 | default: 408 | } 409 | } 410 | Timer.Reset(timer) 411 | to = Timer.C //time.After(timer) 412 | case <-to: 413 | if AvailableWorker == nil { 414 | AvailableWorker = q.workers 415 | AC++ 416 | if AC >= MC { 417 | q.log.Trace("[Q:%s] Idle ", q.name) 418 | if MC == 1 { 419 | MC += 2 420 | go q.storage.garbageCollect() 421 | q.log.Trace("[Q:%s] GC was started", q.name) 422 | } else { 423 | MC += 3 424 | } 425 | AC = 0 426 | } 427 | } 428 | } 429 | } 430 | q.timeoutProcess() 431 | q.stopedHandle <- struct{}{} 432 | } 433 | 434 | func (q *Queue) close() { 435 | q.stopEvent <- struct{}{} 436 | <-q.stopedHandle 437 | close(q.workers) 438 | for w := range q.workers { 439 | w.Close() 440 | } 441 | q.factory.Close() 442 | q.memory.Close() 443 | q.storage.Close() 444 | 445 | } 446 | 447 | func (q *Queue) info() { 448 | q.storage.info() 449 | } 450 | 451 | // Close stops the handler of the messages, saves the messages located in 452 | // the memory into the disk, closes all opened files. 
453 | func (q *Queue) Close() { 454 | q.log.Info("[Q:%s] is closed...", q.name) 455 | q.close() 456 | q.log.Info("[Q:%s] was closed...", q.name) 457 | } 458 | -------------------------------------------------------------------------------- /queue/queue_error.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | const ( 9 | errorNoMore = iota 10 | errorInDelay 11 | errorOverSize 12 | errorOverCount 13 | errorTimeOut 14 | ) 15 | 16 | type queueError struct { 17 | ErrorType int32 18 | NextAvailable time.Duration 19 | } 20 | 21 | func (e *queueError) Error() string { 22 | return fmt.Sprintf("queue internal error #%d (%v)", e.ErrorType, e.NextAvailable) 23 | } 24 | -------------------------------------------------------------------------------- /queue/queue_inprocessing.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "os" 5 | "sync" 6 | ) 7 | 8 | type storageProcessing interface { 9 | FreeRecord(idx StorageIdx) error 10 | UnlockRecord(idx StorageIdx) error 11 | description() string 12 | } 13 | 14 | type inProcessingList struct { 15 | list []*QueueItem 16 | cnt uint16 17 | } 18 | 19 | type inProcessingPerWorker struct { 20 | sync.RWMutex 21 | maxPerWorker uint16 22 | workers map[WorkerID]*inProcessingList 23 | q *Queue 24 | } 25 | 26 | func createInProcessing(q *Queue, workerCount, maxPerWorker uint16) *inProcessingPerWorker { 27 | return &inProcessingPerWorker{ 28 | maxPerWorker: maxPerWorker, 29 | workers: make(map[WorkerID]*inProcessingList, workerCount), 30 | q: q, 31 | } 32 | } 33 | 34 | func (ipw *inProcessingPerWorker) addToList(worker WorkerID) []*QueueItem { 35 | ipw.RLock() 36 | list, ok := ipw.workers[worker] 37 | ipw.RUnlock() 38 | if !ok { 39 | list = &inProcessingList{ 40 | list: make([]*QueueItem, ipw.maxPerWorker), 41 | } 42 | ipw.Lock() 43 | ipw.workers[worker] = list 44 | 
ipw.Unlock() 45 | } 46 | if list.cnt == ipw.maxPerWorker { 47 | return nil 48 | } 49 | list.cnt++ 50 | return list.list[list.cnt-1 : list.cnt] 51 | } 52 | 53 | func (ipw *inProcessingPerWorker) decrementList(worker WorkerID) { 54 | ipw.RLock() 55 | list, ok := ipw.workers[worker] 56 | ipw.RUnlock() 57 | if !ok { 58 | return 59 | } 60 | list.cnt-- 61 | } 62 | 63 | func (ipw *inProcessingPerWorker) processList(worker WorkerID, isOk bool) { 64 | ipw.Lock() 65 | list, ok := ipw.workers[worker] 66 | ipw.Unlock() 67 | if !ok { 68 | ipw.q.log.Trace("[Q:%s] !!!Not records for (%d)", ipw.q.name, worker) 69 | return 70 | } 71 | if list.cnt == 0 { 72 | ipw.q.log.Trace("[Q:%s] !!!!Not records for (%d)", ipw.q.name, worker) 73 | } 74 | for i := uint16(0); i < list.cnt; i++ { 75 | file, isFile := list.list[i].Stream.(*os.File) 76 | if isFile { 77 | file.Close() 78 | } 79 | list.list[i].Stream = nil 80 | if isOk { 81 | ipw.q.log.Trace("[Q:%s:%d] Delete message from %s", ipw.q.name, list.list[i].ID, list.list[i].storage.description()) 82 | list.list[i].storage.FreeRecord(list.list[i].idx) 83 | } else { 84 | ipw.q.log.Trace("[Q:%s:%d] Mark message in %s as faulty", ipw.q.name, list.list[i].ID, list.list[i].storage.description()) 85 | list.list[i].storage.UnlockRecord(list.list[i].idx) 86 | } 87 | } 88 | list.cnt = 0 89 | } 90 | 91 | func (ipw *inProcessingPerWorker) messagesInProcess(worker WorkerID) uint16 { 92 | ipw.RLock() 93 | list, ok := ipw.workers[worker] 94 | ipw.RUnlock() 95 | if !ok { 96 | return 0 97 | } 98 | return list.cnt 99 | } 100 | 101 | func (ipw *inProcessingPerWorker) delete(worker WorkerID) { 102 | ipw.Lock() 103 | delete(ipw.workers, worker) 104 | ipw.Unlock() 105 | } 106 | -------------------------------------------------------------------------------- /queue/queue_logging.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | // Logging is the interface that must support logging system for work with 
queue 4 | type Logging interface { 5 | Trace(msg string, a ...interface{}) 6 | Info(msg string, a ...interface{}) 7 | Warning(msg string, a ...interface{}) 8 | Error(msg string, a ...interface{}) 9 | } 10 | 11 | type nullLog int 12 | 13 | func (logger nullLog) Trace(msg string, a ...interface{}) { 14 | } 15 | 16 | func (logger nullLog) Warning(msg string, a ...interface{}) { 17 | } 18 | 19 | func (logger nullLog) Error(msg string, a ...interface{}) { 20 | } 21 | 22 | func (logger nullLog) Info(msg string, a ...interface{}) { 23 | } 24 | -------------------------------------------------------------------------------- /queue/queue_memory.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | ) 7 | 8 | type queueMemoryItem struct { 9 | idx StorageIdx 10 | ch chan bool 11 | buf []byte 12 | isBusy bool 13 | filestorageidx StorageIdx 14 | timeOut time.Duration 15 | } 16 | 17 | type queueMemory struct { 18 | maxCnt uint64 19 | cnt uint64 20 | sync.Mutex 21 | name string 22 | idx StorageIdx 23 | list []*queueMemoryItem 24 | putter storagePutter 25 | log Logging 26 | timeout time.Duration 27 | notify newMessageNotificator 28 | maxSize int32 29 | size int32 30 | } 31 | 32 | func createMemoryQueue(Name string, MaxCount uint16, MaxSize int32, 33 | Putter storagePutter, Log Logging, TimeOut time.Duration, Notify newMessageNotificator) *queueMemory { 34 | if Log == nil { 35 | Log = nullLog(0) 36 | } 37 | tmp := &queueMemory{ 38 | name: Name, 39 | list: make([]*queueMemoryItem, MaxCount), 40 | maxCnt: uint64(MaxCount), 41 | maxSize: MaxSize, 42 | putter: Putter, 43 | log: Log, 44 | timeout: TimeOut, 45 | } 46 | return tmp 47 | } 48 | 49 | func (mq *queueMemory) processTimeOut(curidx *int) { 50 | var ( 51 | idx StorageIdx 52 | err error 53 | ) 54 | if mq.list[*curidx].isBusy { 55 | if mq.list[*curidx].filestorageidx == InvalidIdx { 56 | idx, err = mq.putter.put(mq.list[*curidx].buf, 
putRecordAsInProcess) 57 | } else { 58 | return 59 | } 60 | } else { 61 | idx, err = mq.putter.put(mq.list[*curidx].buf, putRecordAsNew) 62 | } 63 | if err == nil && mq.notify != nil { 64 | mq.notify.newMessageNotification() 65 | } 66 | if err != nil { 67 | mq.log.Error("[Q:%s:%d] Moved to file storage with error result [%s] ", mq.name, mq.list[*curidx].idx, err.Error()) 68 | select { 69 | case mq.list[*curidx].ch <- false: 70 | default: 71 | } 72 | mq.size -= int32(len(mq.list[*curidx].buf)) 73 | copy(mq.list[*curidx:mq.cnt], mq.list[*curidx+1:mq.cnt]) 74 | mq.cnt-- 75 | *curidx-- 76 | return 77 | } 78 | if mq.list[*curidx].isBusy { 79 | mq.list[*curidx].filestorageidx = idx 80 | mq.log.Trace("[Q:%s:%d] copied to file storage with new idx [%d] ", mq.name, mq.list[*curidx].idx, idx) 81 | } else { 82 | select { 83 | case mq.list[*curidx].ch <- false: 84 | default: 85 | } 86 | mq.log.Trace("[Q:%s:%d] moveid to file storage with new idx [%d] ", mq.name, mq.list[*curidx].idx, idx) 87 | mq.size -= int32(len(mq.list[*curidx].buf)) 88 | copy(mq.list[*curidx:mq.cnt], mq.list[*curidx+1:mq.cnt]) 89 | mq.cnt-- 90 | *curidx-- 91 | } 92 | } 93 | 94 | func (mq *queueMemory) checkTimeOut(CurrentDuration time.Duration) { 95 | for i := 0; i < int(mq.cnt); i++ { 96 | if mq.list[i].timeOut < CurrentDuration { 97 | mq.processTimeOut(&i) 98 | } 99 | } 100 | } 101 | 102 | func (mq *queueMemory) Close() { 103 | mq.Lock() 104 | defer mq.Unlock() 105 | mq.checkTimeOut(0x7FFFFFFFFFFFFFFF) 106 | } 107 | 108 | func (mq *queueMemory) description() string { 109 | return "memory storage" 110 | } 111 | 112 | func (mq *queueMemory) FreeRecord(idx StorageIdx) error { 113 | var tmp *queueMemoryItem 114 | mq.Lock() 115 | defer mq.Unlock() 116 | CurrentDuration := time.Since(startTime) 117 | for i := 0; i < int(mq.cnt); i++ { 118 | if mq.list[i].idx == idx { 119 | tmp = mq.list[i] 120 | copy(mq.list[i:mq.cnt], mq.list[i+1:mq.cnt]) 121 | mq.cnt-- 122 | mq.size -= int32(len(tmp.buf)) 123 | i-- 124 | // 
send notification to waitnig timeout goroutine about processed message 125 | if tmp.filestorageidx == InvalidIdx { 126 | select { 127 | case tmp.ch <- true: 128 | default: 129 | } 130 | } else { 131 | mq.putter.FreeRecord(tmp.filestorageidx) 132 | } 133 | } else { 134 | if mq.list[i].timeOut < CurrentDuration { 135 | mq.processTimeOut(&i) 136 | } 137 | } 138 | } 139 | return nil 140 | } 141 | 142 | func (mq *queueMemory) UnlockRecord(idx StorageIdx) error { 143 | var ( 144 | err error 145 | Idx StorageIdx 146 | ) 147 | mq.Lock() 148 | defer mq.Unlock() 149 | Idx = InvalidIdx 150 | CurrentDuration := time.Since(startTime) 151 | for i := 0; i < int(mq.cnt); i++ { 152 | if mq.list[i].idx == idx { 153 | if mq.list[i].filestorageidx != InvalidIdx { 154 | err = mq.putter.UnlockRecord(mq.list[i].filestorageidx) 155 | } else { 156 | Idx, err = mq.putter.put(mq.list[i].buf, putRecordAsProcessedWithError) 157 | } 158 | if err == nil && mq.notify != nil { 159 | mq.notify.newMessageNotification() 160 | } 161 | 162 | select { 163 | case mq.list[i].ch <- err == nil: 164 | default: 165 | } 166 | if err != nil { 167 | return err 168 | } 169 | if Idx != InvalidIdx { 170 | mq.log.Trace("[Q:%s:%d] moved to file storage with new idx [%d] ", mq.name, mq.list[i].idx, Idx) 171 | } 172 | // remove from list 173 | mq.size -= int32(len(mq.list[i].buf)) 174 | copy(mq.list[i:mq.cnt], mq.list[i+1:mq.cnt]) 175 | mq.cnt-- 176 | i-- 177 | } else { 178 | if mq.list[i].timeOut < CurrentDuration { 179 | mq.processTimeOut(&i) 180 | } 181 | } 182 | } 183 | return nil 184 | } 185 | 186 | func (mq *queueMemory) Put(buf []byte, Chan chan bool) (StorageIdx, error) { 187 | mq.Lock() 188 | defer mq.Unlock() 189 | if mq.cnt == mq.maxCnt { 190 | return InvalidIdx, &queueError{ErrorType: errorOverCount} 191 | } 192 | if mq.size+int32(len(buf)) >= mq.maxSize { 193 | return InvalidIdx, &queueError{ErrorType: errorOverSize} 194 | } 195 | tmp := &queueMemoryItem{ 196 | buf: buf, 197 | ch: Chan, 198 | idx: mq.idx, 
199 | filestorageidx: InvalidIdx, 200 | timeOut: time.Since(startTime) + mq.timeout, 201 | } 202 | mq.list[mq.cnt] = tmp 203 | mq.cnt++ 204 | mq.idx++ 205 | mq.size += int32(len(buf)) 206 | return tmp.idx, nil 207 | } 208 | 209 | func (mq *queueMemory) Get() (*queueMemoryItem, error) { 210 | mq.Lock() 211 | defer mq.Unlock() 212 | CurrentDuration := time.Since(startTime) 213 | for i := 0; i < int(mq.cnt); i++ { 214 | if mq.list[i].timeOut < CurrentDuration { 215 | mq.processTimeOut(&i) 216 | continue 217 | } 218 | if !mq.list[i].isBusy { 219 | mq.list[i].isBusy = true 220 | return mq.list[i], nil 221 | } 222 | } 223 | return nil, &queueError{ErrorType: errorNoMore} 224 | } 225 | 226 | func (mq *queueMemory) Count() uint64 { 227 | mq.Lock() 228 | defer mq.Unlock() 229 | return mq.cnt 230 | } 231 | 232 | func (mq *queueMemory) Size() uint64 { 233 | mq.Lock() 234 | defer mq.Unlock() 235 | return uint64(mq.size) 236 | } 237 | -------------------------------------------------------------------------------- /queue/queue_memory_test.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | // empty storage for checking of the memory queue 10 | type testPutter struct { 11 | ID StorageIdx 12 | Cnt int 13 | } 14 | 15 | func (tp *testPutter) put(buffer []byte, option int) (StorageIdx, error) { 16 | tp.ID++ 17 | return tp.ID - 1, nil 18 | } 19 | func (tp *testPutter) UnlockRecord(Idx StorageIdx) error { 20 | tp.Cnt++ 21 | return nil 22 | } 23 | 24 | func (tp *testPutter) FreeRecord(Idx StorageIdx) error { 25 | if Idx == 0 { 26 | tp.Cnt = 100 27 | } 28 | return nil 29 | } 30 | 31 | type testPutterError struct { 32 | } 33 | 34 | func (tp *testPutterError) put(buffer []byte, option int) (StorageIdx, error) { 35 | return InvalidIdx, errors.New("Some error") 36 | } 37 | func (tp *testPutterError) UnlockRecord(Idx StorageIdx) error { 38 | return errors.New("Some error") 
39 | } 40 | 41 | func (tp *testPutterError) FreeRecord(Idx StorageIdx) error { 42 | return nil 43 | } 44 | 45 | func TestMemoryQueueClose(t *testing.T) { 46 | MessagesCount := 10 47 | putter := &testPutter{} 48 | mq := createMemoryQueue("", 1024, 1024*1024*16, putter, nil, time.Millisecond*500, nil) 49 | if mq == nil { 50 | t.Fatal("Cannot create memory storage") 51 | } 52 | for i := 0; i < MessagesCount; i++ { 53 | mq.Put(make([]byte, 100), nil) 54 | } 55 | mq.Close() 56 | if putter.ID != StorageIdx(MessagesCount) { 57 | t.Fatalf("Putter did not receive all messages. Received %d/%d.", putter.ID, MessagesCount) 58 | } 59 | if mq.Count() != 0 { 60 | t.Fatalf("After close of the storage in present %d messages.", mq.Count()) 61 | } 62 | if mq.Size() != 0 { 63 | t.Fatalf("After close of the storage size is %d bytes.", mq.Size()) 64 | } 65 | } 66 | 67 | func TestMemoryQueueOverLengthAndOverSize(t *testing.T) { 68 | MessagesCount := 10 69 | putter := &testPutter{} 70 | mq := createMemoryQueue("", uint16(MessagesCount), 1024*1024, putter, nil, time.Millisecond*500, nil) 71 | if mq == nil { 72 | t.Fatal("Cannot create memory storage") 73 | } 74 | for i := 0; i < MessagesCount; i++ { 75 | mq.Put(make([]byte, 100), nil) 76 | } 77 | if _, err := mq.Put(make([]byte, 100), nil); err == nil { 78 | t.Fatal("Not passed overlength test") 79 | } 80 | mq.Close() 81 | for i := 0; i < 2; i++ { 82 | mq.Put(make([]byte, 510*1024), nil) 83 | } 84 | if _, err := mq.Put(make([]byte, 4097), nil); err == nil { 85 | t.Fatal("Not passed oversize test") 86 | } 87 | } 88 | 89 | func TestMemoryQueueTimeouts(t *testing.T) { 90 | // MessagesCount := 10 91 | putter := &testPutter{} 92 | mq := createMemoryQueue("", 1024, 1024*1024*16, putter, nil, time.Millisecond*100, nil) 93 | if mq == nil { 94 | t.Fatal("Cannot create memory storage") 95 | } 96 | } 97 | 98 | func TestMemoryQueueTestInProcessSkip(t *testing.T) { 99 | // MessagesCount := 10 100 | putter := &testPutter{} 101 | mq := 
createMemoryQueue("", 1024, 1024*1024*16, putter, nil, time.Millisecond*100, nil) 102 | if mq == nil { 103 | t.Fatal("Cannot create memory storage") 104 | } 105 | mq.Put(make([]byte, 100), nil) 106 | data, _ := mq.Get() 107 | mq.UnlockRecord(data.idx) 108 | if putter.ID != 1 { 109 | t.Fatal("Cannot move error message to file storage") 110 | } 111 | } 112 | 113 | func TestMemoryQueueTestInTimeOut(t *testing.T) { 114 | // MessagesCount := 10 115 | putter := &testPutter{} 116 | mq := createMemoryQueue("", 1024, 1024*1024*16, putter, nil, time.Millisecond*50, nil) 117 | if mq == nil { 118 | t.Fatal("Cannot create memory storage") 119 | } 120 | mq.Put(make([]byte, 100), nil) 121 | mq.Put(make([]byte, 100), nil) 122 | time.Sleep(time.Millisecond * 50) 123 | _, err := mq.Get() 124 | if err == nil { 125 | t.Fatal("records must be moved to file storage") 126 | } 127 | } 128 | 129 | func TestMemoryQueueTestInTimeOutWithError(t *testing.T) { 130 | putter := &testPutterError{} 131 | mq := createMemoryQueue("", 1024, 1024*1024*16, putter, nil, time.Millisecond*50, nil) 132 | if mq == nil { 133 | t.Fatal("Cannot create memory storage") 134 | } 135 | ch := make(chan bool) 136 | timer := time.After(500 * time.Millisecond) 137 | go func() { 138 | mq.Put(make([]byte, 100), ch) 139 | time.Sleep(time.Millisecond * 50) 140 | mq.Get() 141 | }() 142 | processed := false 143 | state := false 144 | select { 145 | case state = <-ch: 146 | processed = true 147 | case <-timer: 148 | } 149 | if !processed || state { 150 | t.Fatal("function must return false") 151 | } 152 | } 153 | 154 | func TestMemoryQueueTestInTimeOutProcessedRecord(t *testing.T) { 155 | putter := &testPutter{} 156 | mq := createMemoryQueue("", 1024, 1024*1024*16, putter, nil, time.Millisecond*50, nil) 157 | if mq == nil { 158 | t.Fatal("Cannot create memory storage") 159 | } 160 | mq.Put(make([]byte, 100), nil) 161 | data, _ := mq.Get() 162 | time.Sleep(time.Millisecond * 50) 163 | mq.Get() 164 | mq.FreeRecord(data.idx) 165 
| if putter.Cnt != 100 { 166 | t.Fatal("Processed record must be removed from file storage too.") 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /queue/queue_options.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import "time" 4 | 5 | // StorageOptions holds the optional parameters for the disk storage of the messages. 6 | type StorageOptions struct { 7 | // maximum size of the storage's data files 8 | MaxDataFileSize int64 9 | 10 | // Count of the operation with storage when index file will be flushed 11 | FlushOperations uint32 12 | 13 | // Count of the percents if the free messages before close of the when index file will be reformed 14 | PercentFreeForRecalculateOnExit uint8 15 | 16 | // Count of the percents if the free messages when index file will be reformed 17 | PercentFreeForRecalculateOnIncrementIndexFile uint8 18 | 19 | // Depends skip error messages if timeout of the waiting did not finished yet 20 | SkipReturnedRecords bool 21 | 22 | // Duration of the timeout. Time of the next processing calculated by TimeOfError+CountOfTheErrors*SkipDelayPerTry 23 | SkipDelayPerTry uint32 24 | 25 | // Depends check crc of the message before sent in to worker 26 | CheckCRCOnRead bool 27 | 28 | // Count of the one time opened for reading and for writing files. Open files are counting separately 29 | MaxOneTimeOpenedFiles int16 30 | 31 | // If queue index file is corrupted then will recreate index file and try to restore ,essages information 32 | DeleteInvalidIndexFile bool 33 | } 34 | 35 | //Options holds the optional parameters for the managing of the messages. 
36 | type Options struct { 37 | // Options for file storage connected to this queue 38 | StorageOptions *StorageOptions 39 | 40 | // In the during of timeout, message must be processed or saved to disk 41 | InputTimeOut time.Duration 42 | 43 | // Maximum size of the messages what can be processed without storing to disk 44 | MaximumQueueMessagesSize int32 45 | 46 | // Maximum count of the messages what can be processed without storing to disk 47 | MaximumMessagesInQueue uint16 48 | 49 | // Minimum count of the workers per queue 50 | MinimunWorkersCount uint16 51 | 52 | // Minimum count of the workers per queue 53 | MaximumWorkersCount uint16 54 | 55 | // Maximum count of the messages that thw worker can crocess per one time 56 | MaximumMessagesPerWorker uint16 57 | } 58 | 59 | // DefaultStorageOptions is default options for filestorage 60 | var DefaultStorageOptions = StorageOptions{ 61 | MaxDataFileSize: 0x1FFFFFFF, // 62 | FlushOperations: 512, 63 | PercentFreeForRecalculateOnExit: 5, 64 | PercentFreeForRecalculateOnIncrementIndexFile: 10, 65 | SkipReturnedRecords: true, 66 | SkipDelayPerTry: 500, 67 | CheckCRCOnRead: false, 68 | MaxOneTimeOpenedFiles: 12, 69 | DeleteInvalidIndexFile: true, 70 | } 71 | 72 | // DefaultQueueOptions is default options for queue 73 | var DefaultQueueOptions = Options{ 74 | MinimunWorkersCount: 4, 75 | MaximumWorkersCount: 32, 76 | StorageOptions: nil, 77 | MaximumMessagesPerWorker: 2048, 78 | InputTimeOut: 5 * time.Second, 79 | MaximumMessagesInQueue: 2048, 80 | MaximumQueueMessagesSize: 16 * 1024 * 1024, 81 | } 82 | -------------------------------------------------------------------------------- /queue/queue_test.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "io" 5 | "math/rand" 6 | "os" 7 | "sync" 8 | "sync/atomic" 9 | "testing" 10 | "time" 11 | 12 | "github.com/sybrexsys/RapidMQ/queue/internal/logging" 13 | ) 14 | 15 | const ( 16 | testStateOk = iota 17 
| testStateOkBeforeError 18 | testStateErrorsBeforeOk 19 | ) 20 | 21 | type nullWorkerTestUnit struct { 22 | id WorkerID 23 | cnt int 24 | delay time.Duration 25 | state int 26 | changeStateCount int 27 | isTimeOut bool 28 | } 29 | 30 | type nullWorkerTestUnitFactory struct { 31 | id WorkerID 32 | cnt int 33 | delay time.Duration 34 | state int 35 | changeStateCount int 36 | isTimeOut bool 37 | } 38 | 39 | func (n *nullWorkerTestUnitFactory) CreateWorker() (Worker, error) { 40 | return &nullWorkerTestUnit{ 41 | id: WorkerID(atomic.AddUint64((*uint64)(&n.id), 1) - 1), 42 | changeStateCount: n.changeStateCount, 43 | delay: n.delay, 44 | state: n.state, 45 | isTimeOut: n.isTimeOut, 46 | }, nil 47 | } 48 | 49 | func (n *nullWorkerTestUnitFactory) NeedTimeoutProcessing() bool { 50 | return n.isTimeOut 51 | } 52 | 53 | func (n *nullWorkerTestUnitFactory) CanCreateWorkers() bool { 54 | return true 55 | } 56 | 57 | func (n *nullWorkerTestUnitFactory) Close() { 58 | } 59 | 60 | func (n *nullWorkerTestUnit) Close() { 61 | } 62 | 63 | func (n *nullWorkerTestUnit) isOk() bool { 64 | switch n.state { 65 | case testStateOk: 66 | return true 67 | case testStateOkBeforeError: 68 | if n.cnt == n.changeStateCount { 69 | n.cnt = 0 70 | return false 71 | } 72 | n.cnt++ 73 | return true 74 | 75 | case testStateErrorsBeforeOk: 76 | if n.cnt == n.changeStateCount { 77 | n.cnt = 0 78 | return true 79 | } 80 | n.cnt++ 81 | return false 82 | } 83 | return true 84 | } 85 | 86 | func (n *nullWorkerTestUnit) ProcessMessage(msg *QueueItem) int { 87 | time.Sleep(n.delay) 88 | if !n.isTimeOut { 89 | if n.isOk() { 90 | return ProcessedSuccessful 91 | } 92 | return ProcessedWithError 93 | } 94 | return ProcessedWaitNext 95 | } 96 | 97 | func (n *nullWorkerTestUnit) ProcessTimeout() int { 98 | if n.isOk() { 99 | return ProcessedSuccessful 100 | } 101 | return ProcessedWithError 102 | } 103 | 104 | func (n *nullWorkerTestUnit) GetID() WorkerID { 105 | return 
WorkerID(atomic.AddUint64((*uint64)(&n.id), 0)) 106 | } 107 | 108 | func WorkOptions(t *testing.T, Step int, kk int64, opt *Options, factory WorkerFactory, withLoging bool) bool { 109 | var ( 110 | log *logging.Logger 111 | err error 112 | ) 113 | isOk := true 114 | start := time.Now() 115 | clearTestFolder() 116 | if withLoging { 117 | log, err = logging.CreateLog(logFolder+"/logfile.log", 1024*1024*200, 255) 118 | if err != nil { 119 | t.Fatalf("Cannot create logging file: %s", err) 120 | } 121 | } 122 | 123 | q, err := CreateQueue("Test", TestFolder, log, factory, opt) 124 | if err != nil { 125 | t.Fatalf("Cannot create storage: %s", err) 126 | } 127 | totsize := uint64(0) 128 | tot := uint64(0) 129 | var m sync.Mutex 130 | var wg sync.WaitGroup 131 | for i := int64(0); i < kk; i++ { 132 | wg.Add(1) 133 | go func(id int64) { 134 | defer wg.Done() 135 | for j := int64(0); j < kk; j++ { 136 | tmp := make([]byte, rand.Intn(0x3fff)) 137 | m.Lock() 138 | totsize += uint64(len(tmp)) 139 | wrk := tot 140 | tot++ 141 | m.Unlock() 142 | saved := q.Insert(tmp) 143 | if !saved { 144 | t.Fatalf("Cannot insert date:%d", wrk) 145 | } 146 | 147 | } 148 | }(i) 149 | } 150 | wg.Wait() 151 | picker := time.NewTicker(10 * time.Millisecond) 152 | d := 0 153 | for range picker.C { 154 | d++ 155 | if d == 500 { 156 | t.Errorf("Step %v Not finished in %s...\n", Step, time.Since(start)) 157 | isOk = false 158 | break 159 | } 160 | if q.Count() == 0 { 161 | break 162 | } 163 | 164 | } 165 | q.Close() 166 | if log != nil { 167 | log.Close() 168 | } 169 | return isOk 170 | } 171 | 172 | type testOptions struct { 173 | kk int64 174 | factory WorkerFactory 175 | options Options 176 | logging bool 177 | } 178 | 179 | func TestQueue(t *testing.T) { 180 | DefaultQueueOptionsWithOutTimeout := DefaultQueueOptions 181 | DefaultQueueOptionsWithOutTimeout.InputTimeOut = 0 182 | 183 | tests := []testOptions{ 184 | { // Timeout On 185 | kk: 20, 186 | options: DefaultQueueOptions, 187 | factory: 
&nullWorkerTestUnitFactory{ 188 | isTimeOut: false, 189 | state: testStateOk, 190 | }, 191 | logging: true, 192 | }, 193 | { // 194 | kk: 20, 195 | options: DefaultQueueOptions, 196 | factory: &nullWorkerTestUnitFactory{ 197 | isTimeOut: false, 198 | state: testStateOkBeforeError, 199 | changeStateCount: 20, 200 | }, 201 | logging: true, 202 | }, 203 | 204 | { // Timeout Off 205 | kk: 20, 206 | options: DefaultQueueOptionsWithOutTimeout, 207 | factory: &nullWorkerTestUnitFactory{ 208 | isTimeOut: false, 209 | state: testStateOk, 210 | }, 211 | logging: true, 212 | }, 213 | { // 214 | kk: 20, 215 | options: DefaultQueueOptionsWithOutTimeout, 216 | factory: &nullWorkerTestUnitFactory{ 217 | isTimeOut: false, 218 | state: testStateOkBeforeError, 219 | changeStateCount: 20, 220 | }, 221 | logging: true, 222 | }, 223 | 224 | { // Timeout On 225 | kk: 20, 226 | options: DefaultQueueOptions, 227 | factory: &nullWorkerTestUnitFactory{ 228 | isTimeOut: true, 229 | state: testStateOk, 230 | }, 231 | logging: true, 232 | }, 233 | { // 234 | kk: 20, 235 | options: DefaultQueueOptions, 236 | factory: &nullWorkerTestUnitFactory{ 237 | isTimeOut: true, 238 | state: testStateOkBeforeError, 239 | changeStateCount: 20, 240 | }, 241 | logging: true, 242 | }, 243 | { // Timeout Off 244 | kk: 20, 245 | options: DefaultQueueOptionsWithOutTimeout, 246 | factory: &nullWorkerTestUnitFactory{ 247 | isTimeOut: true, 248 | state: testStateOk, 249 | }, 250 | logging: true, 251 | }, 252 | { // 253 | kk: 20, 254 | options: DefaultQueueOptionsWithOutTimeout, 255 | factory: &nullWorkerTestUnitFactory{ 256 | isTimeOut: true, 257 | state: testStateOkBeforeError, 258 | changeStateCount: 20, 259 | }, 260 | logging: true, 261 | }, 262 | 263 | { // 264 | kk: 100, 265 | options: DefaultQueueOptions, 266 | factory: &nullWorkerTestUnitFactory{ 267 | isTimeOut: true, 268 | state: testStateOkBeforeError, 269 | changeStateCount: 20, 270 | }, 271 | logging: true, 272 | }, 273 | 274 | { // 275 | kk: 100, 276 | 
options: DefaultQueueOptionsWithOutTimeout, 277 | factory: &nullWorkerFactory{}, 278 | logging: true, 279 | }, 280 | } 281 | 282 | for s, k := range tests { 283 | if !WorkOptions(t, s, k.kk, &k.options, k.factory, k.logging) { 284 | break 285 | } 286 | 287 | } 288 | 289 | } 290 | 291 | const fileCount = 50 292 | 293 | type z struct { 294 | i int 295 | } 296 | 297 | func BenchmarkMapInside(b *testing.B) { 298 | RecCount := b.N 299 | m := make(map[StorageIdx]struct{}, fileCount) 300 | for i := StorageIdx(0); i < fileCount; i++ { 301 | m[i] = struct{}{} 302 | } 303 | mem := make([]StorageIdx, RecCount) 304 | for k := 0; k < RecCount; k++ { 305 | mem[k] = StorageIdx(rand.Int31n(fileCount)) 306 | } 307 | 308 | check := make(map[StorageIdx]*z) 309 | for kk := range m { 310 | check[kk] = &z{i: 0} 311 | } 312 | for k := 0; k < RecCount; k++ { 313 | idx := mem[k] 314 | op := check[idx] 315 | op.i++ 316 | } 317 | } 318 | 319 | func BenchmarkMapOutside(b *testing.B) { 320 | RecCount := b.N 321 | m := make(map[StorageIdx]struct{}, fileCount) 322 | for i := StorageIdx(0); i < fileCount; i++ { 323 | m[i] = struct{}{} 324 | } 325 | mem := make([]StorageIdx, RecCount) 326 | for k := 0; k < RecCount; k++ { 327 | mem[k] = StorageIdx(rand.Int31n(fileCount)) 328 | } 329 | 330 | check := make(map[StorageIdx]*z) 331 | for kk := range m { 332 | check[kk] = &z{i: 0} 333 | } 334 | for kk := range check { 335 | cnt := 0 336 | for k := 0; k < RecCount; k++ { 337 | if mem[k] == kk { 338 | cnt++ 339 | } 340 | } 341 | check[kk].i = cnt 342 | } 343 | } 344 | 345 | func TestGCQueue(t *testing.T) { 346 | clearTestFolder() 347 | log, err := logging.CreateLog(logFolder+"logfile.log", 1024*1024*200, 255) 348 | if err != nil { 349 | t.Fatalf("Cannot create logging file: %s", err) 350 | } 351 | q, err := CreateQueue("Test", TestFolder, log, &nullWorkerFactory{}, nil) 352 | if err != nil { 353 | t.Fatalf("Cannot create storage: %s", err) 354 | } 355 | tmp := make([]byte, rand.Intn(0x3fff)) 356 | saved 
:= q.Insert(tmp) 357 | if !saved { 358 | t.Fatalf("Cannot insert date") 359 | } 360 | time.Sleep(11 * time.Second) 361 | _, err1 := os.Stat(TestFolder + "stg00000.dat") 362 | if err1 == nil { 363 | t.Fatalf("Dat file must me deleted") 364 | } 365 | q.Close() 366 | if log != nil { 367 | log.Close() 368 | } 369 | } 370 | 371 | type TestWorker struct { 372 | id WorkerID 373 | } 374 | 375 | type TestWorkerFactory struct { 376 | id WorkerID 377 | } 378 | 379 | func (n *TestWorkerFactory) CreateWorker() (Worker, error) { 380 | return &TestWorker{ 381 | id: WorkerID(atomic.AddUint64((*uint64)(&n.id), 1) - 1), 382 | }, nil 383 | } 384 | 385 | func (n *TestWorkerFactory) CanCreateWorkers() bool { 386 | return true 387 | } 388 | 389 | func (n *TestWorkerFactory) NeedTimeoutProcessing() bool { 390 | return false 391 | } 392 | 393 | func (n *TestWorkerFactory) Close() { 394 | } 395 | 396 | func (n *TestWorker) ProcessMessage(msg *QueueItem) int { 397 | start, _ := msg.Stream.Seek(0, io.SeekCurrent) 398 | size, _ := msg.Stream.Seek(0, io.SeekEnd) 399 | size -= start 400 | msg.Stream.Seek(start, io.SeekStart) 401 | buf := make([]byte, size) 402 | msg.Stream.Read(buf) 403 | if string(buf) != "error" { 404 | return ProcessedSuccessful 405 | } 406 | return ProcessedWithError 407 | } 408 | 409 | func (n *TestWorker) ProcessTimeout() int { 410 | return ProcessedSuccessful 411 | } 412 | 413 | func (n *TestWorker) GetID() WorkerID { 414 | return n.id 415 | } 416 | 417 | func (n *TestWorker) Close() { 418 | } 419 | 420 | func TestErrorOnChangeFromMemoryToDisk(t *testing.T) { 421 | clearTestFolder() 422 | log, err := logging.CreateLog(logFolder+"logfile.log", 1024*1024*200, 255) 423 | if err != nil { 424 | t.Fatalf("Cannot create logging file: %s", err) 425 | } 426 | opt := DefaultQueueOptions 427 | opt.InputTimeOut = 0 428 | q, err := CreateQueue("Test", TestFolder, log, &TestWorkerFactory{}, &opt) 429 | if err != nil { 430 | t.Fatalf("Cannot create storage: %s", err) 431 | } 432 | 433 
| tmp := make([]byte, 50000) 434 | for i := 0; i < 2; i++ { 435 | saved := q.Insert(tmp) 436 | if !saved { 437 | t.Fatalf("Cannot insert date") 438 | } 439 | } 440 | saved := q.Insert([]byte("error")) 441 | if !saved { 442 | t.Fatalf("Cannot insert date") 443 | } 444 | for i := 0; i < 2; i++ { 445 | saved := q.Insert(tmp) 446 | if !saved { 447 | t.Fatalf("Cannot insert date") 448 | } 449 | } 450 | time.Sleep(2000 * time.Millisecond) 451 | q.Close() 452 | t.Log("Last\n") 453 | log.Close() 454 | 455 | } 456 | -------------------------------------------------------------------------------- /queue/queue_tools.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "hash/crc32" 7 | "io" 8 | "os" 9 | "path" 10 | "strconv" 11 | ) 12 | 13 | const magicNumberValue = 0xEFCDAB8967452301 14 | const magicNumberDataValue = 0x0123456789ABCDEF 15 | const magicNumberIsFile = 0xDEADDADAFCDBACAB 16 | 17 | // dataFileNameByID creates filename by index of the record 18 | func dataFileNameByID(ID StorageIdx) string { 19 | const zeros string = "000000" 20 | var length int 21 | if ID&0xFFFF == ID { 22 | length = 4 23 | } else if ID&0xFFFFFFFF == ID { 24 | length = 8 25 | } else if ID&0xFFFFFFFFFFFF == ID { 26 | length = 12 27 | } else { 28 | length = 16 29 | } 30 | length++ 31 | str := strconv.FormatUint(uint64(ID), 16) 32 | return "stg" + zeros[1:length-len(str)+1] + str + ".dat" 33 | } 34 | 35 | // checkValidFileDataName checks validity of the filename 36 | func checkValidFileDataName(name string) int64 { 37 | ext := path.Ext(name) 38 | if ext != ".dat" { 39 | return -1 40 | } 41 | fname := path.Base(name) 42 | fname = fname[:len(fname)-len(ext)] 43 | if len(fname) < 3 { 44 | return -1 45 | } 46 | if fname[:3] != "stg" { 47 | return -1 48 | } 49 | fname = fname[3:] 50 | res, err := strconv.ParseInt(fname, 16, 0) 51 | if err != nil { 52 | return -1 53 | } 54 | return res 55 | } 56 | 57 | 
// saveDataPrefix saves to data file information about data record (prefix and size of the record) 58 | func saveDataPrefix(fs *os.File, index StorageIdx, size int32) error { 59 | var valAll [16]byte 60 | binary.LittleEndian.PutUint32(valAll[:], magicNumberDataPrefix) 61 | binary.LittleEndian.PutUint64(valAll[4:], uint64(index)) 62 | binary.LittleEndian.PutUint32(valAll[12:], uint32(size)) 63 | _, err := fs.Write(valAll[:]) 64 | return err 65 | } 66 | 67 | //saveDataSuffix Saves suffix of the data record 68 | func saveDataSuffix(fs *os.File, crc uint32) error { 69 | var val [8]byte 70 | binary.LittleEndian.PutUint32(val[:], crc) 71 | binary.LittleEndian.PutUint32(val[4:], magicNumberDataSuffix) 72 | _, err := fs.Write(val[:]) 73 | return err 74 | } 75 | 76 | //saveDataFileHeader saves identity information about data file 77 | func saveDataFileHeader(fs *os.File) error { 78 | var val [8]byte 79 | binary.LittleEndian.PutUint64(val[:], uint64(magicNumberDataValue)) 80 | _, err := fs.Write(val[:]) 81 | return err 82 | } 83 | 84 | func saveDataFileData(File *os.File, Idx StorageIdx, buffer []byte) error { 85 | crc := crc32.ChecksumIEEE(buffer) 86 | if err := saveDataPrefix(File, Idx, int32(len(buffer))); err != nil { 87 | return err 88 | } 89 | if _, err := File.Write(buffer); err != nil { 90 | return err 91 | } 92 | return saveDataSuffix(File, crc) 93 | } 94 | 95 | func bufToStream(buf []byte) (io.ReadSeeker, error) { 96 | if len(buf) < 8 || binary.LittleEndian.Uint64(buf[:]) != uint64(magicNumberIsFile) { 97 | return bytes.NewReader(buf), nil 98 | } 99 | fileName := string(buf[8:]) 100 | return os.OpenFile(fileName, os.O_RDONLY, 0666) 101 | } 102 | 103 | func normalizeFilePath(Path string) string { 104 | a := []rune(Path) 105 | if a[len(a)-1] == rune(os.PathSeparator) { 106 | return Path 107 | } 108 | return Path + string(os.PathSeparator) 109 | } 110 | -------------------------------------------------------------------------------- /queue/worker.go: 
--------------------------------------------------------------------------------
package queue

import "sync/atomic"

// WorkerFactory is the interface for creating new workers.
type WorkerFactory interface {
	// CreateWorker creates a new worker for this factory with a unique ID.
	CreateWorker() (Worker, error)
	// NeedTimeoutProcessing returns true if the worker may use several
	// messages in one action (for example, collecting a large SQL script
	// from a lot of small messages).
	NeedTimeoutProcessing() bool
	// CanCreateWorkers reports whether the factory can create more workers.
	CanCreateWorkers() bool
	// Close releases any resources held by the factory.
	Close()
}

// WorkerID is an identifier of the worker.
type WorkerID uint64

// Results of the execution of the worker.
const (
	// ProcessedSuccessful means the message was handled without errors.
	ProcessedSuccessful = iota
	// ProcessedWithError means processing failed.
	ProcessedWithError
	// ProcessedWaitNext means the worker waits for the next message.
	ProcessedWaitNext
	// ProcessedKillWorker means the worker must be shut down.
	ProcessedKillWorker
)

// Worker is the interface that allows a structure to process outgoing messages.
type Worker interface {
	// ProcessMessage processes the message stored in *QueueItem.
	// Afterwards the worker must call (*Queue).Process with its unique
	// identifier and the result of the processing, and must also push
	// itself into the Worker channel.
	ProcessMessage(*QueueItem) int

	// ProcessTimeout handles the event fired when no messages are
	// available. Afterwards the worker must call (*Queue).Process with its
	// unique identifier and the result of the processing, and must also
	// send itself into the Worker channel.
	ProcessTimeout() int
	// GetID returns the unique identifier of the worker.
	GetID() WorkerID
	// Close is called when the queue finishes working with the worker.
	// Here you can close a connection to a database, etc.
	Close()
}

// QueueWorkerFactory duplicates WorkerFactory method-for-method; presumably
// kept for backward compatibility — verify before removing.
type QueueWorkerFactory interface { // nolint
	CreateWorker() (Worker, error)
	NeedTimeoutProcessing() bool
	CanCreateWorkers() bool
	Close()
}

// nullWorker is a Worker whose every operation reports success.
type nullWorker struct {
	id WorkerID
	// NOTE(review): generateErrors is stored but never consulted by any
	// nullWorker method — confirm whether error simulation was intended.
	generateErrors bool
}

// nullWorkerFactory produces nullWorker instances with sequential IDs.
type nullWorkerFactory struct {
	generateErrors bool
	nextID         WorkerID // advanced atomically in CreateWorker
}

// ProcessMessage always reports success without touching the message.
func (n *nullWorker) ProcessMessage(msg *QueueItem) int {
	return ProcessedSuccessful
}

// ProcessTimeout always reports success.
func (n *nullWorker) ProcessTimeout() int {
	return ProcessedSuccessful
}

// GetID returns the worker's unique identifier.
func (n *nullWorker) GetID() WorkerID {
	return n.id
}

// Close is a no-op: a nullWorker holds no resources.
func (n *nullWorker) Close() {
}

// NeedTimeoutProcessing is always false for the null factory.
func (n *nullWorkerFactory) NeedTimeoutProcessing() bool {
	return false
}

// CreateWorker returns a new nullWorker carrying the next sequential ID.
// The counter is advanced atomically (WorkerID's underlying type is uint64),
// so the factory is safe for concurrent CreateWorker calls.
func (n *nullWorkerFactory) CreateWorker() (Worker, error) {
	return &nullWorker{
		id:             WorkerID(atomic.AddUint64((*uint64)(&n.nextID), 1) - 1),
		generateErrors: n.generateErrors,
	}, nil
}

// CanCreateWorkers is always true for the null factory.
func (n *nullWorkerFactory) CanCreateWorkers() bool {
	return true
}

// Close is a no-op.
func (n *nullWorkerFactory) Close() {}

// saveMessagesFactory never creates workers (CreateWorker returns nil, nil
// and CanCreateWorkers is false); presumably used when messages should only
// be stored — verify against the queue logic.
type saveMessagesFactory struct{}

// NeedTimeoutProcessing is always false: there are no workers to time out.
func (n *saveMessagesFactory) NeedTimeoutProcessing() bool {
	return false
}

// CreateWorker returns no worker and no error.
func (n *saveMessagesFactory) CreateWorker() (Worker, error) {
	return nil, nil
}

// CanCreateWorkers is always false.
func (n *saveMessagesFactory) CanCreateWorkers() bool {
	return false
}

// Close is a no-op.
func (n *saveMessagesFactory) Close() {}
--------------------------------------------------------------------------------