├── .travis.yml ├── LICENSE ├── README.md ├── README_RU.md ├── actions.go ├── codecov.yml ├── coffer.go ├── coffer_bench_test.go ├── coffer_test.go ├── config.go ├── db.go ├── domain ├── entities.go └── repositories.go ├── examples ├── clean_dir.go ├── finance │ ├── finance.go │ └── handlers.go └── quick_start │ ├── handlers.go │ └── quik_start.go ├── go.mod ├── go.sum ├── helper.go ├── reports ├── codes │ └── codes.go └── reports.go ├── services ├── batcher │ ├── LICENSE │ ├── README.md │ ├── batcher.go │ ├── batcher_test.go │ ├── client.go │ ├── client_test.go │ ├── indicator.go │ └── worker.go ├── filenamer │ └── filenamer.go ├── journal │ ├── config.go │ ├── journal.go │ └── journal_test.go ├── log.go ├── porter │ ├── README.md │ ├── keeper.go │ └── porter.go ├── repositories │ ├── handlers │ │ └── handlers.go │ └── records │ │ ├── records.go │ │ └── storage.go ├── resources │ ├── resources.go │ ├── resources_config.go │ └── resources_test.go └── startstop │ ├── config.go │ └── startstop.go ├── test └── README.md └── usecases ├── checkpoint.go ├── config.go ├── follow_interactor.go ├── helpers.go ├── interfaces.go ├── loader.go ├── operations.go ├── operations_test.go ├── records_interactor.go ├── requests.go └── transaction.go /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - 1.9 5 | 6 | before_install: 7 | - go get -t -v ./... 8 | 9 | script: 10 | - go test -race -coverprofile=coverage.txt -covermode=atomic 11 | 12 | after_success: 13 | - bash <(curl -s https://codecov.io/bash) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | LICENSE 2 | 3 | Copyright (c) 2019-2021 Eduard Sesigin. All rights reserved. Contacts: 4 | The license for this software and associated documentation files (the "Software"). 5 | 6 | "Software" is available under different licensing options designed to accommodate the needs of our various users: 7 | 8 | 1) "Software" licensed under the GNU Lesser General Public License (LGPL) version 3, is appropriate for the use of "Software" 9 | provided you can comply with the terms and conditions of the GNU LGPL version 3 (or GNU GPL version 3). 10 | 2) "Software" licensed under commercial licenses is appropriate for development of proprietary/commercial software where you 11 | do not want to share any source code with third parties or otherwise cannot comply with the terms of the GNU LGPL version 3. 12 | 13 | "Software" documentation is licensed under the terms of the GNU Free Documentation License (FDL) version 1.3, 14 | as published by the Free Software Foundation. Alternatively, you may use the documentation in accordance with 15 | the terms contained in a written agreement between you and the author of the documentation. 16 | 17 | For information about selling software, contact the author of the software by e-mail 18 | 19 | DISCLAIMER 20 | 21 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO 22 | THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23 | THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 24 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 25 | OTHER DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![GoDoc](https://godoc.org/github.com/claygod/coffer?status.svg)](https://godoc.org/github.com/claygod/coffer) [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go) [![Travis CI](https://travis-ci.org/claygod/coffer.svg?branch=master)](https://travis-ci.org/claygod/coffer) [![Go Report Card](https://goreportcard.com/badge/github.com/claygod/coffer)](https://goreportcard.com/report/github.com/claygod/coffer) [![codecov](https://codecov.io/gh/claygod/coffer/branch/master/graph/badge.svg)](https://codecov.io/gh/claygod/coffer) 2 | 3 | 4 | # Coffer 5 | 6 | Simply ACID* key-value database. At the medium or even low `latency` it tries to 7 | provide greater `throughput` without losing the ACID properties of the database. The 8 | database provides the ability to create record headers at own discretion and use them 9 | as transactions. The maximum size of stored data is limited by the size of the 10 | computer's RAM. 11 | 12 | *is a set of properties of database transactions intended to guarantee validity even in 13 | the event of errors, power failures, etc. 14 | 15 | Properties: 16 | - high throughput 17 | - tolerated latency 18 | - high reliability 19 | 20 | ACID: 21 | - good durabilty 22 | - compulsory isolation 23 | - atomic operations 24 | - consistent transactions 25 | 26 | ## Table of Contents 27 | 28 | * [Usage](#Usage) 29 | * [Examples](#Examples) 30 | * [API](#api) 31 | + [Methods](#methods) 32 | * [Config](#config) 33 | + [Handler](#Handler) 34 | - [Example of a handler without using an argument](#Example-of-a-handler-without-using-an-argument) 35 | - [Example of a handler using an argument](#Example-of-a-handler-using-an-argument) 36 | * [Launch](#Launch) 37 | - [Start](#Start) 38 | - [Follow](#Follow) 39 | * [Data storage](#Data-storage) 40 | - [Data loading after an incorrect shutdown](#Data-loading-after-an-incorrect-shutdown ) 41 | * [Error codes](#Error-codes) 42 | - [Code List](#Code-List) 43 | - [Code checks through methods](#Code-checks-through-methods) 44 | * [Benchmark](#Benchmark) 45 | * [Dependencies](#Dependencies) 46 | * [ToDo](#TODO) 47 | 48 | ## Usage 49 | 50 | ```golang 51 | package main 52 | 53 | import ( 54 | "fmt" 55 | 56 | "github.com/claygod/coffer" 57 | ) 58 | 59 | const curDir = "./" 60 | 61 | func main() { 62 | // STEP init 63 | db, err, wrn := coffer.Db(curDir).Create() 64 | switch { 65 | case err != nil: 66 | fmt.Println("Error:", err) 67 | return 68 | case wrn != nil: 69 | fmt.Println("Warning:", err) 70 | return 71 | } 72 | if !db.Start() { 73 | fmt.Println("Error: not start") 74 | return 75 | } 76 | defer db.Stop() 77 | 78 | // STEP write 79 | if rep := db.Write("foo", []byte("bar")); rep.IsCodeError() { 80 | fmt.Sprintf("Write error: code `%d` msg `%s`", rep.Code, rep.Error) 81 | return 82 | } 83 | 84 | // STEP read 85 | rep := db.Read("foo") 86 | if rep.IsCodeError() { 87 | fmt.Sprintf("Read error: code `%v` msg `%v`", rep.Code, rep.Error) 88 | return 89 | } 90 | fmt.Println(string(rep.Data)) 91 | } 92 | ``` 93 | 94 | ### Examples 95 | 96 | Use the following links to find many examples how use the transactions: 97 | 98 | - `Quick start` https://github.com/claygod/coffer/tree/master/examples/quick_start 99 | - `Finance` https://github.com/claygod/coffer/tree/master/examples/finance 100 | 101 | ## API 102 | 103 | Started DB returns reports after has performed an operation. Reports containing: 104 | - code (error codes here: `github.com/claygod/coffer/reports/codes`) 105 | - error 106 | - data 107 | - other details 108 | Reporting structures read here:: `github.com/claygod/coffer/reports` 109 | 110 | ### Methods 111 | 112 | * Start 113 | * Stop 114 | * StopHard 115 | * Save 116 | * Write 117 | * WriteList 118 | * WriteListUnsafe 119 | * Read 120 | * ReadList 121 | * ReadListUnsafe 122 | * Delete 123 | * DeleteListStrict 124 | * DeleteListOptional 125 | * Transaction 126 | * Count 127 | * CountUnsafe 128 | * RecordsList 129 | * RecordsListUnsafe 130 | * RecordsListWithPrefix 131 | * RecordsListWithSuffix 132 | 133 | Pay attention! 134 | 135 | All requests which names contain `Unsafe` can be usually executed in both cases: when the 136 | database is running or stopped (not running). In the second case (when DB is stopped), 137 | should not make requests in parallel, because in this case the consistency of DB can 138 | be compromised and data lost. 139 | 140 | Other methods work only if the database is running. 141 | 142 | #### Start 143 | 144 | The `Follow` interactor turns on while running a database. It controls the relevance of the 145 | current checkpoint. 146 | 147 | #### Stop 148 | 149 | Stop DB. If you want to periodically stop and start the database in your application, probably, 150 | you may want to create a new client when the DB has been stopped. 151 | 152 | #### Write 153 | 154 | Write a new record in a database specifying the key and value. Their length must satisfy the 155 | requirements specified in configuration files. 156 | 157 | #### WriteList 158 | 159 | Write several records to the database specifying corresponding `map` in the arguments. 160 | 161 | Strict mode (strictMode=true): 162 | The operation performs if there are no records with these keys. 163 | A list of existed records is returned. 164 | 165 | Optional mode (strictMode=false): 166 | The operation performs regardless of whether there are records with these keys or not. 167 | A list of existed records is returned. 168 | 169 | Important: this argument is a reference argument; it cannot be changed in the called code! 170 | 171 | #### WriteListUnsafe 172 | 173 | Write several records to the database specified the corresponding map in the arguments. 174 | This method exists in order to fill the database faster before it starts. 175 | The method is not for parallel use. 176 | 177 | #### Read 178 | 179 | Read one record from the database. In the received report there will be a result code. 180 | If it is positive, that means that the value in the right data field. 181 | 182 | #### ReadList 183 | 184 | Read several records. There is a limit on the maximum number of readable records in the 185 | configuration. Except found records the list of not found records is returned. 186 | 187 | #### ReadListUnsafe 188 | 189 | Read several records. The method can be called when the database is stopped (not running). 190 | The method is not for parallel use. 191 | 192 | #### Delete 193 | 194 | Remove a single record. 195 | 196 | #### DeleteList 197 | 198 | Strict mode (true): 199 | Delete several records. It is possible only if all records are in the database. 200 | If at least there is a lack of one record, none of records will be deleted. 201 | 202 | Optional mode (false): 203 | Delete several records. All found records from the list in will be deleted in DB. 204 | 205 | #### Transaction 206 | 207 | Make a transaction. The transaction should be added in the database at the stage of creating 208 | and configuring. The user of the database is responsible for the consistency of the functionality of 209 | transaction handlers between different runs of the database. 210 | The transaction returns new values which are stored in the DB. 211 | 212 | #### Count 213 | 214 | Get the number of records in the database. A request can be made only when the database has started. 215 | 216 | #### CountUnsafe 217 | 218 | Get the number of records in the database. Requests to a stopped (or not running), 219 | database cannot be made in parallel! 220 | 221 | #### RecordsList 222 | 223 | Get a list of all database keys. With a large number of records in the database, the request 224 | will be slow. Use it only at great need to avoid problems. 225 | The method works only when the database is running. 226 | 227 | #### RecordsListUnsafe 228 | 229 | Get a list of all database keys. With a large number of records in the database, the request 230 | will be slow. Use it only at great need to avoid problems. The method is not for parallel use 231 | while using a request when database is stopped (or not running). 232 | 233 | #### RecordsListWithPrefix 234 | 235 | Get a list of all keys with prefix specified in the argument (prefix is the begging of record string). 236 | 237 | ## Config 238 | 239 | If you specify the path to the database directory all configuration parameters will be 240 | reset to the default: 241 | 242 | cof, err, wrn := Db(dirPath) . Create() 243 | 244 | Default values can be found in the `/config.go` file. But each of the parameters can be 245 | configured: 246 | 247 | ```golang 248 | Db(dirPath). 249 | BatchSize(batchSize). 250 | LimitRecordsPerLogfile(limitRecordsPerLogfile). 251 | FollowPause(100*time.Second). 252 | LogsByCheckpoint(1000). 253 | AllowStartupErrLoadLogs(true). 254 | MaxKeyLength(maxKeyLength). 255 | MaxValueLength(maxValueLength). 256 | MaxRecsPerOperation(1000000). 257 | RemoveUnlessLogs(true). 258 | LimitMemory(100 * 1000000). 259 | LimitDisk(1000 * 1000000). 260 | Handler("handler1", &handler1). 261 | Handler("handler2", &handler2). 262 | Handlers(map[string]*handler). 263 | Create() 264 | ``` 265 | 266 | ### Db 267 | 268 | Specify the work directory where the database will store files. In case of a new 269 | database the directory should not contain files with the “log”, “check”, “checkpoint” 270 | extensions. 271 | 272 | ### BatchSize 273 | 274 | The maximum number of records which database can add at a time (this applies to 275 | setting up internal processes; this does not apply to the number of records added at a 276 | time). 277 | Decreasing of this parameter slightly improves the `latency` (but not too much). 278 | Increasing of this parameter slightly degrades the `latency`, but at the same time 279 | increases the `throughput`. 280 | 281 | ### LimitRecordsPerLogfile 282 | 283 | A number of operations which is going to be written to one log file. Small number 284 | forces the database creates new files very often, and it adversely affects the speed of 285 | the database. A big number reduces the number of pauses while creating files, but 286 | the size of files increases. 287 | 288 | ### FollowPause 289 | 290 | The size of the time interval for starting the `Follow` interactor, which analyzes old 291 | logs and periodically creates new checkpoints. 292 | 293 | ### LogsByCheckpoint 294 | 295 | The option specifies after how many full log files it is necessary to create a new 296 | checkpoint (the smaller number, the more often it should be created). For good 297 | productivity, it’s better not to do it too often. 298 | 299 | ### AllowStartupErrLoadLogs 300 | 301 | The option allows the database works at startup, even if the last log file was 302 | completed incorrectly, i.e. the last record is corrupted (a typical situation for an 303 | abnormal shutdown). By default, the option is enabled. 304 | 305 | ### MaxKeyLength 306 | 307 | This is the maximum allowable key length. 308 | 309 | ### MaxValueLength 310 | 311 | This is the maximum size of the value length. 312 | 313 | ### MaxRecsPerOperation 314 | 315 | This is the maximum number of records that is possible per operation. 316 | 317 | ### RemoveUnlessLogs 318 | 319 | The option is for deleting old files. After `Follow` has created a new checkpoint, with the 320 | permission of this option, it removes unnecessary operations logs. If for some reason 321 | it’s needed to store the whole log of operations, this option can be disabled. But be 322 | ready that this will increase the consumption of disk space. 323 | 324 | ### LimitMemory 325 | 326 | This is the minimum size of free RAM. When this limit reaches, the database 327 | terminates all operations and stops to avoid data loss. 328 | 329 | ### LimitDisk 330 | 331 | This is the minimum amount of free space on the hard drive. When this limit reaches, 332 | the database terminates all operations and stops to avoid data loss. 333 | 334 | ### Handler 335 | 336 | Add a transaction handler. It is important that the name of the handler and the results 337 | of its work should be idempotent while running the same database at different time. 338 | Otherwise handlers will work differently and it will leads to a violation of data 339 | consistency. If you intend to make changes to handlers time to time, adding a version 340 | number to the key helps streamline this process. 341 | 342 | Conditions: 343 | - The argument passed to the handler must be a number (a slice of bytes). 344 | - If you need to transfer complex structures, it must be serialized into bytes. 345 | - The handler can only operate on existing records. 346 | - The handler cannot delete database records. 347 | - The handler should return the new values of all the requested records at the end of his work. 348 | - The number of records modified with the header is specified in the `MaxRecsPerOperation` configuration. 349 | 350 | #### Example of a handler without using an argument 351 | 352 | ```golang 353 | func HandlerExchange(arg []byte, recs map[string][]byte) (map[string][]byte, error) { 354 | if arg != nil { 355 | return nil, fmt.Errorf("Args not null.") 356 | } else if len(recs) != 2 { 357 | return nil, fmt.Errorf("Want 2 records, have %d", len(recs)) 358 | } 359 | recsKeys := make([]string, 0, 2) 360 | recsValues := make([][]byte, 0, 2) 361 | for k, v := range recs { 362 | recsKeys = append(recsKeys, k) 363 | recsValues = append(recsValues, v) 364 | } 365 | out := make(map[string][]byte, 2) 366 | out[recsKeys[0]] = recsValues[1] 367 | out[recsKeys[1]] = recsValues[0] 368 | return out, nil 369 | } 370 | ``` 371 | 372 | #### Example of a handler using an argument 373 | 374 | ```golang 375 | func HandlerDebit(arg []byte, recs map[string][]byte) (map[string][]byte, error) { 376 | if arg == nil || len(arg) != 8 { 377 | return nil, fmt.Errorf("Invalid Argument: %v.", arg) 378 | } else if len(recs) != 1 { 379 | return nil, fmt.Errorf("Want 1 record, have %d", len(recs)) 380 | } 381 | delta := bytesToUint64(arg) 382 | var recKey string 383 | var recValue []byte 384 | for k, v := range recs { 385 | recKey = k 386 | recValue = v 387 | } 388 | if len(recValue) != 8 { 389 | return nil, fmt.Errorf("The length of the value in the record is %d bytes, but 8 bytes are needed", len(recValue)) 390 | } 391 | curAmount := bytesToUint64(recValue) 392 | newAmount := curAmount + delta 393 | if curAmount > newAmount { 394 | return nil, fmt.Errorf("Account overflow. There is %d, a debit of %d.", curAmount, delta) 395 | } 396 | return map[string][]byte{recKey: uint64ToBytes(newAmount)}, nil 397 | } 398 | ``` 399 | 400 | ### Handlers 401 | 402 | Add several handlers to the database at once. Important: handlers with matching keys 403 | are overwritten. 404 | 405 | ### Create 406 | 407 | A mandatory command (must be the last one) finishes the configuration and creates 408 | the database. 409 | 410 | ## Launch 411 | 412 | ### Start 413 | 414 | At starting DB, the number in the end should be a checkpoint. If it is not, the database 415 | has been stopped incorrectly. In this “error” case the last available checkpoint and all 416 | logs after the checkpoint are loaded until it is possible. 417 | 418 | Load the data until it is possible and finish the loading (there is must be a broken log 419 | (log with uncompleted data) or last log which has created before database has been 420 | stopped incorrectly, so you can load all available data). After all available data has 421 | loaded the database creates a new checkpoint. Only after it you can continue work 422 | with code. 423 | 424 | ### Follow 425 | 426 | After the database has been started, it writes all operations to a log. As a result, the 427 | log file can greatly grow. If at the end of the application work the database is correctly 428 | stopped, a new checkpoint appears. At the next start of DB, the data will be taken 429 | from it. 430 | 431 | But if the database is incorrectly stopped a new checkpoint will not be created. In this 432 | case, at a new start of DB, the database loads the old checkpoint and re-performs all 433 | operations that has been completed and recorded in the log. This process can take 434 | much time, and as a result, the database will be loading for a long time (not always 435 | acceptable for applications). 436 | 437 | That is why there is the follower mechanism in the database that methodically goes 438 | through the logs while working of the database and periodically creates checkpoints 439 | which are closer to the current moment. Also, the follower has a functionality to clean 440 | old logs and checkpoints in order to free up the space of hard drive. 441 | 442 | ## Data storage 443 | 444 | Your data is stored as files in the directory that has been specified while creating the 445 | database. Files with the log extension contain a description of completed operations. 446 | Files with the `checkpoint` extension contain snapshots of the database state at a 447 | certain point. Files with the `check` extension contain an incomplete snapshot of the 448 | database state. Using the `RemoveUnlessLogs` configuration parameter, you can force 449 | the database to delete old and unnecessary files in order to save the disk space. 450 | 451 | If the database is stopped in the regular mode, the last file, which has been written to 452 | the disk, is the `checkpoint` file. The number of the `checkpoint` will be the maximum 453 | number. If the database is stopped incorrectly, most likely that files with the `log` or 454 | `check` extensions will have the maximum number. 455 | 456 | Attention! Before the database has not been completely stopped, it is forbidden to 457 | carry out any operations with database files. 458 | 459 | If you want to copy the database to somewhere, you must copy all content of the 460 | directory. If you want to take a minimum of files while copying, then you need: to 461 | copy the file with the `checkpoint` extension (which has the maximum number), and 462 | all files with the `log` extension (which have bigger number than copied file with the 463 | `checkpoint` extension). 464 | 465 | ### Data loading after an incorrect shutdown 466 | 467 | If the application work, which is using the database, is not completed correctly, then 468 | at the next staring the application, the database will try to find the last valid snapshot 469 | of the `checkpoint` state. 470 | 471 | When the file is found, the database will upload it, and then upload all the `log` files 472 | with big numbers. We expect that the last `log` file might not be filled completely 473 | because during the recording the work could be interrupted. 474 | 475 | Only undamaged part is uploaded from the damaged file and after that the database 476 | uploading is considering as completed. 477 | 478 | At the end of the uploading, the database creates a new `checkpoint`. If system 479 | crashes occur during the start (loading) of the database, it possible to get errors and 480 | violation of data consistency. 481 | 482 | ## Error Codes 483 | 484 | Error codes are stored here: `github.com/claygod/coffer/reports/codes` 485 | If the `Ok` code is received, the operation is finished completely. If the Code contains 486 | `Error` (the operation has not been completed or not fully completed, or completed 487 | with an error), you can continue working with the database. If the code contains 488 | `Panic`, you cannot continue working with the database because of it stage. 489 | 490 | ### Code List 491 | 492 | - Ok - done without comment 493 | - Error - not completed or not fully completed, but you can continue to work 494 | - ErrRecordLimitExceeded - record limit per operation exceeded 495 | - ErrExceedingMaxValueSize - value is too long 496 | - ErrExceedingMaxKeyLength - key is too long 497 | - ErrExceedingZeroKeyLength - key is too short 498 | - ErrHandlerNotFound - no handler found 499 | - ErrParseRequest – preparing of logging request was failed 500 | - ErrResources - not enough resources 501 | - ErrNotFound - no keys are found 502 | - ErrReadRecords - reading records error for a transaction (if there is a lack of at least one record, a transaction cannot be performed) 503 | - ErrHandlerReturn - found and uploaded handler returned an error 504 | - ErrHandlerResponse - handler returned incomplete reply 505 | - Panic - not finished, further work with the database is impossible 506 | - PanicStopped – application has been stopped 507 | - PanicWAL - an error occurred in the operation log 508 | 509 | ### Code checks through methods 510 | 511 | In order not to export data to an application (which works with a database), reports 512 | have methods: 513 | 514 | - IsCodeOk - done without comment 515 | - IsCodeError - not completed or not fully completed, but you can continue to work 516 | - IsCodeErrRecordLimitExceeded - record limit for one operation is exceeded 517 | - IsCodeErrExceedingMaxValueSize - value is too long 518 | - IsCodeErrExceedingMaxKeyLength - key is too long 519 | - IsCodeErrExceedingZeroKeyLength - key is too short 520 | - IsCodeErrHandlerNotFound - no handler found 521 | - IsCodeErrParseRequest - preparing of logging request was failed 522 | - IsCodeErrResources - not enough resources 523 | - IsCodeErrNotFound - no keys are found 524 | - IsCodeErrReadRecords - reading records error for a transaction (if there is a lack of at least one record, a transaction cannot be performed) 525 | - IsCodeErrHandlerReturn – found and uploaded handler returned an error 526 | - IsCodeErrHandlerResponse - handler returned incomplete reply 527 | - IsCodePanic - not finished, further work with the database is impossible 528 | - IsCodePanicStopped – application has been stopped 529 | - IsCodePanicWAL - error occurred in the operation log 530 | 531 | It is not very convenient to make large switches to check the received codes. You can 532 | limit yourself to just three checks: 533 | 534 | - IsCodeOk - done without comment 535 | - IsCodeError - not completed or not fully completed, but you can continue to work (covers ALL errors) 536 | - IsCodePanic - not completed, further work with the database is not possible (covers ALL panics) 537 | 538 | ## Benchmark 539 | 540 | - BenchmarkCofferWriteParallel32LowConcurent-4 100000 12933 ns/op 541 | - BenchmarkCofferTransactionSequence-4 2000 227928 ns/op 542 | - BenchmarkCofferTransactionPar32NotConcurent-4 100000 4132 ns/op 543 | - BenchmarkCofferTransactionPar32HalfConcurent-4 100000 4199 ns/op 544 | 545 | ## Dependencies 546 | 547 | - github.com/shirou/gopsutil/disk 548 | - github.com/shirou/gopsutil/mem 549 | - github.com/sirupsen/logrus 550 | 551 | ## TODO 552 | 553 | - [x] the log should start a new log at startup 554 | - [x] study out the names of checkpoints and logs (numbering logic) 555 | - [x] launch and work of follower 556 | - [x] cleaning unnecessary logs via follower 557 | - [ ] provide an opportunity not to delete old logs, add a test! 558 | - [x] loading from broken files to stop loading, but work must continue (AllowStartupErrLoadLogs) 559 | - [x] cyclic loading of checkpoints until they run out (with errors) 560 | - [x] returns not errors, but reports of work that’s been completed 561 | - [x] add DeleteOptional, and add it in Operations too 562 | - [x] test Count 563 | - [x] Write test 564 | - [x] Read test 565 | - [x] Delete test 566 | - [x] Transaction test 567 | - [x] test RecordsList 568 | - [x] test RecordsListUnsafe 569 | - [x] test RecordsListWithPrefix 570 | - [x] test RecordsListWithSuffix 571 | - [x] ReadListUnsafe test 572 | - [x] boot test with a broken log (last, the rest are ok) 573 | - [x] boot test with broken checkpoint 574 | - [x] boot test with a broken log and another the log which follow after 575 | - [x] transaction usage test 576 | - [x] for convenience of testing do WriteUnsafe 577 | - [x] ~~ what for WriteUnsafeRecord is need in Checkpoint ? (for recording at startup?) ~~ alternative to WriteListUnsafe (faster) 578 | - [x] benchmark of competitive and non-competitive records 579 | - [x] benchmark reading competitive 580 | - [ ] benchmark write and read in competitive mode 581 | - [x] benchmark competitive transactions in parallel mode 582 | - [ ] at boot - when files are broken, the “wrn” may returns, not “err” 583 | - [x] study out the log and the batch, why at fast record they get to the following log 584 | - [x] interception of panics at the root of the application and at the level of use cases 585 | - [ ] ~~ during transactions, you can delete some of the records from participating (! need for a question!) ~~ 586 | - [x] testing auxiliary helpers 587 | - [x] while creating a database immediately add a list of handlers because the uploading from logs happens instantly 588 | - [x] add a convenient configurator while creating a database 589 | - [x] translate comments into English 590 | - [x] clear code from old artifacts 591 | - [ ] create a directory for documentation 592 | - [x] create a directory for examples 593 | - [x] make a simple example with writing, transaction and reading 594 | - [x] make an example with financial transactions 595 | - [ ] error handling example 596 | - [ ] test the linter and eliminate all incorrectness in the code 597 | - [x] add Usage / Quick start text to readme 598 | - [x] description of error codes 599 | - [x] configuration description 600 | - [x] in the description specify third-party packages (as dependencies) 601 | - [x] add methods for reports in order to check for all errors like IsErrBlahBlahBlah 602 | - [x] transfer all imported packages to distribution 603 | - [x] switch from WriteUnsafeRecord to WriteListUnsafe 604 | - [x] add ReadListUnsafe for an ability to read when the database is stopped 605 | - [x] add RecordsListUnsafe, which can work with the stopped and running database 606 | - [x] get a list of keys with a condition of a prefix: RecordsListWithPrefix 607 | - [x] get a list of keys with a condition of a suffix: RecordsListWithSuffix 608 | - [x] remove the Save method 609 | - [x] returns new values in the report during a transaction 610 | - [x] check returned value in tests 611 | - [x] start numbering with big numbers, for example million or a billion (more convenient for sorting files) 612 | - [x] give a correct description-comment for all public methods 613 | - [ ] create description for error returns and warnings in the Create method 614 | - [ ] pause in the batcher - check its size, set the optimal size 615 | - [x] add in the description that the data is stored both on disk and in memory during the operation of the database 616 | - [ ] method for getting all log files and checkpoints 617 | - [ ] method for viewing a log file 618 | - [ ] method of viewing a checkpoint file 619 | - [ ] the method of strict adding record into the database (only if the record with such a key has not already existed) 620 | - [ ] add a method to view the status of the database 621 | 622 | ### Copyright © 2019-2022 Eduard Sesigin. All rights reserved. Contacts: 623 | -------------------------------------------------------------------------------- /README_RU.md: -------------------------------------------------------------------------------- 1 | [![GoDoc](https://godoc.org/github.com/claygod/coffer?status.svg)](https://godoc.org/github.com/claygod/coffer) [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go) [![Travis CI](https://travis-ci.org/claygod/coffer.svg?branch=master)](https://travis-ci.org/claygod/coffer) [![Go Report Card](https://goreportcard.com/badge/github.com/claygod/coffer)](https://goreportcard.com/report/github.com/claygod/coffer) [![codecov](https://codecov.io/gh/claygod/coffer/branch/master/graph/badge.svg)](https://codecov.io/gh/claygod/coffer) 2 | 3 | # Coffer 4 | 5 | Простая key-value ACID* база данных. При среднем или даже низком `latency` старается обеспечить 6 | большую пропускную способность `throughput`, не жертвуя при этом ACID свойствами БД. 7 | БД даёт возможность создавать хидеры записей по своему усмотрению и использовать их в качестве транзакций. 8 | Максимальный размер хранимых данных ограничен размером оперативной памяти компьютера. 9 | 10 | *is a set of properties of database transactions intended to guarantee validity even in the event of errors, power failures, etc. 11 | 12 | ## Table of Contents 13 | 14 | * [Usage](#Usage) 15 | * [Examples](#Examples) 16 | * [API](#api) 17 | + [Methods](#methods) 18 | * [Config](#config) 19 | + [Handler](#Handler) 20 | - [Пример хэндлера без использования агрумента](#Пример-хэндлера-без-использования-агрумента) 21 | - [Пример хэндлера с использованием агрумента](#Пример-хэндлера-с-использованием-агрумента) 22 | * [Запуск](#Запуск) 23 | - [Старт](#Старт) 24 | - [Follow](#Follow) 25 | * [Хранение данных](#Хранение-данных) 26 | - [Загрузка данных после некорректного отключения](#Загрузка-данных-после-некорректного-отключения) 27 | * [Коды ошибок](#Коды-ошибок) 28 | - [Список кодов](#Список-кодов) 29 | - [Проверка кодов через методы](#Проверка-кодов-через-методы) 30 | * [Benchmark](#Benchmark) 31 | * [Dependencies](#Dependencies) 32 | * [ToDo](#TODO) 33 | 34 | ## Usage 35 | 36 | ```golang 37 | package main 38 | 39 | import ( 40 | "fmt" 41 | 42 | "github.com/claygod/coffer" 43 | ) 44 | 45 | const curDir = "./" 46 | 47 | func main() { 48 | // STEP init 49 | db, err, wrn := coffer.Db(curDir).Create() 50 | switch { 51 | case err != nil: 52 | fmt.Println("Error:", err) 53 | return 54 | case wrn != nil: 55 | fmt.Println("Warning:", err) 56 | return 57 | } 58 | if !db.Start() { 59 | fmt.Println("Error: not start") 60 | return 61 | } 62 | defer db.Stop() 63 | 64 | // STEP write 65 | if rep := db.Write("foo", []byte("bar")); rep.IsCodeError() { 66 | fmt.Sprintf("Write error: code `%d` msg `%s`", rep.Code, rep.Error) 67 | return 68 | } 69 | 70 | // STEP read 71 | rep := db.Read("foo") 72 | rep.IsCodeError() 73 | if rep.IsCodeError() { 74 | fmt.Sprintf("Read error: code `%v` msg `%v`", rep.Code, rep.Error) 75 | return 76 | } 77 | fmt.Println(string(rep.Data)) 78 | } 79 | ``` 80 | 81 | ### Examples 82 | 83 | По указанным путям вы найдёте много примеров использования транзакций. 84 | 85 | - `Quick start` https://github.com/claygod/coffer/tree/master/examples/quick_start 86 | - `Finance` https://github.com/claygod/coffer/tree/master/examples/finance 87 | 88 | ## API 89 | 90 | Запущенная БД после выполнения операции возвращает отчёт с результатами: 91 | - кодом 92 | - ошибкой (коды ошибок хранятся здесь: `github.com/claygod/coffer/reports`) 93 | - данными 94 | - другими подробностями 95 | Ознакомиться со структурами ответов можно здесь: `github.com/claygod/coffer/reports` 96 | 97 | ### Methods 98 | 99 | * Start 100 | * Stop 101 | * StopHard 102 | * Save 103 | * Write 104 | * WriteList 105 | * WriteListUnsafe 106 | * Read 107 | * ReadList 108 | * ReadListUnsafe 109 | * Delete 110 | * DeleteListStrict 111 | * DeleteListOptional 112 | * Transaction 113 | * Count 114 | * CountUnsafe 115 | * RecordsList 116 | * RecordsListUnsafe 117 | * RecordsListWithPrefix 118 | * RecordsListWithSuffix 119 | 120 | Внимание! Все запросы, имена которых содержат `Unsafe` обычно можно выполнять как при запущенной, 121 | так и при остановленной (не запущеной) БД. Во втором случае нельзя запросы делать параллельно, 122 | иначе консистентность БД может оказаться нарушенной, а данные могут быть потеряны. 123 | 124 | #### Start 125 | 126 | Run the database. When launched, the `Follow` interactor is turned on, 127 | which monitors the relevance of the current checkpoint. 128 | 129 | #### Stop 130 | 131 | Остановить БД. Если вы хотите в своём приложении периодически останавливать и запускать БД, 132 | возможно, после остановки вы захотите создать новый клиент. 133 | 134 | #### Write 135 | 136 | Записать в БД новую запись, указав ключ и значение. 137 | Их длина должна удовлетворять требованиям, указанным в конфигурации. 138 | 139 | #### WriteList 140 | 141 | Записать в БД несколько записей, указав в агрументах соответствующую `map`. 142 | Strict mode (strictMode=true): 143 | Операция будет выполнена, если нет записей с такими ключами. 144 | Возвращается список уже существующих записей. 145 | Optional mode (strictMode=false): 146 | Операция будет выполнена не зависимо от того, есть записи с такими ключами или нет. 147 | Возвращается список уже существующих записей. 148 | Важно: этот аргумент ссылочный, его нельзя изменять! 149 | 150 | #### WriteListUnsafe 151 | 152 | Записать в БД несколько записей, указам в агрументах соответствующую `map`. 153 | Этот метод существует для того, чтобы перед запуском БД немного быстрее её наполнять. 154 | Метод не подразумевает параллельного использования. 155 | 156 | #### Read 157 | 158 | Прочитать одну запись из БД. В полученом `report` будет код результата, и если он положительный, 159 | то и значение в соответствующем поле. 160 | 161 | #### ReadList 162 | 163 | Прочитать несколько записей. Ограничение на максимальное количество читаемых записей есть в конфигурации. 164 | Помимо найденных записей, возвращается список не найденных записей. 165 | 166 | #### ReadListUnsafe 167 | 168 | Прочитать несколько записей. Метод может быть вызван при остановленной (или не запущенной) БД. 169 | Метод не подразумевает параллельного использования. 170 | 171 | #### Delete 172 | 173 | Удалить одну запись. 174 | 175 | #### DeleteList 176 | 177 | Strict: 178 | Удалить несколько записей, но только если все они есть в БД. Если хотя бы одной записи нет, 179 | то ни одна запись удалена не будет. 180 | 181 | Optional: 182 | Удалить несколько записей. Удалены будут все записи из списка, которые будут найдены в БД. 183 | 184 | #### Transaction 185 | 186 | Выполнить транзакцию. Транзакция должна быть занесена в БД на этапе создания и конфигурирования БД. 187 | Ответственность за консистентность функционала обработчиков транзакций между разными запусками БД 188 | лежит на пользователе БД. Транзакция возвращает новые значения, сохранённые в БД. 189 | 190 | #### Count 191 | 192 | Получить количество записей в БД. Запрос можно делать только к запущенной БД 193 | 194 | #### CountUnsafe 195 | 196 | Получить количество записей в БД. Запросы к остановленной или не запущеннной БД нельзя делать параллельно! 197 | 198 | #### RecordsList 199 | 200 | Получить список всех ключей БД. При большом количестве записей в БД запрос будет медленным, поэтому применяйте 201 | его только в случае крайней нужды. Метод работает только при запущенной БД. 202 | 203 | #### RecordsListUnsafe 204 | 205 | Получить список всех ключей БД. При большом количестве записей в БД запрос будет медленным, поэтому применяйте 206 | его только в случае крайней нужды. При использовании запроса с остановленной или не запущенной БД параллельность 207 | запрещена. 208 | 209 | #### RecordsListWithPrefix 210 | 211 | Получить список всех ключей, имеющих указанный в аргументах префикс (начинается с этой строки). 212 | 213 | ## Config 214 | 215 | Фактически, достаточно указать путь к директории базы данных, и все параметры конфигурации установятся на дефолтные: 216 | 217 | cof, err, wrn := Db(dirPath) . Create() 218 | 219 | Дефолтные значения можно увидеть в файле `/config.go` . 220 | Однако каждый из параметров можно сконфигурировать: 221 | 222 | ```golang 223 | Db(dirPath). 224 | BatchSize(batchSize). 225 | LimitRecordsPerLogfile(limitRecordsPerLogfile). 226 | FollowPause(100*time.Second). 227 | LogsByCheckpoint(1000). 228 | AllowStartupErrLoadLogs(true). 229 | MaxKeyLength(maxKeyLength). 230 | MaxValueLength(maxValueLength). 231 | MaxRecsPerOperation(1000000). 232 | RemoveUnlessLogs(true). 233 | LimitMemory(100 * 1000000). 234 | LimitDisk(1000 * 1000000). 235 | Handler("handler1", &handler1). 236 | Handler("handler2", &handler2). 237 | Handlers(map[string]*handler). 238 | Create() 239 | ``` 240 | 241 | ### Db 242 | 243 | Указываем рабочую директорию, в которой БД будет хранить свои файлы. Для новой базы данных 244 | директория должна быть свободной от файлов с расширениями log, check, checkpoint. 245 | 246 | ### BatchSize 247 | 248 | Максимальное количество записей, которое БД может добавить за один раз. Уменьшение этого параметра 249 | немного улучшает `latency` (но не слишком сильно). Увеличение этого параметра немного ухудшает `latency`, 250 | но при этом увеличивает пропускную способность `throughput`. 251 | 252 | ### LimitRecordsPerLogfile 253 | 254 | Количество операций, которые будут записаны в один log-файл. Маленькая цифра заставит БД очень часто создавать 255 | новые файлы, что отрицательно скажется на скорости работы БД. Большая цифра уменьшает количество пауз на создание 256 | файлов, но файлы при этом становятся более крупными. 257 | 258 | ### FollowPause 259 | 260 | Размер интервала для запуска `Follow` интерактора, анализирующего старые логи и периодически создающего 261 | новые чекпоинты (точки останова). 262 | 263 | ### LogsByCheckpoint 264 | 265 | После скольких заполненных лог-файлов необходимо создавать новый чекпоинт, чем меньше цифра, тем чаще создаём. 266 | Для производительности лучше это делать не слишком часто. 267 | 268 | ### AllowStartupErrLoadLogs 269 | 270 | Опция разрешает работу БД при загрузке, даже в том случае, если последний файл логов закончен некорректно 271 | (типичная ситуация для нештатного завершения работы). По умолчанию опция разрешена. 272 | 273 | ### MaxKeyLength 274 | 275 | Максимально допустимая длина ключа. 276 | 277 | ### MaxValueLength 278 | 279 | Максимальный размер значения для записи. 280 | 281 | ### MaxRecsPerOperation 282 | 283 | Максимальное количество записей, которое может быть задействовано в одной операции. 284 | 285 | ### RemoveUnlessLogs 286 | 287 | Опция удаления старых файлов. После того, как `Follow` создал новый чекпоинт, он с разрешения этой 288 | опции удаляет теперь уже не нужные логи операций. Если вам по каким-то причина нужно хранить весь лог операций, 289 | вы можете отключить эту опцию, однако будьте готовы к тому, что это увеличит расход дискового пространства. 290 | 291 | ### LimitMemory 292 | 293 | Минимальный размер свободной оперативной памяти, при котором БД перестаёт выполнять операции 294 | и останавливается во избежание потери данных. 295 | 296 | ### LimitDisk 297 | 298 | Минимальный размер свободного места на жёстком диске, при котором БД перестаёт выполнять операции 299 | и останавливается во избежание потери данных. 300 | 301 | ### Handler 302 | 303 | Добавить хэндлер транзакции. Важно, чтобы при разных запусках одной и той же БД имя хэндлера 304 | и результаты его работы были идемпотентны. В противном случае в разное время при разных запусках 305 | хэндлеры будут работать по разному, что приведёт к нарушению консистентности данных. 306 | Если вы предполагаете со временем вносить изменения в хэндлеры, 307 | возможно добавление в ключ номера версии поможет упорядочить такой процесс. 308 | 309 | Ограничения: 310 | - Аргумент, передаваемый в хэндлер, должен быть числом, слайсом байтов. 311 | - При необходимости передать сложные структуры, их нужно сериализовать в байты. 312 | - Хидер может оперировать только уже существующими записями. 313 | - Хидер не может удалять записи. 314 | - Хидер по окончании работы должен вернуть новые значения всех запрошенных записей. 315 | - Количество изменяемых хидером записей установлено в конфигурации `MaxRecsPerOperation` 316 | 317 | #### Пример хэндлера без использования агрумента 318 | 319 | ```golang 320 | func HandlerExchange(arg []byte, recs map[string][]byte) (map[string][]byte, error) { 321 | if arg != nil { 322 | return nil, fmt.Errorf("Args not null.") 323 | } else if len(recs) != 2 { 324 | return nil, fmt.Errorf("Want 2 records, have %d", len(recs)) 325 | } 326 | recsKeys := make([]string, 0, 2) 327 | recsValues := make([][]byte, 0, 2) 328 | for k, v := range recs { 329 | recsKeys = append(recsKeys, k) 330 | recsValues = append(recsValues, v) 331 | } 332 | out := make(map[string][]byte, 2) 333 | out[recsKeys[0]] = recsValues[1] 334 | out[recsKeys[1]] = recsValues[0] 335 | return out, nil 336 | } 337 | ``` 338 | 339 | #### Пример хэндлера с использованием агрумента 340 | 341 | ```golang 342 | func HandlerDebit(arg []byte, recs map[string][]byte) (map[string][]byte, error) { 343 | if arg == nil || len(arg) != 8 { 344 | return nil, fmt.Errorf("Invalid Argument: %v.", arg) 345 | } else if len(recs) != 1 { 346 | return nil, fmt.Errorf("Want 1 record, have %d", len(recs)) 347 | } 348 | delta := bytesToUint64(arg) 349 | var recKey string 350 | var recValue []byte 351 | for k, v := range recs { 352 | recKey = k 353 | recValue = v 354 | } 355 | if len(recValue) != 8 { 356 | return nil, fmt.Errorf("The length of the value in the record is %d bytes, but 8 bytes are needed", len(recValue)) 357 | } 358 | curAmount := bytesToUint64(recValue) 359 | newAmount := curAmount + delta 360 | if curAmount > newAmount { 361 | return nil, fmt.Errorf("Account overflow. There is %d, a debit of %d.", curAmount, delta) 362 | } 363 | return map[string][]byte{recKey: uint64ToBytes(newAmount)}, nil 364 | } 365 | ``` 366 | 367 | ### Handlers 368 | 369 | Добавить несколько нэндлеров в БД за один раз. Важный момент: хэндлеры с совпадающими ключами перезатираются. 370 | 371 | ### Create 372 | 373 | Обязательная команда (должна быть последней), которая заканчивает конфигурирование и создаёт БД. 374 | 375 | ## Запуск 376 | 377 | ### Старт 378 | 379 | При старте последним по номеру должен быть чекпойнт. Если это не так, то значит, остановка была некорректной. 380 | Тогда грузится последний имеющийся чекпоинт и все логи после него до тех пор, пока это возможно. На битом логе 381 | или последнем логе скачиваем, пока получается, и на этом загрузку заканчиваем. БД создаёт новый чекпоинт, 382 | и после этого возможно продолжение исполнение кода. 383 | 384 | ### Follow 385 | 386 | После того, как БД будет запущена, она пишет все операции в журнал. В результате лог может сильно разрастись. 387 | Если в конце концов при окончании работы приложения БД будет корректно остановлена, 388 | то появится новый чекпойнт, и при последующем старте именно из него и будут взяты данные. 389 | Однако, остановка может оказаться некорректной, и новый чекпойнт создан не будет. 390 | 391 | В этом случае при новом старте БД будет вынуждена загрузить старый чекпоинт, и провести заново все операции, 392 | которые были совершены и записаны в журнал. Это может оказаться весьма значительным по времени, и в конечном 393 | итоге база будет грузиться гораздо дольше, что не всегда приемлемо для приложений. 394 | 395 | Именно поэтому в БД существует механизм фолловера, который методично перебирает логи в процессе работы БД 396 | и периодически создаёт чекпойнты, которые значительно ближе по состоянию к текущему моменту. 397 | Также за фолловером закреплена функция чистки старых логов и чекпойнтов, чтобы освобождать 398 | место на жёстком диске. 399 | 400 | ## Хранение данных 401 | 402 | Ваши данные хранятся в виде файлов в том каталоге, который вы указали при создании базы. 403 | Файлы с расширением `log` содержат описание выполненных операций. 404 | Файлы с расширением `checkpoint` содержат снимки состояния БД на определённый момент. 405 | Файлы с расширением `check` содержат неполный снимок состояния БД. 406 | Используя параметр конфигурации `RemoveUnlessLogs` , вы може приказать БД 407 | старые и ненужные файлы удалять, чтобы сберечь дисковое пространство. 408 | 409 | Если база данных остановлена в штатном режиме, то последним файлом, записанным на диск, 410 | будет файл `checkpoint`, а его номер будет максимальным. 411 | Если база данных остановлена некорректно, то скорей всего максимальный номер будет у файла 412 | с расширением `log` или `check`. 413 | 414 | Внимание! до тех пор, пока БД полностью не остановлена, запрещено с файлами базы данных 415 | проводить какие-либо операции. 416 | 417 | Если вы хотите скопировать куда-либо базу, то необходимо копировать всё содержимое директории. 418 | Если вы хотите при копировании скопировать минимум файлов, то необходимо скопировать файл 419 | с расширением `checkpoint` , имеющий максимальный номер, и все файлы с расширением `log` , 420 | которые имеют номер, больший чем у скопированного файла `checkpoint`. 421 | 422 | ### Загрузка данных после некорректного отключения 423 | 424 | Если работа приложения, использующего БД завершена некорректно, то при следующей загрузке БД 425 | постарается найти последний корректный снимок состояния `checkpoint`. Найдя этот файл, БД загрузит его, 426 | после чего загрузит все `log` файлы с большими номерами. Мы ожидаем, что последний `log` файл может 427 | быть не до конца заполненным, так как во время записи работа могла быть прервана. Поэтому загрузка 428 | с испорченного файла будет выполнена до испорченного (недозаписанного) участка, после чего 429 | загрузка БД считается завершенной. По кончании загрузки БД создаёт новый `checkpoint`. 430 | Если сбои системы происходят во время старта (загрузки) БД, возможны ошибки и нарушение 431 | консистентности данных. 432 | 433 | ## Коды ошибок 434 | 435 | Коды ошибок хранятся здесь: "github.com/claygod/coffer/reports/codes" 436 | Если получен `Ok` код, значит операция выполнена полностью. Если Код содержит `Error`, значит операция 437 | не выполнена, выполнена не полностью или выполнена с ошибкой, однако работу с БД можно продолжать. 438 | Если код содержит `Panic`, значит состояние БД таково, что работать с ней дальше нельзя. 439 | 440 | ### Список кодов 441 | 442 | - Ok - выполнено без замечаний 443 | - Error - не выполнено или выполнено не полностью, но работать дальше можно 444 | - ErrRecordLimitExceeded - превышен лимит записей на одну операцию 445 | - ErrExceedingMaxValueSize - слишком длинное значение 446 | - ErrExceedingMaxKeyLength - слишком длинный ключ 447 | - ErrExceedingZeroKeyLength - слишком короткий ключ 448 | - ErrHandlerNotFound - не найден хэндлер 449 | - ErrParseRequest - не получилось подготовить запрос для логгирования 450 | - ErrResources - не хватает ресурсов 451 | - ErrNotFound - не найдены ключи 452 | - ErrReadRecords - ошибка считывания записей для транзакции (при отсутствии хоть одной записи транзакцию нельзя проводить) 453 | - ErrHandlerReturn - найденный и загруженный хандлер вернул ошибку 454 | - ErrHandlerResponse - хандлер вернул неполные ответы 455 | - Panic - не выполнено, дальнейшая работа с БД невозможна 456 | - PanicStopped - приложение остановлено 457 | - PanicWAL - ошибка работы журнала проведённых операций 458 | 459 | ### Проверка кодов через методы 460 | 461 | Чтобы не экспортировать в приложение, работающее с БД, у отчётов (Report) есть методы: 462 | 463 | - IsCodeOk - выполнено без замечаний 464 | - IsCodeError - не выполнено или выполнено не полностью, но работать дальше можно 465 | - IsCodeErrRecordLimitExceeded - превышен лимит записей на одну операцию 466 | - IsCodeErrExceedingMaxValueSize - слишком длинное значение 467 | - IsCodeErrExceedingMaxKeyLength - слишком длинный ключ 468 | - IsCodeErrExceedingZeroKeyLength - слишком короткий ключ 469 | - IsCodeErrHandlerNotFound - не найден хэндлер 470 | - IsCodeErrParseRequest - не получилось подготовить запрос для логгирования 471 | - IsCodeErrResources - не хватает ресурсов 472 | - IsCodeErrNotFound - не найдены ключи 473 | - IsCodeErrReadRecords - ошибка считывания записей для транзакции (при отсутствии хоть одной записи транзакцию нельзя проводить) 474 | - IsCodeErrHandlerReturn - найденный и загруженный хандлер вернул ошибку 475 | - IsCodeErrHandlerResponse - хандлер вернул неполные ответы 476 | - IsCodePanic - не выполнено, дальнейшая работа с БД невозможна 477 | - IsCodePanicStopped - приложение остановлено 478 | - IsCodePanicWAL - ошибка работы журнала проведённых операций 479 | 480 | Для проверки полученных кодов не очень удобно делать большие свитчи. Можно ограничиться всего тремя проверками: 481 | 482 | - IsCodeOk - выполнено без замечаний 483 | - IsCodeError - не выполнено или выполнено не полностью, но работать дальше можно (охватывает ВСЕ ошибки) 484 | - IsCodePanic - не выполнено, дальнейшая работа с БД невозможна (охватывает ВСЕ паники) 485 | 486 | 487 | ## Benchmark 488 | 489 | - BenchmarkCofferWriteParallel32LowConcurent-4 100000 12933 ns/op 490 | - BenchmarkCofferTransactionSequence-4 2000 227928 ns/op 491 | - BenchmarkCofferTransactionPar32NotConcurent-4 100000 4132 ns/op 492 | - BenchmarkCofferTransactionPar32HalfConcurent-4 100000 4199 ns/op 493 | 494 | ## Dependencies 495 | 496 | - github.com/shirou/gopsutil/disk 497 | - github.com/shirou/gopsutil/mem 498 | - github.com/sirupsen/logrus 499 | 500 | ## TODO 501 | 502 | - [x] журнал при старте должен начинать новый лог 503 | - [x] разобраться с именами чекпоинтов и логов (логика нумерации) 504 | - [x] запуск и работа фолловера 505 | - [x] чистка ненужных логов фолловером 506 | - [ ] предусмотреть возможность не удалять старые логи, добавить тест! 507 | - [x] загрузка с битых файлов, чтобы останавливалась загрузка, но работа продолжалась (AllowStartupErrLoadLogs) 508 | - [x] циклическая загрузка чекпойнтов, пока они не кончатся (при ошибках) 509 | - [x] возврат не ошибок, а отчётов о проделанной работе 510 | - [x] добавить DeleteOptional, в том числе и в Operations 511 | - [x] тест Count 512 | - [x] тест Write 513 | - [x] тест Read 514 | - [x] тест Delete 515 | - [x] тест Transaction 516 | - [x] тест RecordsList 517 | - [x] тест RecordsListUnsafe 518 | - [x] тест RecordsListWithPrefix 519 | - [x] тест RecordsListWithSuffix 520 | - [x] тест ReadListUnsafe 521 | - [x] тест на загрузку с битым логом (последним, остальные в порядке) 522 | - [x] тест на загрузку с битым чекпоинтом 523 | - [x] тест на загрузку с битым логом и идущим за ним ещё одним логом 524 | - [x] тест на использование транзакции 525 | - [x] для удобства тестирования сделать WriteUnsafe 526 | - [x] ~~для чего нужен WriteUnsafeRecord в Checkpoint ? (для записи при старте?)~~ альтернатива WriteListUnsafe (быстрее) 527 | - [x] бенчмарк записи конкурентной и не конкурентной 528 | - [x] бенчмарк чтения конкурентного 529 | - [ ] бенчмарк записии и чтения в конкурентном режиме 530 | - [x] бенчмарк конкурентных транзакций в параллельном режиме 531 | - [ ] при загрузке - при поломанных файлах возвращаться может wrn, а не err 532 | - [x] разобраться с журналом и батчером, почему при быстрой записи records попадают в следующий лог 533 | - [x] перехват паник в корне приложения и на уровне usecases 534 | - [ ] ~~при транзакциях можно некоторые записи из участвующих удалять (!надобность под вопросом!)~~ 535 | - [x] тестирование вспомогательных хэлперов 536 | - [x] при создании БД сразу добавлять список хэндлеров, т.к. и загрузка из логов тоже происходит сразу 537 | - [x] добавить удобный конфигуратор при создании бд 538 | - [x] комментарии перевести на английский язык 539 | - [x] очистить код от старых артефактов 540 | - [ ] завести каталог для документации 541 | - [x] завести каталог для примеров 542 | - [x] сделать простой пример с записью, транзакцией и чтением 543 | - [x] сделать пример с финансовыми транзакциями 544 | - [ ] пример обработки ошибок 545 | - [ ] прогнать линтер и устранить все некорректности в коде 546 | - [x] добавить Usage/Quick start текст в readme 547 | - [x] описание кодов ошибок 548 | - [x] описание конфигурирования 549 | - [x] в описании указать сторонние пакеты (как зависимости) 550 | - [x] репортам добавить методы проверки на все ошибки в духе IsErrBlahBlahBlah 551 | - [x] все импортируемые пакеты перенести в дистрибутив 552 | - [x] перевести использование WriteUnsafeRecord на WriteListUnsafe 553 | - [x] добавитьReadListUnsafe для возможности чтения при остановленной базе 554 | - [x] добавить RecordsListUnsafe, который может работать и при остановленной и при работающей БД 555 | - [x] получение списка ключей с условием по префиксу RecordsListWithPrefix 556 | - [x] получение списка ключей с условием по суффиксу RecordsListWithSuffix 557 | - [x] убрать метод Save 558 | - [x] при транзакции возвращать в отчёте новые значения 559 | - [x] в тестах проверить возвращаемое значение 560 | - [x] начинать нумерацию с больших цифр, допустим с миллиона/миллиарда (удобней для сортировки файлов) 561 | - [x] всем публичным методам дать корректное описание-комментарий 562 | - [ ] описать возврат ошибок и варнингов в методе Create 563 | - [ ] пауза в батчере - проверить её размер, выставить оптимальный 564 | - [x] указать в описании, что данные при работе хранятся и на диске и в памяти 565 | - [ ] метод получения всех файлов логов и чекпоинтов 566 | - [ ] метод просмотра файла лога 567 | - [ ] метод просмотра файла чекпоинта 568 | - [ ] метод строгой записи в базу (только если запись с таким ключём не существует) 569 | - [ ] добавить метод для просмотра статуса БД 570 | 571 | ### Copyright © 2019-2022 Eduard Sesigin. All rights reserved. Contacts: 572 | -------------------------------------------------------------------------------- /actions.go: -------------------------------------------------------------------------------- 1 | package coffer 2 | 3 | // Coffer 4 | // Actions 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | //"strings" 10 | "time" 11 | 12 | "github.com/claygod/coffer/reports" 13 | "github.com/claygod/coffer/reports/codes" 14 | "github.com/claygod/coffer/usecases" 15 | ) 16 | 17 | /* 18 | Write a new record in the database, specifying the key and value. 19 | Their length must satisfy the requirements specified in the configuration. 20 | */ 21 | func (c *Coffer) Write(key string, value []byte) *reports.ReportWriteList { 22 | return c.WriteList(map[string][]byte{key: value}, false) 23 | } 24 | 25 | /* 26 | WriteList - write several records to the database by specifying `map` in the arguments. 27 | Strict mode (true): 28 | The operation will be performed if there are no records with such keys yet. 29 | Otherwise, a list of existing records is returned. 30 | Optional mode (false): 31 | The operation will be performed regardless of whether there are records with such keys or not. 32 | A list of existing records is returned. 33 | Important: this argument is a reference; it cannot be changed in the calling code! 34 | */ 35 | func (c *Coffer) WriteList(input map[string][]byte, strictMode bool) *reports.ReportWriteList { 36 | rep := &reports.ReportWriteList{Report: reports.Report{}} 37 | 38 | defer c.panicRecover() 39 | 40 | if !c.hasp.Add() { 41 | rep.Code = codes.PanicStopped 42 | rep.Error = fmt.Errorf("Coffer is stopped") 43 | 44 | return rep 45 | } 46 | 47 | defer c.hasp.Done() 48 | 49 | for _, value := range input { 50 | if ln := len(value); ln > c.config.UsecasesConfig.MaxValueLength { // контроль максимально допустимой длины значения 51 | rep.Code = codes.ErrExceedingMaxValueSize 52 | rep.Error = fmt.Errorf("The admissible value length is %d; there is a value with a length of %d in the request.", c.config.UsecasesConfig.MaxValueLength, ln) 53 | 54 | return rep 55 | } 56 | } 57 | 58 | keys := c.extractKeysFromMap(input) 59 | 60 | if code, err := c.checkLenCountKeys(keys); code != codes.Ok { 61 | rep.Code = code 62 | rep.Error = err 63 | 64 | return rep 65 | } 66 | 67 | c.porter.Catch(keys) 68 | defer c.porter.Throw(keys) 69 | 70 | req := &usecases.ReqWriteList{ 71 | Time: time.Now(), 72 | List: input, 73 | } 74 | 75 | if strictMode { 76 | rep = c.recInteractor.WriteListStrict(req) 77 | } else { 78 | rep = c.recInteractor.WriteListOptional(req) 79 | } 80 | 81 | if rep.Code >= codes.Panic { 82 | defer c.Stop() 83 | } 84 | 85 | return rep 86 | } 87 | 88 | /* 89 | WriteListUnsafe - write several records to the database by specifying `map` in the arguments. 90 | This method exists in order to fill it up a little faster before starting the database. 91 | The method does not imply concurrent use. 92 | */ 93 | func (c *Coffer) WriteListUnsafe(input map[string][]byte) *reports.Report { 94 | rep := &reports.Report{} 95 | defer c.panicRecover() 96 | 97 | for _, value := range input { 98 | if ln := len(value); ln > c.config.UsecasesConfig.MaxValueLength { // контроль максимально допустимой длины значения 99 | rep.Code = codes.ErrExceedingMaxValueSize 100 | rep.Error = fmt.Errorf("The admissible value length is %d; there is a value with a length of %d in the request.", c.config.UsecasesConfig.MaxValueLength, ln) 101 | 102 | return rep 103 | } 104 | } 105 | 106 | keys := c.extractKeysFromMap(input) 107 | 108 | if code, err := c.checkLenCountKeys(keys); code != codes.Ok { 109 | rep.Code = code 110 | rep.Error = err 111 | 112 | return rep 113 | } 114 | 115 | req := &usecases.ReqWriteList{ 116 | Time: time.Now(), 117 | List: input, 118 | } 119 | 120 | rep = c.recInteractor.WriteListUnsafe(req) 121 | 122 | if rep.Code >= codes.Panic { 123 | defer c.Stop() 124 | } 125 | 126 | return rep 127 | } 128 | 129 | /* 130 | Read one entry from the database. In the received `report` there will be a result code, and if it is positive, 131 | that will be the value in the `data` field. 132 | */ 133 | func (c *Coffer) Read(key string) *reports.ReportRead { 134 | rep := &reports.ReportRead{Report: reports.Report{}} 135 | defer c.panicRecover() 136 | repList := c.ReadList([]string{key}) 137 | rep.Report = repList.Report 138 | 139 | if len(repList.Data) == 1 { 140 | if d, ok := repList.Data[key]; ok { 141 | rep.Data = d 142 | } 143 | } 144 | 145 | return rep 146 | } 147 | 148 | /* 149 | ReadList - read a few entries. There is a limit on the maximum number of readable entries in the configuration. 150 | In addition to the found records, a list of not found records is returned. 151 | */ 152 | func (c *Coffer) ReadList(keys []string) *reports.ReportReadList { 153 | rep := &reports.ReportReadList{Report: reports.Report{}} 154 | defer c.panicRecover() 155 | 156 | if !c.hasp.Add() { 157 | rep.Code = codes.PanicStopped 158 | rep.Error = fmt.Errorf("Coffer is stopped") 159 | 160 | return rep 161 | } 162 | 163 | defer c.hasp.Done() 164 | 165 | if code, err := c.checkLenCountKeys(keys); code != codes.Ok { 166 | rep.Code = code 167 | rep.Error = err 168 | 169 | return rep 170 | } 171 | 172 | c.porter.Catch(keys) 173 | defer c.porter.Throw(keys) 174 | 175 | req := &usecases.ReqLoadList{ 176 | Time: time.Now(), 177 | Keys: keys, 178 | } 179 | rep = c.recInteractor.ReadList(req) 180 | 181 | if rep.Code >= codes.Panic { 182 | defer c.Stop() 183 | } 184 | 185 | return rep 186 | } 187 | 188 | /* 189 | ReadListUnsafe - read a few entries. The method can be called when the database is stopped (not running). 190 | The method does not imply concurrent use. 191 | */ 192 | func (c *Coffer) ReadListUnsafe(keys []string) *reports.ReportReadList { 193 | rep := &reports.ReportReadList{Report: reports.Report{}} 194 | defer c.panicRecover() 195 | 196 | if code, err := c.checkLenCountKeys(keys); code != codes.Ok { 197 | rep.Code = code 198 | rep.Error = err 199 | 200 | return rep 201 | } 202 | 203 | req := &usecases.ReqLoadList{ 204 | Time: time.Now(), 205 | Keys: keys, 206 | } 207 | rep = c.recInteractor.ReadListUnsafe(req) 208 | 209 | if rep.Code >= codes.Panic { 210 | defer c.Stop() 211 | } 212 | 213 | return rep 214 | } 215 | 216 | /* 217 | Delete - remove a single record. 218 | */ 219 | func (c *Coffer) Delete(key string) *reports.Report { 220 | repList := c.DeleteList([]string{key}, true) 221 | 222 | return &repList.Report 223 | } 224 | 225 | /* 226 | DeleteList - delete multiple entries. 227 | Delete list Strict - delete several records, but only if they are all in the database. 228 | If at least one entry is missing, then no record will be deleted. 229 | Delete list Optional - delete multiple entries. Those entries from the list 230 | that will be found in the database will be deleted. 231 | */ 232 | func (c *Coffer) DeleteList(keys []string, strictMode bool) *reports.ReportDeleteList { 233 | rep := &reports.ReportDeleteList{Report: reports.Report{}} 234 | 235 | defer c.panicRecover() 236 | 237 | if !c.hasp.Add() { 238 | rep.Code = codes.PanicStopped 239 | rep.Error = fmt.Errorf("Coffer is stopped") 240 | 241 | return rep 242 | } 243 | 244 | defer c.hasp.Done() 245 | 246 | if code, err := c.checkLenCountKeys(keys); code != codes.Ok { 247 | rep.Code = code 248 | rep.Error = err 249 | 250 | return rep 251 | } 252 | 253 | c.porter.Catch(keys) 254 | defer c.porter.Throw(keys) 255 | 256 | req := &usecases.ReqDeleteList{ 257 | Time: time.Now(), 258 | Keys: keys, 259 | } 260 | 261 | rep = c.recInteractor.DeleteList(req, strictMode) 262 | 263 | if rep.Code >= codes.Panic { 264 | defer c.Stop() 265 | } 266 | 267 | return rep 268 | } 269 | 270 | /* 271 | Transaction - execute a handler. The transaction handler must be registered in the database at the stage 272 | of creating and configuring the database. Responsibility for the consistency of the functionality 273 | of transaction handlers between different database launches rests with the database user. 274 | The transaction returns the new values stored in the database. 275 | */ 276 | func (c *Coffer) Transaction(handlerName string, keys []string, arg []byte) *reports.ReportTransaction { 277 | rep := &reports.ReportTransaction{Report: reports.Report{}} 278 | 279 | defer c.panicRecover() 280 | 281 | if !c.hasp.Add() { 282 | rep.Code = codes.PanicStopped 283 | rep.Error = fmt.Errorf("Coffer is stopped") 284 | 285 | return rep 286 | } 287 | 288 | defer c.hasp.Done() 289 | 290 | if code, err := c.checkLenCountKeys(keys); code != codes.Ok { 291 | rep.Code = code 292 | rep.Error = err 293 | 294 | return rep 295 | } 296 | 297 | c.porter.Catch(keys) 298 | 299 | defer c.porter.Throw(keys) 300 | 301 | req := &usecases.ReqTransaction{ 302 | Time: time.Now(), 303 | HandlerName: handlerName, 304 | Keys: keys, 305 | Value: arg, 306 | } 307 | rep = c.recInteractor.Transaction(req) 308 | 309 | if rep.Code >= codes.Panic { 310 | defer c.Stop() 311 | } 312 | 313 | return rep 314 | } 315 | 316 | /* 317 | Count - get the number of records in the database. 318 | A query can only be made to a running database 319 | */ 320 | func (c *Coffer) Count() *reports.ReportRecordsCount { 321 | rep := &reports.ReportRecordsCount{Report: reports.Report{}} 322 | 323 | defer c.panicRecover() 324 | 325 | if !c.hasp.Add() { 326 | rep.Code = codes.PanicStopped 327 | rep.Error = fmt.Errorf("Coffer is stopped") 328 | 329 | return rep 330 | } 331 | 332 | defer c.hasp.Done() 333 | 334 | rep = c.recInteractor.RecordsCount() 335 | 336 | if rep.Code >= codes.Panic { 337 | defer c.Stop() 338 | } 339 | 340 | return rep 341 | } 342 | 343 | /* 344 | CountUnsafe - get the number of records in the database. 345 | Queries to a stopped / not running database cannot be done in parallel! 346 | */ 347 | func (c *Coffer) CountUnsafe() *reports.ReportRecordsCount { 348 | rep := &reports.ReportRecordsCount{Report: reports.Report{}} 349 | 350 | defer c.panicRecover() 351 | 352 | if !c.hasp.IsReady() && !c.hasp.Add() { 353 | rep.Code = codes.PanicStopped 354 | rep.Error = fmt.Errorf("Coffer is started, !c.hasp.Add()") 355 | 356 | return rep 357 | } 358 | 359 | defer c.hasp.Done() 360 | 361 | rep = c.recInteractor.RecordsCount() 362 | 363 | if rep.Code >= codes.Panic { 364 | defer c.Stop() 365 | } 366 | 367 | return rep 368 | } 369 | 370 | /* 371 | RecordsList - get a list of all database keys. With a large number of records in the database, 372 | the query will be slow, so use its only in case of emergency. 373 | The method only works when the database is running. 374 | */ 375 | func (c *Coffer) RecordsList() *reports.ReportRecordsList { 376 | defer c.panicRecover() 377 | 378 | if !c.hasp.Add() { 379 | rep := &reports.ReportRecordsList{Report: reports.Report{}} 380 | rep.Code = codes.PanicStopped 381 | rep.Error = fmt.Errorf("Coffer is stopped") 382 | 383 | return rep 384 | } 385 | 386 | defer c.hasp.Done() 387 | 388 | rep := c.recInteractor.RecordsList() 389 | 390 | if rep.Code >= codes.Panic { 391 | defer c.Stop() 392 | } 393 | 394 | return rep 395 | } 396 | 397 | /* 398 | RecordsListUnsafe - get a list of all database keys. With a large number of records in the database, 399 | the query will be slow, so use its only in case of emergency. When using a query with 400 | a stopped/not_running database, competitiveness prohibited. 401 | */ 402 | func (c *Coffer) RecordsListUnsafe() *reports.ReportRecordsList { 403 | defer c.panicRecover() 404 | 405 | rep := c.recInteractor.RecordsList() 406 | 407 | if rep.Code >= codes.Panic { 408 | defer c.Stop() 409 | } 410 | 411 | return rep 412 | } 413 | 414 | /* 415 | RecordsListWithPrefix - get a list of all the keys having prefix 416 | specified in the argument (start with that string). 417 | */ 418 | func (c *Coffer) RecordsListWithPrefix(prefix string) *reports.ReportRecordsList { 419 | defer c.panicRecover() 420 | 421 | if !c.hasp.Add() { 422 | rep := &reports.ReportRecordsList{Report: reports.Report{}} 423 | rep.Code = codes.PanicStopped 424 | rep.Error = fmt.Errorf("Coffer is stopped") 425 | 426 | return rep 427 | } 428 | 429 | defer c.hasp.Done() 430 | 431 | rep := c.recInteractor.RecordsListWithPrefix(prefix) 432 | 433 | if rep.Code >= codes.Panic { 434 | defer c.Stop() 435 | } 436 | 437 | return rep 438 | } 439 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | token: 1f919e20-f1c9-4882-aa4e-99b6bbaf2179 3 | -------------------------------------------------------------------------------- /coffer.go: -------------------------------------------------------------------------------- 1 | package coffer 2 | 3 | // Coffer 4 | // API 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | 10 | "github.com/claygod/coffer/domain" 11 | "github.com/claygod/coffer/services/filenamer" 12 | "github.com/claygod/coffer/services/journal" 13 | "github.com/claygod/coffer/services/porter" 14 | "github.com/claygod/coffer/services/repositories/handlers" 15 | "github.com/claygod/coffer/services/repositories/records" 16 | "github.com/claygod/coffer/services/resources" 17 | "github.com/claygod/coffer/services/startstop" 18 | "github.com/claygod/coffer/usecases" 19 | "github.com/sirupsen/logrus" 20 | ) 21 | 22 | /* 23 | Coffer - Simple ACID* key-value database. 24 | */ 25 | type Coffer struct { 26 | config *Config 27 | logger usecases.Logger 28 | porter usecases.Porter 29 | resControl *resources.ResourcesControl 30 | handlers domain.HandlersRepository 31 | recInteractor *usecases.RecordsInteractor 32 | folInteractor *usecases.FollowInteractor 33 | panicRecover func() 34 | hasp usecases.Starter 35 | } 36 | 37 | func new(config *Config, hdls domain.HandlersRepository) (*Coffer, error, error) { 38 | //TODO: check received config 39 | resControl, err := resources.New(config.ResourcesConfig) 40 | 41 | if err != nil { 42 | return nil, err, nil 43 | } 44 | 45 | if hdls == nil { 46 | hdls = handlers.New() 47 | } 48 | 49 | logger := logrus.New() 50 | 51 | c := &Coffer{ 52 | config: config, 53 | logger: logger.WithField("Object", "Coffer"), 54 | porter: porter.New(), 55 | resControl: resControl, 56 | handlers: hdls, 57 | hasp: startstop.New(), 58 | } 59 | 60 | c.panicRecover = func() { 61 | if r := recover(); r != nil { 62 | c.logger.Error(r) 63 | } 64 | } 65 | 66 | alarmFunc := func(err error) { // для журнала 67 | logger.WithField("Object", "Journal").WithField("Method", "Write").Error(err) 68 | } 69 | riRepo := records.New() 70 | fiRepo := records.New() 71 | reqCoder := usecases.NewReqCoder() 72 | fileNamer := filenamer.NewFileNamer(c.config.UsecasesConfig.DirPath) 73 | trn := usecases.NewTransaction(c.handlers) 74 | chp := usecases.NewCheckpoint(c.config.UsecasesConfig) 75 | ldr := usecases.NewLoader(config.UsecasesConfig, logger.WithField("Object", "Loader"), chp, reqCoder, resControl, trn) 76 | jrn, err := journal.New(c.config.JournalConfig, fileNamer, alarmFunc) 77 | 78 | if err != nil { 79 | return nil, err, nil 80 | } 81 | 82 | ri, err, wrn := usecases.NewRecordsInteractor( 83 | c.config.UsecasesConfig, 84 | logger.WithField("Object", "RecordsInteractor"), 85 | ldr, 86 | chp, 87 | trn, 88 | reqCoder, 89 | riRepo, 90 | c.handlers, 91 | resControl, 92 | jrn, 93 | fileNamer, 94 | startstop.New(), 95 | ) 96 | 97 | if err != nil { 98 | return nil, err, wrn 99 | } 100 | 101 | c.recInteractor = ri 102 | 103 | fi, err := usecases.NewFollowInteractor( 104 | logger.WithField("Object", "FollowInteractor"), 105 | ldr, 106 | c.config.UsecasesConfig, 107 | chp, 108 | fiRepo, 109 | fileNamer, 110 | startstop.New(), 111 | ) 112 | 113 | if err != nil { 114 | return nil, err, nil 115 | } 116 | 117 | c.folInteractor = fi 118 | 119 | return c, nil, nil 120 | } 121 | 122 | /* 123 | Start - database launch 124 | */ 125 | func (c *Coffer) Start() bool { 126 | defer c.panicRecover() 127 | 128 | if !c.resControl.Start() { 129 | return false 130 | } 131 | 132 | if !c.recInteractor.Start() { 133 | c.resControl.Stop() 134 | 135 | return false 136 | } 137 | 138 | if !c.folInteractor.Start() { 139 | c.resControl.Stop() 140 | c.recInteractor.Stop() 141 | 142 | return false 143 | } 144 | 145 | if !c.hasp.Start() { 146 | c.resControl.Stop() 147 | c.recInteractor.Stop() 148 | c.folInteractor.Stop() 149 | 150 | return false 151 | } 152 | 153 | return true 154 | } 155 | 156 | /* 157 | Stop - database stop 158 | */ 159 | func (c *Coffer) Stop() bool { 160 | if c.hasp.IsReady() { 161 | return true // already stopped 162 | } 163 | 164 | defer c.panicRecover() 165 | 166 | if !c.hasp.Block() { 167 | return false 168 | } 169 | 170 | defer c.hasp.Unblock() 171 | 172 | if !c.resControl.Stop() { 173 | return false 174 | } 175 | 176 | if !c.folInteractor.Stop() { 177 | c.resControl.Start() 178 | 179 | return false 180 | } 181 | 182 | if !c.recInteractor.Stop() { 183 | c.resControl.Start() 184 | c.folInteractor.Start() 185 | 186 | return false 187 | } 188 | 189 | return true 190 | } 191 | 192 | /* 193 | StopHard - immediate stop of the database, without waiting for the stop of internal processes. 194 | The operation is quick, but extremely dangerous. 195 | */ 196 | func (c *Coffer) StopHard() error { 197 | defer c.panicRecover() 198 | var errOut error 199 | c.hasp.Block() 200 | 201 | if !c.hasp.Block() { 202 | errOut = fmt.Errorf("Hasp is not stopped.") 203 | } 204 | 205 | if !c.folInteractor.Stop() { 206 | errOut = fmt.Errorf("%v Follow Interactor is not stopped.", errOut) 207 | } 208 | 209 | if !c.recInteractor.Stop() { 210 | errOut = fmt.Errorf("%v Records Interactor is not stopped.", errOut) 211 | } 212 | 213 | return errOut 214 | } 215 | -------------------------------------------------------------------------------- /coffer_bench_test.go: -------------------------------------------------------------------------------- 1 | package coffer 2 | 3 | // Coffer 4 | // API benchmarks 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | "strconv" 10 | "sync/atomic" 11 | "testing" 12 | "time" 13 | 14 | "github.com/claygod/coffer/domain" 15 | "github.com/claygod/coffer/reports/codes" 16 | //"github.com/claygod/coffer/services/journal" 17 | //"github.com/claygod/coffer/services/resources" 18 | //"github.com/claygod/coffer/usecases" 19 | ) 20 | 21 | var keyConcurent int64 22 | 23 | func BenchmarkClean(b *testing.B) { 24 | forTestClearDir(dirPath) 25 | cof1, err := createAndStartNewCofferFast(b, 1000, 1000, 100, 1000) //createAndStartNewCofferLengthB(b, 10, 100) 26 | if err != nil { 27 | b.Error(err) 28 | return 29 | } 30 | defer forTestClearDir(dirPath) 31 | defer cof1.Stop() 32 | defer forTestClearDir(dirPath) 33 | } 34 | 35 | // func BenchmarkCofferReadParallel32HiConcurent(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 36 | // b.StopTimer() 37 | // //b.SetParallelism(1) 38 | // forTestClearDir(dirPath) 39 | // //time.Sleep(1 * time.Second) 40 | // //fmt.Println("====================Parallel======================") 41 | // cof1, err := createAndStartNewCofferFast(b, 500, 100002, 100, 1000) // createAndStartNewCofferLengthB(b, 10, 100) 42 | // if err != nil { 43 | // b.Error(err) 44 | // return 45 | // } 46 | // defer cof1.Stop() 47 | // defer forTestClearDir(dirPath) 48 | // for x := 0; x < 100000; x += 100 { 49 | // list := make(map[string][]byte, 100) 50 | // for z := x; z < x+100; z++ { 51 | // key := strconv.Itoa(z) 52 | // list[key] = []byte("a" + key + "b") 53 | // } 54 | // rep := cof1.WriteList(list, false) 55 | // if rep.Code >= codes.Warning { 56 | // b.Error(fmt.Sprintf("Code_: %d , err: %v", rep.Code, rep.Error)) 57 | // } 58 | // } 59 | // fmt.Println("DB filled", cof1.Count()) 60 | // time.Sleep(2 * time.Second) 61 | // u := 0 62 | 63 | // b.StartTimer() 64 | // b.RunParallel(func(pb *testing.PB) { 65 | // for pb.Next() { 66 | // y := int(uint16(u)) 67 | // key := strconv.Itoa(y) 68 | // rep := cof1.Read(key) 69 | // if rep.Code >= codes.Warning { 70 | // b.Error(fmt.Sprintf("Code: %d , key: %s", rep.Code, key)) 71 | // } 72 | // u++ 73 | // //fmt.Println("++++++++", u) 74 | // } 75 | // }) 76 | // } 77 | 78 | func BenchmarkCofferWriteParallel32NotConcurent(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 79 | b.SetParallelism(1) 80 | b.StopTimer() 81 | forTestClearDir(dirPath) 82 | cof1, err := createAndStartNewCofferFast(b, 1000, 1000, 100, 1000) //createAndStartNewCofferLengthB(b, 10, 100) 83 | if err != nil { 84 | b.Error(err) 85 | return 86 | } 87 | defer cof1.Stop() 88 | defer forTestClearDir(dirPath) 89 | b.SetParallelism(32) 90 | b.StartTimer() 91 | b.RunParallel(func(pb *testing.PB) { 92 | for pb.Next() { 93 | u := atomic.AddInt64(&keyConcurent, 1) 94 | key := strconv.FormatInt(u, 10) 95 | rep := cof1.Write(key, []byte("aaa"+key+"bbb")) 96 | if rep.Code >= codes.Error { 97 | b.Error(fmt.Sprintf("Code: %d , key: %s", rep.Code, key)) 98 | } 99 | } 100 | }) 101 | } 102 | 103 | func BenchmarkCofferWriteParallel32HiConcurent(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 104 | b.StopTimer() 105 | forTestClearDir(dirPath) 106 | cof1, err := createAndStartNewCofferFast(b, 1000, 1000, 100, 1000) // createAndStartNewCofferLengthB(b, 10, 100) 107 | if err != nil { 108 | b.Error(err) 109 | return 110 | } 111 | defer cof1.Stop() 112 | defer forTestClearDir(dirPath) 113 | u := 0 114 | b.SetParallelism(32) 115 | b.StartTimer() 116 | b.RunParallel(func(pb *testing.PB) { 117 | for pb.Next() { 118 | key := strconv.Itoa(u) 119 | rep := cof1.Write(key, []byte("aaa"+key+"bbb")) 120 | if rep.Code >= codes.Error { 121 | b.Error(fmt.Sprintf("Code: %d , key: %s", rep.Code, key)) 122 | } 123 | u++ 124 | } 125 | }) 126 | } 127 | 128 | func BenchmarkCofferTransactionSequence(b *testing.B) { 129 | b.StopTimer() 130 | forTestClearDir(dirPath) 131 | cof10, err := createAndStartNewCofferFast(b, 10, 1000, 100, 1000) 132 | if err != nil { 133 | b.Error(err) 134 | return 135 | } 136 | defer forTestClearDir(dirPath) 137 | defer cof10.Stop() 138 | defer forTestClearDir(dirPath) 139 | 140 | for x := 0; x < 500; x += 1 { 141 | key := strconv.Itoa(x) 142 | rep := cof10.Write(key, []byte(key)) 143 | if rep.Code >= codes.Error { 144 | b.Error(fmt.Sprintf("Code_: %d , err: %v", rep.Code, rep.Error)) 145 | } 146 | } 147 | atomic.AddInt64(&keyConcurent, 100) 148 | cof10.ReadList([]string{"101", "102"}) 149 | b.StartTimer() 150 | for i := 0; i < b.N; i++ { 151 | rep := cof10.Transaction("exchange", []string{"101", "102"}, nil) 152 | if rep.Code >= codes.Error || rep.Error != nil { 153 | b.Error("EEEEEEEEERRRRRRRRRRRRRRRRR") 154 | } 155 | } 156 | } 157 | 158 | func BenchmarkCofferTransactionPar32NotConcurent(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 159 | b.StopTimer() 160 | forTestClearDir(dirPath) 161 | cof11, err := createAndStartNewCofferFast(b, 1000, 10000, 500, 1000) 162 | if err != nil { 163 | b.Error(err) 164 | return 165 | } 166 | defer forTestClearDir(dirPath) 167 | defer cof11.Stop() 168 | defer forTestClearDir(dirPath) 169 | 170 | for x := 0; x < 70000; x += 100 { 171 | list := make(map[string][]byte, 100) 172 | for z := x; z < x+100; z++ { 173 | key := strconv.Itoa(z) 174 | list[key] = []byte("a" + key + "b") 175 | } 176 | rep := cof11.WriteList(list, false) 177 | if rep.Code >= codes.Error { 178 | b.Error(fmt.Sprintf("Code_: %d , err: %v", rep.Code, rep.Error)) 179 | } 180 | } 181 | 182 | atomic.AddInt64(&keyConcurent, 100) 183 | cof11.ReadList([]string{"101", "102"}) 184 | b.SetParallelism(32) 185 | b.StartTimer() 186 | b.RunParallel(func(pb *testing.PB) { 187 | for pb.Next() { 188 | u1 := int64(uint16(atomic.AddInt64(&keyConcurent, 1))) 189 | u2 := int64(uint16(atomic.AddInt64(&keyConcurent, 1))) 190 | atomic.AddInt64(&keyConcurent, 100) 191 | 192 | rep := cof11.Transaction("exchange", []string{strconv.FormatInt(u1, 10), strconv.FormatInt(u2, 10)}, nil) 193 | if rep.Code >= codes.Error { 194 | b.Error(fmt.Sprintf("Code: %d , key1: %d, key2: %d", rep.Code, u1, u2)) 195 | } 196 | } 197 | }) 198 | } 199 | 200 | func BenchmarkCofferTransactionPar32HalfConcurent(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 201 | b.StopTimer() 202 | forTestClearDir(dirPath) 203 | cof12, err := createAndStartNewCofferFast(b, 1000, 10000, 500, 1000) // createAndStartNewCofferLengthB(b, 10, 100) 204 | if err != nil { 205 | b.Error(err) 206 | return 207 | } 208 | defer forTestClearDir(dirPath) 209 | defer cof12.Stop() 210 | defer forTestClearDir(dirPath) 211 | 212 | for x := 0; x < 70000; x += 100 { 213 | list := make(map[string][]byte, 100) 214 | for z := x; z < x+100; z++ { 215 | key := strconv.Itoa(z) 216 | list[key] = []byte("a" + key + "b") 217 | } 218 | rep := cof12.WriteList(list, false) 219 | if rep.Code >= codes.Error { 220 | b.Error(fmt.Sprintf("Code_: %d , err: %v", rep.Code, rep.Error)) 221 | } 222 | } 223 | 224 | atomic.AddInt64(&keyConcurent, 100) 225 | cof12.ReadList([]string{"101", "102"}) 226 | b.SetParallelism(32) 227 | b.StartTimer() 228 | b.RunParallel(func(pb *testing.PB) { 229 | for pb.Next() { 230 | u1 := int64(uint16(atomic.AddInt64(&keyConcurent, 0))) 231 | u2 := int64(uint16(atomic.AddInt64(&keyConcurent, 1))) 232 | atomic.AddInt64(&keyConcurent, 100) 233 | 234 | rep := cof12.Transaction("exchange", []string{strconv.FormatInt(u1, 10), strconv.FormatInt(u2, 10)}, nil) 235 | if rep.Code >= codes.Error { 236 | b.Error(fmt.Sprintf("Code: %d , key1: %d, key2: %d", rep.Code, u1, u2)) 237 | } 238 | } 239 | }) 240 | } 241 | 242 | // ======================================================================= 243 | // =========================== HELPERS =================================== 244 | // ======================================================================= 245 | 246 | func createAndStartNewCofferFast(t *testing.B, batchSize int, limitRecordsPerLogfile int, maxKeyLength int, maxValueLength int) (*Coffer, error) { 247 | cof1, err, wrn := createNewCofferFast(batchSize, limitRecordsPerLogfile, maxKeyLength, maxValueLength) 248 | if err != nil { 249 | return nil, err 250 | } else if wrn != nil { 251 | t.Log(wrn) 252 | } 253 | if !cof1.Start() { 254 | return nil, fmt.Errorf("Failed to start (cof)") 255 | } 256 | return cof1, nil 257 | } 258 | 259 | func createNewCofferFast(batchSize int, limitRecordsPerLogfile int, maxKeyLength int, maxValueLength int) (*Coffer, error, error) { 260 | hdlExch := domain.Handler(handlerExchange) 261 | return Db(dirPath).BatchSize(batchSize). 262 | LimitRecordsPerLogfile(limitRecordsPerLogfile). 263 | FollowPause(100*time.Second). 264 | LogsByCheckpoint(1000). 265 | MaxKeyLength(maxKeyLength). 266 | MaxValueLength(maxValueLength). 267 | MaxRecsPerOperation(1000000). 268 | Handler("exchange", &hdlExch). 269 | Create() 270 | } 271 | -------------------------------------------------------------------------------- /config.go: -------------------------------------------------------------------------------- 1 | package coffer 2 | 3 | // Coffer 4 | // Config 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "time" 9 | 10 | "github.com/claygod/coffer/services/journal" 11 | "github.com/claygod/coffer/services/resources" 12 | "github.com/claygod/coffer/usecases" 13 | ) 14 | 15 | /* 16 | Config - Coffer configuration 17 | */ 18 | type Config struct { 19 | JournalConfig *journal.Config 20 | UsecasesConfig *usecases.Config 21 | ResourcesConfig *resources.Config 22 | 23 | //DataPath string 24 | MaxRecsPerOperation int 25 | //MaxKeyLength int 26 | //MaxValueLength int 27 | } 28 | 29 | const ( 30 | stateStopped int64 = iota 31 | stateStarted 32 | statePanic 33 | ) 34 | 35 | const ( 36 | logPrefix string = "Coffer " 37 | megabyte int64 = 1024 * 1024 38 | ) 39 | 40 | const ( 41 | codeWriteList byte = iota //codeWrite 42 | codeTransaction 43 | codeDeleteList 44 | ) 45 | 46 | const ( 47 | defaultBatchSize int = 1000 48 | defaultLimitRecordsPerLogfile int64 = 1000 49 | 50 | defaultFollowPause time.Duration = 60 * time.Second 51 | defaultLogsByCheckpoint int64 = 10 52 | defaultAllowStartupErrLoadLogs bool = true 53 | defaultMaxKeyLength int = 100 54 | defaultMaxValueLength int = 10000 55 | defaultRemoveUnlessLogs bool = true 56 | 57 | defaultLimitMemory int64 = 100 * megabyte 58 | defaultLimitDisk int64 = 1000 * megabyte 59 | 60 | defaultMaxRecsPerOperation int = 1000 61 | ) 62 | -------------------------------------------------------------------------------- /db.go: -------------------------------------------------------------------------------- 1 | package coffer 2 | 3 | // Coffer 4 | // Db configurator 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | //"fmt" 9 | "time" 10 | 11 | "github.com/claygod/coffer/domain" 12 | "github.com/claygod/coffer/services/journal" 13 | "github.com/claygod/coffer/services/repositories/handlers" 14 | "github.com/claygod/coffer/services/resources" 15 | "github.com/claygod/coffer/usecases" 16 | ) 17 | 18 | /* 19 | Configurator - generates a configuration, and based on this configuration creates a database 20 | */ 21 | type Configurator struct { 22 | config *Config 23 | handlers domain.HandlersRepository 24 | } 25 | 26 | /* 27 | Db - specify the working directory in which the database will store its files. For a new database 28 | the directory should be free of files with the extensions log, check, checkpoint. 29 | */ 30 | func Db(dirPath string) *Configurator { 31 | jCnf := &journal.Config{ 32 | BatchSize: defaultBatchSize, 33 | LimitRecordsPerLogfile: defaultLimitRecordsPerLogfile, 34 | } 35 | ucCnf := &usecases.Config{ 36 | FollowPause: defaultFollowPause, 37 | LogsByCheckpoint: defaultLogsByCheckpoint, // after processing every N log files, the follower makes a new checkpoint 38 | DirPath: dirPath, 39 | AllowStartupErrLoadLogs: defaultAllowStartupErrLoadLogs, // if errors were detected during loading, is it possible to continue (by default it is possible) 40 | MaxKeyLength: defaultMaxKeyLength, 41 | MaxValueLength: defaultMaxValueLength, 42 | RemoveUnlessLogs: defaultRemoveUnlessLogs, // clean logs after use 43 | } 44 | rcCnf := &resources.Config{ 45 | LimitMemory: defaultLimitMemory, // minimum available memory (bytes) 46 | LimitDisk: defaultLimitDisk, // minimum free disk space 47 | DirPath: dirPath, // 48 | } 49 | 50 | cnf := &Config{ 51 | JournalConfig: jCnf, 52 | UsecasesConfig: ucCnf, 53 | ResourcesConfig: rcCnf, 54 | MaxRecsPerOperation: defaultMaxRecsPerOperation, 55 | //MaxKeyLength: 100, 56 | //MaxValueLength: 10000, 57 | } 58 | 59 | hdls := handlers.New() 60 | 61 | db := &Configurator{ 62 | config: cnf, 63 | handlers: hdls, 64 | } 65 | return db 66 | } 67 | 68 | /* 69 | Create - creating a database. This operation should be the last in the configuration chain. 70 | */ 71 | func (c *Configurator) Create() (*Coffer, error, error) { 72 | return new(c.config, c.handlers) 73 | } 74 | 75 | /* 76 | Handler - add a handler to the configuration. If a handler with such a key exists, it will be overwritten. 77 | */ 78 | func (c *Configurator) Handler(key string, value *domain.Handler) *Configurator { 79 | c.handlers.Set(key, value) 80 | return c 81 | } 82 | 83 | /* 84 | Handlers - add several handlers to the configuration. Duplicate handlers will be overwritten. 85 | */ 86 | func (c *Configurator) Handlers(hdls map[string]*domain.Handler) *Configurator { 87 | for key, value := range hdls { 88 | c.handlers.Set(key, value) 89 | } 90 | return c 91 | } 92 | 93 | /* 94 | BatchSize - the maximum number of records that a batch inside a database can add at a time 95 | (this applies to setting up internal processes, this does not apply to the number of records added at a time). 96 | Decreasing this parameter slightly improves the `latency` (but not too much). Increasing this parameter 97 | slightly degrades the `latency`, but at the same time increases the throughput `throughput`. 98 | */ 99 | func (c *Configurator) BatchSize(value int) *Configurator { 100 | c.config.JournalConfig.BatchSize = value 101 | return c 102 | } 103 | 104 | /* 105 | LimitRecordsPerLogfile - the number of operations to be written to one log file. 106 | A small number will make the database very often create 107 | new files, which will adversely affect the speed of the database. 108 | A large number reduces the number of pauses for creation 109 | files, but the files become larger. 110 | */ 111 | func (c *Configurator) LimitRecordsPerLogfile(value int) *Configurator { 112 | c.config.JournalConfig.LimitRecordsPerLogfile = int64(value) 113 | return c 114 | } 115 | 116 | /* 117 | FollowPause - the size of the time interval for starting the `Follow` interactor, 118 | which analyzes old logs and periodically creates new checkpoints. 119 | */ 120 | func (c *Configurator) FollowPause(value time.Duration) *Configurator { 121 | c.config.UsecasesConfig.FollowPause = value 122 | return c 123 | } 124 | 125 | /* 126 | LogsByCheckpoint - after how many completed log files it is necessary to create a new checkpoint (the smaller 127 | the number, the more often we create). For good performance, it’s better not to do it too often. 128 | */ 129 | func (c *Configurator) LogsByCheckpoint(value int) *Configurator { 130 | c.config.UsecasesConfig.LogsByCheckpoint = int64(value) 131 | return c 132 | } 133 | 134 | /* 135 | AllowStartupErrLoadLogs - the option allows the database to work at startup, 136 | even if the last log file was completed incorrectly, i.e. the last record is corrupted 137 | (a typical situation for an abnormal shutdown). By default, the option is enabled. 138 | */ 139 | func (c *Configurator) AllowStartupErrLoadLogs(value bool) *Configurator { 140 | c.config.UsecasesConfig.AllowStartupErrLoadLogs = value 141 | return c 142 | } 143 | 144 | /* 145 | MaxKeyLength - the maximum allowed key length. 146 | */ 147 | func (c *Configurator) MaxKeyLength(value int) *Configurator { 148 | c.config.UsecasesConfig.MaxKeyLength = value 149 | return c 150 | } 151 | 152 | /* 153 | MaxValueLength - the maximum size of the value to write. 154 | */ 155 | func (c *Configurator) MaxValueLength(value int) *Configurator { 156 | c.config.UsecasesConfig.MaxValueLength = value 157 | return c 158 | } 159 | 160 | /* 161 | RemoveUnlessLogs - option to delete old files. After `Follow` created a new checkpoint, 162 | with the permission of this option, it now removes the unnecessary operation logs. 163 | If for some reason you need to store the entire log of operations, you can disable this option, 164 | but be prepared for the fact that this will increase the consumption of disk space. 165 | */ 166 | func (c *Configurator) RemoveUnlessLogs(value bool) *Configurator { 167 | c.config.UsecasesConfig.RemoveUnlessLogs = value 168 | return c 169 | } 170 | 171 | /* 172 | LimitMemory - the minimum size of free RAM at which the database stops 173 | performing operations and stops to avoid data loss. 174 | */ 175 | func (c *Configurator) LimitMemory(value int) *Configurator { 176 | c.config.ResourcesConfig.LimitMemory = int64(value) 177 | return c 178 | } 179 | 180 | /* 181 | LimitDisk - the minimum amount of free space on the hard drive at which 182 | the database stops performing operations and stops to avoid data loss. 183 | */ 184 | func (c *Configurator) LimitDisk(value int) *Configurator { 185 | c.config.ResourcesConfig.LimitDisk = int64(value) 186 | return c 187 | } 188 | 189 | /* 190 | MaxRecsPerOperation - the maximum number of records that can be involved in one operation. 191 | */ 192 | func (c *Configurator) MaxRecsPerOperation(value int) *Configurator { 193 | c.config.MaxRecsPerOperation = value 194 | return c 195 | } 196 | -------------------------------------------------------------------------------- /domain/entities.go: -------------------------------------------------------------------------------- 1 | package domain 2 | 3 | // Coffer 4 | // Domain entities 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | /* 8 | Handler - handler in a transaction. 9 | The rule for writing a handler is simple: it can only change values in 10 | the resulting array. Adding new keys is prohibited, because will actually mean 11 | adding records, and those may already be. Decreasing the keys will mean deleting the entries, 12 | it is possible because all keys are locked and simultaneous access to them cannot be. 13 | */ 14 | type Handler func([]byte, map[string][]byte) (map[string][]byte, error) 15 | 16 | /* 17 | Operation - struct for logs. 18 | */ 19 | type Operation struct { 20 | Code byte 21 | Body []byte 22 | } 23 | 24 | /* 25 | Record - key-value struct in database. 26 | */ 27 | type Record struct { 28 | Key string 29 | Value []byte 30 | } 31 | -------------------------------------------------------------------------------- /domain/repositories.go: -------------------------------------------------------------------------------- 1 | package domain 2 | 3 | // Coffer 4 | // Repositories 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | /* 8 | RecordsRepository - records store interface. 9 | */ 10 | type RecordsRepository interface { 11 | Reset() 12 | WriteList(map[string][]byte) 13 | WriteListStrict(map[string][]byte) []string 14 | WriteListOptional(map[string][]byte) []string 15 | WriteListUnsafe(map[string][]byte) 16 | //WriteUnsafeRecord(string, []byte) 17 | ReadList([]string) (map[string][]byte, []string) 18 | ReadListUnsafe([]string) (map[string][]byte, []string) 19 | DelListStrict([]string) []string 20 | DelListOptional([]string) ([]string, []string) 21 | 22 | Iterator(chan *Record) // требуется при сохранении в файл 23 | CountRecords() int 24 | RecordsList() []string 25 | RecordsListWithPrefix(string) []string 26 | } 27 | 28 | /* 29 | HandlersRepository - handlers store interface. 30 | */ 31 | type HandlersRepository interface { 32 | Set(string, *Handler) 33 | Get(string) (*Handler, error) 34 | } 35 | -------------------------------------------------------------------------------- /examples/clean_dir.go: -------------------------------------------------------------------------------- 1 | package examples 2 | 3 | // Coffer 4 | // Examples: clean dir helper 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "os" 9 | "strings" 10 | ) 11 | 12 | /* 13 | ClearDir - clear dir from logs, checkpoints. 14 | */ 15 | func ClearDir(dir string) error { 16 | if !strings.HasSuffix(dir, "/") { 17 | dir += "/" 18 | } 19 | 20 | d, err := os.Open(dir) 21 | if err != nil { 22 | return err 23 | } 24 | defer d.Close() 25 | names, err := d.Readdirnames(-1) 26 | if err != nil { 27 | return err 28 | } 29 | for _, name := range names { 30 | //fmt.Println(name) 31 | if strings.HasSuffix(name, ".log") || strings.HasSuffix(name, ".check") || strings.HasSuffix(name, ".checkpoint") { 32 | os.Remove(dir + name) 33 | } 34 | // err = os.RemoveAll(filepath.Join(dir, name)) 35 | // if err != nil { 36 | // return err 37 | // } 38 | } 39 | return nil 40 | } 41 | -------------------------------------------------------------------------------- /examples/finance/finance.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // Coffer 4 | // Examples: finance 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "bytes" 9 | "encoding/gob" 10 | "fmt" 11 | 12 | "github.com/claygod/coffer" 13 | "github.com/claygod/coffer/domain" 14 | "github.com/claygod/coffer/examples" 15 | ) 16 | 17 | const curDir = "./" 18 | 19 | func main() { 20 | examples.ClearDir(curDir) 21 | defer examples.ClearDir(curDir) 22 | fmt.Printf("Financial example:\n\n") 23 | 24 | // STEP init 25 | hdlCredit := domain.Handler(HandlerCredit) 26 | hdlDebit := domain.Handler(HandlerDebit) 27 | hdlTransfer := domain.Handler(HandlerTransfer) 28 | hdlMultiTransfer := domain.Handler(HandlerMultiTransfer) 29 | db, err, wrn := coffer.Db(curDir).Handler("credit", &hdlCredit).Handler("debit", &hdlDebit).Handler("transfer", &hdlTransfer).Handler("multi_transfer", &hdlMultiTransfer).Create() 30 | 31 | switch { 32 | case err != nil: 33 | fmt.Println("Error:", err) 34 | return 35 | 36 | case wrn != nil: 37 | fmt.Println("Warning:", err) 38 | return 39 | 40 | case !db.Start(): 41 | fmt.Println("Error: not start") 42 | return 43 | } 44 | 45 | defer db.Stop() 46 | 47 | //STEP create accounts 48 | if rep := db.WriteList(map[string][]byte{"john_usd": uint64ToBytes(90), "alice_usd": uint64ToBytes(5), "john_stock": uint64ToBytes(5), "alice_stock": uint64ToBytes(25)}, true); rep.IsCodeError() { 49 | fmt.Printf("Write error: code `%d` msg `%s`", rep.Code, rep.Error) 50 | return 51 | } 52 | 53 | //STEP initial 54 | rep := db.ReadList([]string{"john_usd", "alice_usd", "john_stock", "alice_stock"}) 55 | 56 | if rep.IsCodeError() { 57 | fmt.Sprintf("Read error: code `%v` msg `%v`", rep.Code, rep.Error) 58 | return 59 | } 60 | 61 | fmt.Printf(" initial: John's usd=%d stocks=%d Alice's usd=%d stocks=%d\n", 62 | bytesToUint64(rep.Data["john_usd"]), 63 | bytesToUint64(rep.Data["john_stock"]), 64 | bytesToUint64(rep.Data["alice_usd"]), 65 | bytesToUint64(rep.Data["alice_stock"])) 66 | showState(db) 67 | 68 | //STEP credit 69 | if rep := db.Transaction("credit", []string{"john_usd"}, uint64ToBytes(5)); rep.IsCodeError() { 70 | fmt.Printf("Transaction error: code `%v` msg `%v`", rep.Code, rep.Error) 71 | return 72 | } 73 | 74 | fmt.Println(" credit: 5 usd were withdrawn from John’s account") 75 | showState(db) 76 | 77 | //STEP debit 78 | if rep := db.Transaction("debit", []string{"alice_stock"}, uint64ToBytes(2)); rep.IsCodeError() { 79 | fmt.Printf("Transaction error: code `%v` msg `%v`", rep.Code, rep.Error) 80 | return 81 | } 82 | fmt.Println(" debit: two stocks added to Alice's account") 83 | showState(db) 84 | 85 | //STEP transfer 86 | req1 := ReqTransfer{From: "john_usd", To: "alice_usd", Amount: 3} 87 | var buf bytes.Buffer 88 | enc := gob.NewEncoder(&buf) 89 | 90 | if err := enc.Encode(req1); err != nil { 91 | fmt.Println(err) 92 | return 93 | } 94 | 95 | if rep := db.Transaction("transfer", []string{"john_usd", "alice_usd"}, buf.Bytes()); rep.IsCodeError() { 96 | fmt.Printf("Transaction error: code `%v` msg `%v`", rep.Code, rep.Error) 97 | return 98 | } 99 | 100 | fmt.Println(" transfer: John transferred Alice $ 3") 101 | showState(db) 102 | 103 | //STEP purchase/sale 104 | t1 := ReqTransfer{From: "john_usd", To: "alice_usd", Amount: 50} 105 | t2 := ReqTransfer{From: "alice_stock", To: "john_stock", Amount: 5} 106 | req2 := []ReqTransfer{t1, t2} 107 | buf.Reset() 108 | enc = gob.NewEncoder(&buf) 109 | 110 | if err := enc.Encode(req2); err != nil { 111 | fmt.Println(err) 112 | return 113 | } 114 | 115 | if rep := db.Transaction("multi_transfer", []string{"john_usd", "alice_usd", "john_stock", "alice_stock"}, buf.Bytes()); rep.IsCodeError() { 116 | fmt.Printf("Transaction error: code `%v` msg `%v`", rep.Code, rep.Error) 117 | return 118 | } 119 | 120 | fmt.Println(" purchase/sale: John bought 5 stocks from Alice for $ 50") 121 | showState(db) 122 | } 123 | 124 | func showState(db *coffer.Coffer) { 125 | rep := db.ReadList([]string{"john_usd", "alice_usd", "john_stock", "alice_stock"}) 126 | 127 | if rep.IsCodeError() { 128 | fmt.Sprintf("Read error: code `%v` msg `%v`", rep.Code, rep.Error) 129 | return 130 | } 131 | 132 | fmt.Printf("-------------------------------------------------------------->>> John's usd:%3d stocks:%3d Alice's usd:%3d stocks:%3d\n", 133 | bytesToUint64(rep.Data["john_usd"]), 134 | bytesToUint64(rep.Data["john_stock"]), 135 | bytesToUint64(rep.Data["alice_usd"]), 136 | bytesToUint64(rep.Data["alice_stock"])) 137 | } 138 | -------------------------------------------------------------------------------- /examples/finance/handlers.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // Coffer 4 | // Examples: finance handlers 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "bytes" 9 | "encoding/gob" 10 | "fmt" 11 | "unsafe" 12 | ) 13 | 14 | // func HandlerNewAccount(arg interface{}, recs map[string][]byte) (map[string][]byte, error) { 15 | // newAcc, ok := arg.(uint64) 16 | // if !ok { 17 | // return nil, fmt.Errorf("Invalid Argument: %v.", arg) 18 | // } else if len(recs) != 1 { 19 | // return nil, fmt.Errorf("Want 1 record, have %d", len(recs)) 20 | // } 21 | // var recKey string 22 | // for k, _ := range recs { 23 | // recKey = k 24 | // } 25 | // return map[string][]byte{recKey: uint64ToBytes(newAcc)}, nil 26 | // } 27 | 28 | /* 29 | HandlerCredit - credit handler. 30 | */ 31 | func HandlerCredit(arg []byte, recs map[string][]byte) (map[string][]byte, error) { 32 | if arg == nil || len(arg) != 8 { 33 | return nil, fmt.Errorf("Invalid Argument: %v.", arg) 34 | } else if len(recs) != 1 { 35 | return nil, fmt.Errorf("Want 1 record, have %d", len(recs)) 36 | } 37 | delta := bytesToUint64(arg) 38 | var recKey string 39 | var recValue []byte 40 | for k, v := range recs { 41 | recKey = k 42 | recValue = v 43 | } 44 | if len(recValue) != 8 { 45 | return nil, fmt.Errorf("The length of the value in the record is %d bytes, but 8 bytes are needed", len(recValue)) 46 | } 47 | curAcc := bytesToUint64(recValue) 48 | if delta > curAcc { 49 | return nil, fmt.Errorf("Not enough funds in the account. There is %d, a credit of %d.", curAcc, delta) 50 | } 51 | return map[string][]byte{recKey: uint64ToBytes(curAcc - delta)}, nil 52 | } 53 | 54 | /* 55 | HandlerDebit - Debit handler. 56 | */ 57 | func HandlerDebit(arg []byte, recs map[string][]byte) (map[string][]byte, error) { 58 | if arg == nil || len(arg) != 8 { 59 | return nil, fmt.Errorf("Invalid Argument: %v.", arg) 60 | } else if len(recs) != 1 { 61 | return nil, fmt.Errorf("Want 1 record, have %d", len(recs)) 62 | } 63 | delta := bytesToUint64(arg) 64 | var recKey string 65 | var recValue []byte 66 | for k, v := range recs { 67 | recKey = k 68 | recValue = v 69 | } 70 | if len(recValue) != 8 { 71 | return nil, fmt.Errorf("The length of the value in the record is %d bytes, but 8 bytes are needed", len(recValue)) 72 | } 73 | curAmount := bytesToUint64(recValue) 74 | newAmount := curAmount + delta 75 | if curAmount > newAmount { 76 | return nil, fmt.Errorf("Account overflow. There is %d, a debit of %d.", curAmount, delta) 77 | } 78 | return map[string][]byte{recKey: uint64ToBytes(newAmount)}, nil 79 | } 80 | 81 | /* 82 | HandlerTransfer - Transfer handler. 83 | */ 84 | func HandlerTransfer(arg []byte, recs map[string][]byte) (map[string][]byte, error) { 85 | if arg == nil { 86 | return nil, fmt.Errorf("Invalid Argument: %v.", arg) 87 | } 88 | dec := gob.NewDecoder(bytes.NewBuffer(arg)) 89 | var req ReqTransfer 90 | if err := dec.Decode(&req); err != nil { 91 | return nil, fmt.Errorf("Invalid Argument: %v. Error: %v", arg, err) 92 | } 93 | 94 | if len(recs) != 2 { 95 | return nil, fmt.Errorf("Want 2 record, have %d", len(recs)) 96 | } 97 | 98 | return helperHandlerTransfer(req, recs) 99 | } 100 | 101 | /* 102 | HandlerMultiTransfer - MultiTransfer handler. 103 | */ 104 | func HandlerMultiTransfer(arg []byte, recs map[string][]byte) (map[string][]byte, error) { 105 | if arg == nil { 106 | return nil, fmt.Errorf("Invalid Argument: %v.", arg) 107 | } 108 | dec := gob.NewDecoder(bytes.NewBuffer(arg)) 109 | var reqs []ReqTransfer 110 | if err := dec.Decode(&reqs); err != nil { 111 | return nil, fmt.Errorf("Invalid Argument: %v. Error: %v", string(arg), err) 112 | } 113 | 114 | if len(recs) != len(reqs)*2 { 115 | return nil, fmt.Errorf("Want %d record, have %d", len(reqs)*2, len(recs)) 116 | } 117 | 118 | out := make(map[string][]byte, len(recs)) 119 | for _, req := range reqs { 120 | vFrom, ok := recs[req.From] 121 | if !ok { 122 | return nil, fmt.Errorf("Entry %s cannot be found among transaction arguments.", req.From) 123 | } 124 | vTo, ok := recs[req.To] 125 | if !ok { 126 | return nil, fmt.Errorf("Entry %s cannot be found among transaction arguments.", req.To) 127 | } 128 | 129 | m, err := helperHandlerTransfer(req, map[string][]byte{req.From: vFrom, req.To: vTo}) 130 | if err != nil { 131 | return nil, err 132 | } 133 | for k, v := range m { 134 | out[k] = v 135 | } 136 | } 137 | return out, nil 138 | } 139 | 140 | func helperHandlerTransfer(req ReqTransfer, recs map[string][]byte) (map[string][]byte, error) { 141 | recFromValueBytes, ok := recs[req.From] 142 | if !ok { 143 | return nil, fmt.Errorf("Entry %s cannot be found among transaction arguments.", req.From) 144 | } else if len(recFromValueBytes) != 8 { 145 | return nil, fmt.Errorf("The length of the value in the record `%s` is %d bytes, but 8 bytes are needed", req.From, len(recFromValueBytes)) 146 | } 147 | recToValueBytes, ok := recs[req.To] 148 | if !ok { 149 | return nil, fmt.Errorf("Entry %s cannot be found among transaction arguments.", req.To) 150 | } else if len(recFromValueBytes) != 8 { 151 | return nil, fmt.Errorf("The length of the value in the record `%s` is %d bytes, but 8 bytes are needed", req.To, len(recToValueBytes)) 152 | } 153 | 154 | recFromValueUint64 := bytesToUint64(recFromValueBytes) 155 | newAmountFrom := recFromValueUint64 - req.Amount 156 | if req.Amount > recFromValueUint64 { 157 | return nil, fmt.Errorf("Not enough funds in the account `%s`. There is %d, a credit of %d.", req.From, recFromValueUint64, req.Amount) 158 | } 159 | recToValueUint64 := bytesToUint64(recToValueBytes) 160 | newAmountTo := recToValueUint64 + req.Amount 161 | if recToValueUint64 > newAmountTo { 162 | return nil, fmt.Errorf("Account `%s` is overflow. There is %d, a debit of %d.", req.To, recToValueUint64, req.Amount) 163 | } 164 | 165 | return map[string][]byte{req.From: uint64ToBytes(newAmountFrom), req.To: uint64ToBytes(newAmountTo)}, nil 166 | } 167 | 168 | /* 169 | ReqTransfer - request. 170 | */ 171 | type ReqTransfer struct { 172 | From string 173 | To string 174 | Amount uint64 175 | } 176 | 177 | func uint64ToBytes(i uint64) []byte { 178 | x := (*[8]byte)(unsafe.Pointer(&i)) 179 | out := make([]byte, 0, 8) 180 | out = append(out, x[:]...) 181 | return out 182 | } 183 | 184 | func bytesToUint64(b []byte) uint64 { 185 | var x [8]byte 186 | copy(x[:], b[:]) 187 | return *(*uint64)(unsafe.Pointer(&x)) 188 | } 189 | -------------------------------------------------------------------------------- /examples/quick_start/handlers.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // Coffer 4 | // Example quik start handler 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | ) 10 | 11 | /* 12 | HandlerExchange - exchange handler. 13 | */ 14 | func HandlerExchange(arg []byte, recs map[string][]byte) (map[string][]byte, error) { 15 | if arg != nil { 16 | return nil, fmt.Errorf("Args not null.") 17 | } else if len(recs) != 2 { 18 | return nil, fmt.Errorf("Want 2 records, have %d", len(recs)) 19 | } 20 | recsKeys := make([]string, 0, 2) 21 | recsValues := make([][]byte, 0, 2) 22 | for k, v := range recs { 23 | recsKeys = append(recsKeys, k) 24 | recsValues = append(recsValues, v) 25 | } 26 | out := make(map[string][]byte, 2) 27 | out[recsKeys[0]] = recsValues[1] 28 | out[recsKeys[1]] = recsValues[0] 29 | return out, nil 30 | } 31 | -------------------------------------------------------------------------------- /examples/quick_start/quik_start.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // Coffer 4 | // Examples: quik start 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | 10 | "github.com/claygod/coffer" 11 | "github.com/claygod/coffer/domain" 12 | "github.com/claygod/coffer/examples" 13 | ) 14 | 15 | const curDir = "./" 16 | 17 | func main() { 18 | examples.ClearDir(curDir) 19 | defer examples.ClearDir(curDir) 20 | fmt.Println("Quik start: BEGIN") 21 | 22 | // STEP init 23 | hdlExch := domain.Handler(HandlerExchange) 24 | db, err, wrn := coffer.Db(curDir).Handler("exchange", &hdlExch).Create() 25 | switch { 26 | case err != nil: 27 | fmt.Println("Error:", err) 28 | return 29 | case wrn != nil: 30 | fmt.Println("Warning:", err) 31 | return 32 | case !db.Start(): 33 | fmt.Println("Error: not start") 34 | return 35 | } 36 | defer db.Stop() 37 | fmt.Println("- init: db created and started") 38 | 39 | // STEP write 40 | if rep := db.Write("foo", []byte("bar")); rep.IsCodeError() { 41 | fmt.Sprintf("Write error: code `%d` msg `%s`", rep.Code, rep.Error) 42 | return 43 | } 44 | if rep := db.WriteList(map[string][]byte{"john": []byte("ball"), "alice": []byte("flower")}, true); rep.IsCodeError() { 45 | fmt.Sprintf("Write error: code `%d` msg `%s`", rep.Code, rep.Error) 46 | return 47 | } 48 | fmt.Println("- write: John has ball and Alice has flower") 49 | 50 | // STEP transaction 51 | if rep := db.Transaction("exchange", []string{"john", "alice"}, nil); rep.IsCodeError() { 52 | fmt.Sprintf("Transaction error: code `%v` msg `%v`", rep.Code, rep.Error) 53 | return 54 | } 55 | fmt.Println("- transaction: John and Alice exchanged items") 56 | 57 | // STEP read 58 | if rep := db.Read("foo"); rep.IsCodeError() { 59 | fmt.Sprintf("Read error: code `%v` msg `%v`", rep.Code, rep.Error) 60 | return 61 | } 62 | rep := db.ReadList([]string{"john", "alice"}) 63 | if rep.IsCodeError() { 64 | fmt.Sprintf("Read error: code `%v` msg `%v`", rep.Code, rep.Error) 65 | return 66 | } 67 | fmt.Println(fmt.Sprintf("- read: John has %s and Alice has %s", string(rep.Data["john"]), string(rep.Data["alice"]))) 68 | 69 | fmt.Println("Quik start: END") 70 | } 71 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/claygod/coffer 2 | 3 | go 1.17 4 | 5 | require ( 6 | github.com/claygod/tools v0.0.0-20211125200027-91c5d269978c 7 | github.com/shirou/gopsutil/v3 v3.23.3 8 | github.com/sirupsen/logrus v1.9.0 9 | ) 10 | 11 | require ( 12 | github.com/go-ole/go-ole v1.2.6 // indirect 13 | github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect 14 | github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect 15 | github.com/tklauser/go-sysconf v0.3.11 // indirect 16 | github.com/tklauser/numcpus v0.6.0 // indirect 17 | github.com/yusufpapurcu/wmi v1.2.2 // indirect 18 | golang.org/x/sys v0.6.0 // indirect 19 | ) 20 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/claygod/tools v0.0.0-20211125200027-91c5d269978c h1:+Ya5dtbgyuS0+7FE5ahyzabtAPhFioHwG0Pz7VNNmg0= 2 | github.com/claygod/tools v0.0.0-20211125200027-91c5d269978c/go.mod h1:J5HG1JQFmlzOx3YzkwSFB2Y2mwPGuVkb+z+0pa1VJgE= 3 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 5 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= 7 | github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= 8 | github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 9 | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 10 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 11 | github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= 12 | github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= 13 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 14 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 15 | github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= 16 | github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= 17 | github.com/shirou/gopsutil/v3 v3.23.3 h1:Syt5vVZXUDXPEXpIBt5ziWsJ4LdSAAxF4l/xZeQgSEE= 18 | github.com/shirou/gopsutil/v3 v3.23.3/go.mod h1:lSBNN6t3+D6W5e5nXTxc8KIMMVxAcS+6IJlffjRRlMU= 19 | github.com/shoenig/go-m1cpu v0.1.4/go.mod h1:Wwvst4LR89UxjeFtLRMrpgRiyY4xPsejnVZym39dbAQ= 20 | github.com/shoenig/test v0.6.3/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= 21 | github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= 22 | github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 23 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 24 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 25 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 26 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 27 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 28 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 29 | github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= 30 | github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 31 | github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= 32 | github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= 33 | github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= 34 | github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= 35 | github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= 36 | github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= 37 | golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 38 | golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 39 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 40 | golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 41 | golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= 42 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 43 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 44 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 45 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 46 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 47 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 48 | -------------------------------------------------------------------------------- /helper.go: -------------------------------------------------------------------------------- 1 | package coffer 2 | 3 | // Coffer 4 | // Helpers 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | 10 | "github.com/claygod/coffer/reports/codes" 11 | ) 12 | 13 | // func (c *Coffer) copySlice(inList []string) ([]string, error) { // на случай, если мы хотим скопировать входные данные запроса, боясь их изменения 14 | // outList := make([]string, len(inList)) 15 | // n := copy(outList, inList) 16 | // if n != len(inList) { 17 | // return nil, fmt.Errorf("Slice (strings) copy failed.") 18 | // } 19 | // return outList, nil 20 | // } 21 | 22 | // func (c *Coffer) copyMap(inMap map[string][]byte) (map[string][]byte, error) { // на случай, если мы хотим скопировать входные данные запроса, боясь их изменения 23 | // outMap := make(map[string][]byte, len(inMap)) 24 | // for k, v := range inMap { 25 | // list := make([]byte, len(v)) 26 | // n := copy(list, v) 27 | // if n != len(v) { 28 | // return nil, fmt.Errorf("Slice (bytes) copy failed.") 29 | // } 30 | // outMap[k] = list 31 | // } 32 | // return outMap, nil 33 | // } 34 | 35 | // func (c *Coffer) checkPanic() { 36 | // if err := recover(); err != nil { 37 | // c.hasp.Block() 38 | // //atomic.StoreInt64(&c.hasp, statePanic) 39 | // //fmt.Println(err) 40 | // c.logger.Error(err).Context("Object", "Coffer").Context("Method", "checkPanic").Send() 41 | // } 42 | // } 43 | 44 | // func (c *Coffer) alarmFunc(err error) { // для журнала 45 | // c.logger.Error(err).Context("Object", "Journal").Context("Method", "Write").Send() 46 | // } 47 | 48 | func (c *Coffer) checkLenCountKeys(keys []string) (codes.Code, error) { // checking operation key parameters 49 | if ln := len(keys); ln > c.config.MaxRecsPerOperation { // control the maximum allowable number of added records per operation 50 | return codes.ErrRecordLimitExceeded, fmt.Errorf("The allowable number of entries in operation %d, and in the request %d.", c.config.MaxRecsPerOperation, ln) 51 | } 52 | for _, key := range keys { // control of the maximum permissible and zero key length 53 | ln := len(key) 54 | if ln > c.config.UsecasesConfig.MaxKeyLength { 55 | return codes.ErrExceedingMaxKeyLength, fmt.Errorf("The admissible key length is %d; there is a key with a length of %d in the request.", c.config.UsecasesConfig.MaxKeyLength, ln) 56 | } 57 | if ln == 0 { 58 | return codes.ErrExceedingZeroKeyLength, fmt.Errorf("The key length is 0.") 59 | } 60 | } 61 | return codes.Ok, nil 62 | } 63 | 64 | func (c *Coffer) extractKeysFromMap(input map[string][]byte) []string { 65 | keys := make([]string, 0, len(input)) 66 | for key := range input { 67 | keys = append(keys, key) 68 | } 69 | return keys 70 | } 71 | 72 | // func (c *Coffer) checkPanic() { 73 | // if r := recover(); r != nil { 74 | // c.logger.Error(r, "Object=Coffer") 75 | // } 76 | // } 77 | -------------------------------------------------------------------------------- /reports/codes/codes.go: -------------------------------------------------------------------------------- 1 | package codes 2 | 3 | // Coffer 4 | // Config 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | /* 8 | Code - response code type 9 | */ 10 | type Code int64 11 | 12 | const ( 13 | //Ok - done without errors 14 | Ok Code = iota // выполнено без замечаний 15 | //Error - completed with errors, but you can continue to work 16 | Error // не выполнено, но работать дальше можно 17 | //ErrRecordLimitExceeded - record limit per operation exceeded 18 | ErrRecordLimitExceeded // превышен лимит записей на одну операцию 19 | //ErrExceedingMaxValueSize - value too long 20 | ErrExceedingMaxValueSize // слишком длинное значение 21 | //ErrExceedingMaxKeyLength - key is too long 22 | ErrExceedingMaxKeyLength // слишком длинный ключ 23 | //ErrExceedingZeroKeyLength - key too short 24 | ErrExceedingZeroKeyLength // слишком короткий ключ 25 | //ErrHandlerNotFound - not found handler 26 | ErrHandlerNotFound // не найден хэндлер 27 | //ErrParseRequest - failed to prepare a request for logging 28 | ErrParseRequest // не получилось подготовить запрос для логгирования 29 | //ErrResources - not enough resources 30 | ErrResources // не хватает ресурсов 31 | //ErrNotFound - no keys found 32 | ErrNotFound // не найдены ключи 33 | //ErrRecordsFound - keys found during recording 34 | ErrRecordsFound // при попытке записи найдены ключи 35 | //ErrReadRecords - error reading records for a transaction (in the absence of at least one record, a transaction cannot be performed) 36 | ErrReadRecords // ошибка считывания записей для транзакции (при отсутствии хоть одной записи транзакцию нельзя проводить) 37 | //ErrHandlerReturn - found and loaded handler returned an error 38 | ErrHandlerReturn // найденный и загруженный хандлер вернул ошибку 39 | //ErrHandlerResponse - handler returned incomplete answers 40 | ErrHandlerResponse // хандлер вернул неполные ответы 41 | //Panic - not completed, further work with the database is impossible 42 | Panic // не выполнено, дальнейшая работа с БД невозможна 43 | //PanicStopped - the database is stopped, so you can’t work with it 44 | PanicStopped 45 | //PanicWAL - operation logging error, database stopped 46 | PanicWAL 47 | ) 48 | -------------------------------------------------------------------------------- /reports/reports.go: -------------------------------------------------------------------------------- 1 | package reports 2 | 3 | // Coffer 4 | // Reports (level usecases) 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | //"fmt" 9 | 10 | "github.com/claygod/coffer/reports/codes" 11 | ) 12 | 13 | // type ReportTransaction struct { 14 | // Code codes.Code 15 | // Error error 16 | // } 17 | 18 | // type ReportWriteList struct { 19 | // Code codes.Code 20 | // Error error 21 | // } 22 | 23 | /* 24 | ReportRead - report returned after operation Read. 25 | */ 26 | type ReportRead struct { 27 | Report 28 | Data []byte 29 | } 30 | 31 | /* 32 | ReportReadList - report returned after operation ReadList. 33 | */ 34 | type ReportReadList struct { 35 | Report 36 | Data map[string][]byte 37 | NotFound []string 38 | } 39 | 40 | /* 41 | ReportTransaction - report returned after operation Transaction. 42 | */ 43 | type ReportTransaction struct { 44 | Report 45 | Data map[string][]byte 46 | } 47 | 48 | /* 49 | ReportRecordsList - report returned after operation RecordsList. 50 | */ 51 | type ReportRecordsList struct { 52 | Report 53 | Data []string 54 | } 55 | 56 | // type ReportDelete struct { 57 | // Code codes.Code 58 | // Error error 59 | // } 60 | 61 | /* 62 | ReportDeleteList - report returned after operation DeleteList. 63 | */ 64 | type ReportDeleteList struct { 65 | Report 66 | Removed []string 67 | NotFound []string 68 | } 69 | 70 | /* 71 | ReportRecordsCount - report returned after operation Count. 72 | */ 73 | type ReportRecordsCount struct { 74 | Report 75 | Count int 76 | } 77 | 78 | /* 79 | ReportWriteList - report returned after operation WriteList Optional/Strict. 80 | */ 81 | type ReportWriteList struct { 82 | Report 83 | Found []string 84 | } 85 | 86 | /* 87 | Report - report returned after operation. 88 | */ 89 | type Report struct { 90 | Code codes.Code 91 | Error error 92 | } 93 | 94 | /* 95 | IsCodeOk - checking code for Ok result 96 | */ 97 | func (r *Report) IsCodeOk() bool { 98 | return r.Code == codes.Ok 99 | } 100 | 101 | // func (r *Report) IsCodeWarning() bool { 102 | // return r.Code >= codes.Warning 103 | // } 104 | 105 | /* 106 | IsCodeError - checking code for availability `Error` 107 | */ 108 | func (r *Report) IsCodeError() bool { 109 | return r.Code > codes.Ok && r.Code < codes.Panic 110 | } 111 | 112 | /* 113 | IsCodeErrRecordLimitExceeded - checking code for error ErrRecordLimitExceeded 114 | */ 115 | func (r *Report) IsCodeErrRecordLimitExceeded() bool { 116 | return r.Code == codes.ErrRecordLimitExceeded 117 | } 118 | 119 | /* 120 | IsCodeErrExceedingMaxValueSize - checking code for error ErrExceedingMaxValueSize 121 | */ 122 | func (r *Report) IsCodeErrExceedingMaxValueSize() bool { 123 | return r.Code == codes.ErrExceedingMaxValueSize 124 | } 125 | 126 | /* 127 | IsCodeErrExceedingMaxKeyLength - checking code for error ErrExceedingMaxKeyLength 128 | */ 129 | func (r *Report) IsCodeErrExceedingMaxKeyLength() bool { 130 | return r.Code == codes.ErrExceedingMaxKeyLength 131 | } 132 | 133 | /* 134 | IsCodeErrExceedingZeroKeyLength - checking code for error ErrExceedingZeroKeyLength 135 | */ 136 | func (r *Report) IsCodeErrExceedingZeroKeyLength() bool { 137 | return r.Code == codes.ErrExceedingZeroKeyLength 138 | } 139 | 140 | /* 141 | IsCodeErrHandlerNotFound - checking code for error ErrHandlerNotFound 142 | */ 143 | func (r *Report) IsCodeErrHandlerNotFound() bool { 144 | return r.Code == codes.ErrHandlerNotFound 145 | } 146 | 147 | /* 148 | IsCodeErrParseRequest - checking code for error ErrParseRequest 149 | */ 150 | func (r *Report) IsCodeErrParseRequest() bool { 151 | return r.Code == codes.ErrParseRequest 152 | } 153 | 154 | /* 155 | IsCodeErrResources - checking code for error ErrResources 156 | */ 157 | func (r *Report) IsCodeErrResources() bool { 158 | return r.Code == codes.ErrResources 159 | } 160 | 161 | /* 162 | IsCodeErrNotFound - checking code for error ErrNotFound 163 | */ 164 | func (r *Report) IsCodeErrNotFound() bool { 165 | return r.Code == codes.ErrNotFound 166 | } 167 | 168 | /* 169 | IsCodeErrRecordsFound - checking code for error ErrRecordsFound 170 | */ 171 | func (r *Report) IsCodeErrRecordsFound() bool { 172 | return r.Code == codes.ErrRecordsFound 173 | } 174 | 175 | /* 176 | IsCodeErrReadRecords - checking code for error ErrReadRecords 177 | */ 178 | func (r *Report) IsCodeErrReadRecords() bool { 179 | return r.Code == codes.ErrReadRecords 180 | } 181 | 182 | /* 183 | IsCodeErrHandlerReturn - checking code for error ErrHandlerReturn 184 | */ 185 | func (r *Report) IsCodeErrHandlerReturn() bool { 186 | return r.Code == codes.ErrHandlerReturn 187 | } 188 | 189 | /* 190 | IsCodeErrHandlerResponse - checking code for error ErrHandlerResponse 191 | */ 192 | func (r *Report) IsCodeErrHandlerResponse() bool { 193 | return r.Code == codes.ErrHandlerResponse 194 | } 195 | 196 | /* 197 | IsCodePanic - checking code for availability `Panic` 198 | */ 199 | func (r *Report) IsCodePanic() bool { 200 | return r.Code >= codes.Panic 201 | } 202 | 203 | /* 204 | IsCodePanicStopped - checking code for error PanicStopped 205 | */ 206 | func (r *Report) IsCodePanicStopped() bool { 207 | return r.Code == codes.PanicStopped 208 | } 209 | 210 | /* 211 | IsCodePanicWAL - checking code for error PanicWAL 212 | */ 213 | func (r *Report) IsCodePanicWAL() bool { 214 | return r.Code == codes.PanicWAL 215 | } 216 | -------------------------------------------------------------------------------- /services/batcher/LICENSE: -------------------------------------------------------------------------------- 1 | LICENSE 2 | 3 | Copyright (c) 2018 Eduard Sesigin. All rights reserved. Contacts: 4 | The license for this software and associated documentation files (the "Software"). 5 | 6 | "Software" is available under different licensing options designed to accommodate the needs of our various users: 7 | 8 | 1) "Software" licensed under the GNU Lesser General Public License (LGPL) version 3, is appropriate for the use of "Software" 9 | provided you can comply with the terms and conditions of the GNU LGPL version 3 (or GNU GPL version 3). 10 | 2) "Software" licensed under commercial licenses is appropriate for development of proprietary/commercial software where you 11 | do not want to share any source code with third parties or otherwise cannot comply with the terms of the GNU LGPL version 3. 12 | 13 | "Software" documentation is licensed under the terms of the GNU Free Documentation License (FDL) version 1.3, 14 | as published by the Free Software Foundation. Alternatively, you may use the documentation in accordance with 15 | the terms contained in a written agreement between you and the author of the documentation. 16 | 17 | For information about selling software, contact the author of the software by e-mail 18 | 19 | DISCLAIMER 20 | 21 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO 22 | THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 23 | THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 24 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 25 | OTHER DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /services/batcher/README.md: -------------------------------------------------------------------------------- 1 | # Batcher 2 | 3 | Performing tasks in batches 4 | 5 | Батчер получает из входного канала байты и записывает их пачками в файл. 6 | -------------------------------------------------------------------------------- /services/batcher/batcher.go: -------------------------------------------------------------------------------- 1 | package batcher 2 | 3 | // Batcher 4 | // API 5 | // Copyright © 2018 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "io" 9 | "runtime" 10 | "sync/atomic" 11 | "time" 12 | ) 13 | 14 | const batchRatio int = 10 // how many batches fit into the input channel 15 | const pauseByEmptyBuf time.Duration = 200 * time.Millisecond // do not change! 16 | 17 | const ( 18 | stateStop int64 = 0 << iota 19 | stateStart 20 | ) 21 | 22 | /* 23 | Batcher - performs write jobs in batches. 24 | */ 25 | type batcher struct { 26 | indicator *indicator 27 | work io.Writer 28 | alarm func(error) 29 | chInput chan []byte 30 | chStop chan struct{} 31 | batchSize int 32 | stopFlag int64 33 | } 34 | 35 | /* 36 | newBatcher - create new batcher. 37 | Arguments: 38 | - workFunc - function that records the formed batch 39 | - alarmFunc - error handling function 40 | - chInput - input channel 41 | - batchSize - batch size 42 | */ 43 | func newBatcher(workFunc io.Writer, alarmFunc func(error), batchSize int) *batcher { 44 | return &batcher{ 45 | indicator: newIndicator(), 46 | work: workFunc, 47 | alarm: alarmFunc, 48 | chInput: make(chan []byte, batchSize), 49 | chStop: make(chan struct{}, batchRatio*batchSize), //TODO: here the length may NOT matter 50 | batchSize: batchSize, 51 | } 52 | } 53 | 54 | /* 55 | start - run a worker 56 | */ 57 | func (b *batcher) start() { 58 | if atomic.CompareAndSwapInt64(&b.stopFlag, stateStop, stateStart) { 59 | go b.indicator.autoSwitcher() 60 | go b.worker() 61 | } 62 | } 63 | 64 | /* 65 | stop - finish the job 66 | */ 67 | func (b *batcher) stop() { 68 | // if _, ok := <-b.chStop; ok { 69 | // close(b.chStop) 70 | // } 71 | if b.chStop != nil { 72 | close(b.chStop) 73 | } 74 | //TODO: ? b.chStop <- struct{}{} 75 | for { 76 | if atomic.LoadInt64(&b.stopFlag) == stateStop { 77 | return 78 | } 79 | runtime.Gosched() 80 | } 81 | } 82 | 83 | /* 84 | getChan - get current channel. 85 | */ 86 | func (b *batcher) getChan() chan struct{} { 87 | return b.indicator.getChan() 88 | } 89 | -------------------------------------------------------------------------------- /services/batcher/batcher_test.go: -------------------------------------------------------------------------------- 1 | package batcher 2 | 3 | // Batcher 4 | // Batcher tests 5 | // Copyright © 2018 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | "os" 10 | 11 | //"runtime/pprof" 12 | "testing" 13 | //"time" 14 | ) 15 | 16 | func TestBatcher(t *testing.T) { 17 | // fileName := "./test.txt" 18 | // wr := newMockWriter(fileName) 19 | // chIn := make(chan []byte, 100) 20 | // batchSize := 10 21 | // btch := NewBatcher(wr, mockAlarmHandle, chIn, batchSize) 22 | // btch.Start() 23 | // for u := 0; u < 25; u++ { 24 | // chIn <- []byte{97} 25 | // } 26 | // time.Sleep(200 * time.Millisecond) 27 | // wr.Close() 28 | // f, _ := os.Open(fileName) 29 | // st, err := f.Stat() 30 | // if err != nil { 31 | // t.Error("Error `stat` file") 32 | // } 33 | // if st.Size() != 28 { 34 | // t.Error("Want 28, have ", st.Size()) 35 | // } 36 | 37 | // btch.Stop() 38 | // // os.Remove(fileName) 39 | } 40 | 41 | // func BenchmarkClientSequence(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 42 | // b.StopTimer() 43 | // fmt.Println("====================Sequence======================") 44 | // clt, err := Open("./tmp.txt", 2000) 45 | // if err != nil { 46 | // b.Error("Error `stat` file") 47 | // } 48 | // //defer 49 | // dummy := forTestGetDummy(100) 50 | 51 | // u := 0 52 | // b.SetParallelism(4) 53 | // f, err := os.Create("cpu.txt") 54 | // if err != nil { 55 | // b.Error("could not create CPU profile: ", err) 56 | // } 57 | // if err := pprof.StartCPUProfile(f); err != nil { 58 | // b.Error("could not start CPU profile: ", err) 59 | // } 60 | // b.StartTimer() 61 | // b.RunParallel(func(pb *testing.PB) { 62 | // for pb.Next() { 63 | // //fmt.Println("++++++++++++++1+++++++++", u) 64 | // clt.Write(dummy) 65 | // //fmt.Println("++++++++++++++2+++++++++", u) 66 | // u++ 67 | // } 68 | // }) 69 | // pprof.StopCPUProfile() 70 | // clt.Close() 71 | // // os.Remove(fileName) 72 | // } 73 | 74 | func BenchmarkClientParallel(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 75 | b.StopTimer() 76 | fmt.Println("====================Parallel======================") 77 | clt, err := Open("./tmp.txt", 1000, alarm) 78 | if err != nil { 79 | b.Error("Error `stat` file") 80 | } 81 | //defer 82 | dummy := forTestGetDummy(100) 83 | 84 | u := 0 85 | b.SetParallelism(2) 86 | // f, err := os.Create("cpu.txt") 87 | // if err != nil { 88 | // b.Error("could not create CPU profile: ", err) 89 | // } 90 | // if err := pprof.StartCPUProfile(f); err != nil { 91 | // b.Error("could not start CPU profile: ", err) 92 | // } 93 | for x := 0; x < 256; x++ { 94 | //go genTraffic(clt, dummy) 95 | } 96 | 97 | b.StartTimer() 98 | b.RunParallel(func(pb *testing.PB) { 99 | for pb.Next() { 100 | //fmt.Println("++++++++++++++1+++++++++", u) 101 | clt.Write(dummy) 102 | //fmt.Println("++++++++++++++2+++++++++", u) 103 | u++ 104 | } 105 | }) 106 | //pprof.StopCPUProfile() 107 | clt.Close() 108 | // os.Remove(fileName) 109 | } 110 | 111 | // --- Helpers for tests --- 112 | 113 | func genTraffic(clt *Client, dummy []byte) { 114 | for x := 0; x < 25600000; x++ { 115 | clt.Write(dummy) 116 | } 117 | } 118 | 119 | type mockWriter struct { 120 | f *os.File 121 | } 122 | 123 | func newMockWriter(fileName string) *mockWriter { 124 | f, _ := os.Create(fileName) 125 | return &mockWriter{ 126 | f: f, 127 | } 128 | } 129 | func (m *mockWriter) Write(in []byte) (int, error) { 130 | m.f.Write(in) 131 | m.f.Write([]byte("\n")) // to calculate the batch 132 | return len(in), nil 133 | } 134 | func (m *mockWriter) Close() { 135 | m.f.Close() 136 | } 137 | 138 | func mockAlarmHandle(err error) { 139 | panic(err) 140 | } 141 | 142 | func forTestGetDummy(count int) []byte { 143 | dummy := make([]byte, count) 144 | for i := 0; i < count; i++ { 145 | dummy[i] = 105 146 | } 147 | return dummy 148 | } 149 | -------------------------------------------------------------------------------- /services/batcher/client.go: -------------------------------------------------------------------------------- 1 | package batcher 2 | 3 | // Batcher 4 | // Client 5 | // Copyright © 2018 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | //"fmt" 9 | "log" 10 | "os" 11 | "runtime" 12 | ) 13 | 14 | /* 15 | Client - for ease of use of the batcher in the typis case. 16 | */ 17 | type Client struct { 18 | b *batcher 19 | f *os.File 20 | //chIn chan []byte 21 | } 22 | 23 | /* 24 | Open - client creation and batcher. 25 | */ 26 | func Open(filePath string, batchSize int, alarmFunc func(error)) (*Client, error) { 27 | //fmt.Println("Создание нового клиента ", filePath) 28 | f, err := os.Create(filePath) 29 | if err != nil { 30 | return nil, err 31 | } 32 | //chIn := make(chan []byte, batchSize) 33 | nb := newBatcher(newWriter(f), alarmFunc, batchSize) 34 | nb.start() 35 | 36 | return &Client{ 37 | b: nb, 38 | f: f, 39 | //chIn: chIn, 40 | }, nil 41 | } 42 | 43 | /* 44 | Write - write data. 45 | */ 46 | func (c *Client) Write(in []byte) { 47 | 48 | //fmt.Println("step 1", string(in)) 49 | c.b.chInput <- in 50 | //fmt.Println("step 2") 51 | 52 | //fmt.Println("step 3") 53 | ch := c.b.getChan() 54 | //fmt.Println("step 4") 55 | <-ch 56 | //fmt.Println("step 5") 57 | } 58 | 59 | /* 60 | Close - stop and close Client. 61 | */ 62 | func (c *Client) Close() { 63 | if c.b == nil { 64 | return 65 | } 66 | c.b.stop() 67 | for { 68 | if len(c.b.chInput) == 0 { 69 | return 70 | } 71 | runtime.Gosched() 72 | } 73 | } 74 | 75 | /* 76 | alarm - errors log. 77 | */ 78 | func alarm(err error) { 79 | log.Print(err) 80 | } 81 | 82 | /* 83 | writer - intermediate structure for writing to file. 84 | */ 85 | type writer struct { 86 | f *os.File 87 | } 88 | 89 | /* 90 | newWriter - create new filewriter. 91 | */ 92 | func newWriter(f *os.File) *writer { 93 | return &writer{ 94 | f: f, 95 | } 96 | } 97 | 98 | /* 99 | Write - write data to a file with synchronization 100 | */ 101 | func (w *writer) Write(in []byte) (int, error) { 102 | i, err := w.f.Write(in) 103 | //i, err = w.f.Write([]byte{99}) //TODO: зачем тут ещё один символ?? 104 | if err == nil { 105 | err = w.f.Sync() 106 | } 107 | return i, err 108 | } 109 | -------------------------------------------------------------------------------- /services/batcher/client_test.go: -------------------------------------------------------------------------------- 1 | package batcher 2 | 3 | // Batcher 4 | // Client tests 5 | // Copyright © 2018 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "os" 9 | //"fmt" 10 | "testing" 11 | "time" 12 | ) 13 | 14 | func TestClient(t *testing.T) { 15 | fileName := "./test2.txt" 16 | bc, err := Open(fileName, 5, alarm) 17 | if err != nil { 18 | t.Error(err) 19 | } 20 | for i := 0; i < 7; i++ { 21 | go bc.Write([]byte{97}) 22 | //fmt.Println(i) 23 | } 24 | time.Sleep(1000 * time.Millisecond) 25 | bc.Close() 26 | 27 | f, _ := os.Open(fileName) 28 | st, err := f.Stat() 29 | if err != nil { 30 | t.Error("Error `stat` file") 31 | } 32 | if st.Size() != 7 { 33 | t.Error("Want 7, have ", st.Size()) 34 | } 35 | //os.Remove(fileName) 36 | } 37 | 38 | // --- Helpers for tests --- 39 | -------------------------------------------------------------------------------- /services/batcher/indicator.go: -------------------------------------------------------------------------------- 1 | package batcher 2 | 3 | // Batcher 4 | // Indicator 5 | // Copyright © 2018 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | //"fmt" 9 | "sync" 10 | "sync/atomic" 11 | "time" 12 | ) 13 | 14 | const cleanerShift uint8 = 5 //128 // shift to exclude race condition 15 | 16 | /* 17 | indicator - the closing of the channels signals the completed task. 18 | */ 19 | type indicator struct { 20 | m sync.Mutex 21 | chDone [256]chan struct{} 22 | cursor uint32 23 | } 24 | 25 | /* 26 | newIndicator - create new Indicator. 27 | */ 28 | func newIndicator() *indicator { 29 | i := &indicator{} 30 | i.chDone[0] = make(chan struct{}) 31 | for u := 0; u < 256; u++ { 32 | i.chDone[u] = make(chan struct{}) 33 | } 34 | //go i.autoSwitcher() 35 | return i 36 | } 37 | 38 | /* 39 | SwitchChan - switch channels: 40 | - a new channel is created 41 | - the pointer switches to a new channel 42 | - the old channel (with a shift) is closed 43 | */ 44 | func (i *indicator) switchChan() { 45 | i.m.Lock() 46 | defer i.m.Unlock() 47 | //fmt.Println("indicator switch ", uint8(atomic.LoadUint32(&i.cursor))) 48 | cursor := uint8(atomic.LoadUint32(&i.cursor)) 49 | i.chDone[cursor+1] = make(chan struct{}) 50 | atomic.StoreUint32(&i.cursor, uint32(cursor+1)) 51 | //if _, ok := i.chDone[cursor-cleanerShift]; ok { 52 | close(i.chDone[cursor]) 53 | //} 54 | } 55 | 56 | /* 57 | getChan - get current channel. 58 | */ 59 | func (i *indicator) getChan() chan struct{} { 60 | i.m.Lock() 61 | defer i.m.Unlock() 62 | cursor := uint8(atomic.LoadUint32(&i.cursor)) 63 | return i.chDone[cursor] 64 | } 65 | 66 | func (i *indicator) autoSwitcher() { 67 | for { 68 | i.switchChan() 69 | time.Sleep(200 * time.Microsecond) 70 | //fmt.Println("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Автосвич!! ") 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /services/batcher/worker.go: -------------------------------------------------------------------------------- 1 | package batcher 2 | 3 | // Batcher 4 | // Worker 5 | // Copyright © 2018 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "bytes" 9 | //"fmt" 10 | "runtime" 11 | "sync/atomic" 12 | "time" 13 | ) 14 | 15 | /* 16 | worker - basic cycle. 17 | 18 | - creates a batch 19 | - passes the batch to the vryter 20 | - check if you need to stop 21 | - switches the channel 22 | - zeroes the buffer under the new batch 23 | */ 24 | func (b *batcher) worker() { 25 | var buf bytes.Buffer 26 | for { 27 | buf.Reset() 28 | var u int 29 | // begin 30 | select { 31 | //TODO: наполнение должно идти и в момент отправки!! 32 | // case inData := <-b.chInput: 33 | // if _, err := buf.Write(inData); err != nil { 34 | // b.alarm(err) 35 | // } 36 | case <-b.chStop: 37 | if len(b.chInput) == 0 { 38 | atomic.StoreInt64(&b.stopFlag, stateStop) 39 | return 40 | } 41 | continue 42 | 43 | case inData := <-b.chInput: 44 | if _, err := buf.Write(inData); err != nil { 45 | b.alarm(err) 46 | 47 | } else { 48 | u++ 49 | } 50 | //default: 51 | //break 52 | } 53 | // batch fill 54 | //fmt.Println("-2 получили, может быть ещё что-то получим") 55 | for i := 0; i < b.batchSize; i++ { // -1 56 | select { 57 | case inData := <-b.chInput: 58 | if _, err := buf.Write(inData); err != nil { 59 | b.alarm(err) 60 | } else { 61 | u++ 62 | b.indicator.switchChan() 63 | } 64 | default: 65 | break 66 | } 67 | } 68 | // batch to out 69 | bOut := buf.Bytes() 70 | if len(bOut) > 0 { 71 | if _, err := b.work.Write(bOut); err != nil { 72 | atomic.StoreInt64(&b.stopFlag, stateStop) 73 | b.alarm(err) 74 | return 75 | } 76 | } else { 77 | time.Sleep(100 * time.Microsecond) 78 | runtime.Gosched() 79 | } 80 | // exit-check 81 | select { 82 | case <-b.chStop: 83 | if len(b.chInput) == 0 { 84 | atomic.StoreInt64(&b.stopFlag, stateStop) 85 | return 86 | } 87 | continue 88 | default: 89 | } 90 | b.indicator.switchChan() 91 | buf.Reset() 92 | } 93 | } 94 | 95 | func (b *batcher) fillBuf(buf bytes.Buffer, counter *int64) error { 96 | // begin 97 | select { 98 | // case <-b.chStop: 99 | // atomic.StoreInt64(&b.stopFlag, stateStop) 100 | // return nil 101 | case inData := <-b.chInput: 102 | if _, err := buf.Write(inData); err != nil { 103 | return err 104 | } 105 | //default: 106 | //break 107 | } 108 | // batch fill 109 | for i := 0; i < b.batchSize; i++ { // -1 110 | select { 111 | case inData := <-b.chInput: 112 | if _, err := buf.Write(inData); err != nil { 113 | return err 114 | } 115 | if atomic.LoadInt64(counter) == 0 { 116 | break 117 | } 118 | default: 119 | if atomic.LoadInt64(counter) == 0 { 120 | break 121 | } 122 | runtime.Gosched() 123 | } 124 | } 125 | return nil 126 | } 127 | 128 | func (b *batcher) writeBuf(buf bytes.Buffer, counter *int64) error { 129 | bOut := buf.Bytes() 130 | if len(bOut) > 0 { 131 | if _, err := b.work.Write(buf.Bytes()); err != nil { 132 | return err 133 | } 134 | //b.indicator.switchChan() 135 | //buf.Reset() 136 | } else { 137 | time.Sleep(pauseByEmptyBuf) 138 | runtime.Gosched() 139 | } 140 | atomic.AddInt64(counter, -1) 141 | return nil 142 | } 143 | -------------------------------------------------------------------------------- /services/filenamer/filenamer.go: -------------------------------------------------------------------------------- 1 | package filenamer 2 | 3 | // Coffer 4 | // Filenamer 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | "io/ioutil" 10 | "sort" 11 | "strconv" 12 | "strings" 13 | "sync" 14 | //"time" 15 | ) 16 | 17 | //const limitRecordsPerLogfile int64 = 100000 18 | 19 | const startNumber int64 = 1000000000 20 | 21 | /* 22 | FileNamer - logs names creator. 23 | */ 24 | type FileNamer struct { 25 | m sync.Mutex 26 | dirPath string 27 | } 28 | 29 | /* 30 | NewFileNamer - create new FileNamer 31 | */ 32 | func NewFileNamer(dirPath string) *FileNamer { 33 | return &FileNamer{ 34 | dirPath: dirPath, 35 | } 36 | } 37 | 38 | // func (f *FileNamer) GetNewFileName222(ext string) (string, error) { 39 | // for i := 0; i < 60; i++ { 40 | // if latestName, err := f.findLatestLog(ext); err == nil { 41 | // lNumStr := strings.Replace(latestName, ext, "", 1) 42 | // if lNum, err := strconv.ParseInt(lNumStr, 10, 64); err == nil { 43 | // lNum += 1 44 | // return f.dirPath + strconv.FormatInt(lNum, 10) + ext, nil // "/" + 45 | // } 46 | // } 47 | 48 | // // newFileName := f.dirPath + strconv.Itoa(int(time.Now().Unix())) + ext 49 | // // if _, err := os.Stat(newFileName); !os.IsExist(err) { 50 | // // return newFileName, nil 51 | // // } 52 | // time.Sleep(1 * time.Second) 53 | // } 54 | // return "", fmt.Errorf("Error finding a new name.") 55 | // } 56 | 57 | /* 58 | GetNewFileName - get new fileName. 59 | */ 60 | func (f *FileNamer) GetNewFileName(ext string) (string, error) { 61 | 62 | f.m.Lock() 63 | defer f.m.Unlock() 64 | //if ext == ".log" { 65 | latestNum, err := f.findLatestNum([]string{".log", ".check", ".checkpoint"}) 66 | if err != nil { 67 | return "", fmt.Errorf("Error finding a new name: %v", err) 68 | } 69 | if latestNum == 0 { 70 | latestNum = startNumber 71 | } 72 | ///fmt.Println("сгенерирован новый текущий номер ", latestNum) 73 | return f.dirPath + strconv.FormatInt(latestNum+1, 10) + ext, nil 74 | // } 75 | // latestNumLog, err := f.findLatestNum([]string{".log"}) // для ".checkpoint" 76 | // if err != nil { 77 | // return "", fmt.Errorf("Error finding a new name: %v", err) 78 | // } 79 | // latestNumChPn, err := f.findLatestNum([]string{".check", ".checkpoint"}) // для ".checkpoint" 80 | // if err != nil { 81 | // return "", fmt.Errorf("Error finding a new name: %v", err) 82 | // } 83 | // if latestNumLog > latestNumChPn { 84 | // return f.dirPath + strconv.FormatInt(latestNumLog, 10) + ext, nil 85 | // } 86 | // return f.dirPath + strconv.FormatInt(latestNumChPn+1, 10) + ext, nil 87 | 88 | } 89 | 90 | /* 91 | GetLatestFileName - get latest file name. 92 | */ 93 | func (f *FileNamer) GetLatestFileName(ext string) (string, error) { 94 | f.m.Lock() 95 | defer f.m.Unlock() 96 | //TODO: 97 | fNamesList, err := f.getFilesByExtList(ext) 98 | if err != nil { 99 | return "", fmt.Errorf("Error finding a latest name: %v", err) 100 | } 101 | ln := len(fNamesList) 102 | switch { 103 | case ln == 0: 104 | return "", nil 105 | case ln == 1: 106 | return f.dirPath + fNamesList[0], nil 107 | default: 108 | sort.Strings(fNamesList) 109 | return f.dirPath + fNamesList[len(fNamesList)-1], nil 110 | } 111 | } 112 | 113 | /* 114 | GetAfterLatest - get after latest. 115 | */ 116 | func (f *FileNamer) GetAfterLatest(last string) ([]string, error) { //TODO: тут названия файлов возвращаются БЕЗ директории 117 | f.m.Lock() 118 | defer f.m.Unlock() 119 | lstTemp := strings.Split(last, "/") // на случай, если аргумент прилетел вместе с путём (директорией) 120 | lst := strings.Split(lstTemp[len(lstTemp)-1], ".") 121 | lastInt, err := strconv.Atoi(lst[0]) // strconv.ParseInt(fNumStr, 10, 64) 122 | if err != nil || len(lst) != 2 { 123 | return nil, fmt.Errorf("Filenamer parse string (%s) error: %v", last, err) 124 | } 125 | 126 | fNamesList, err := f.getFilesByExtList(lst[1]) 127 | if err != nil { 128 | return nil, fmt.Errorf("Error finding files by ext: %v", err) 129 | } 130 | //fmt.Println("FileNamer: fNamesList: ", lastInt, lst, fNamesList) 131 | numList := make([]int, 0, len(fNamesList)) 132 | for _, fName := range fNamesList { 133 | fNumStr := strings.Replace(fName, "."+lst[1], "", 1) 134 | fNumInt, err := strconv.Atoi(fNumStr) // strconv.ParseInt(fNumStr, 10, 64) 135 | if err != nil { 136 | //TODO: info 137 | continue 138 | } 139 | numList = append(numList, fNumInt) 140 | } 141 | sort.Ints(numList) 142 | //fmt.Println("FileNamer: numList: ", numList) 143 | outListInt := make([]int, 0, len(numList)) 144 | for i, fNum := range numList { 145 | if fNum > lastInt { 146 | outListInt = numList[i : len(numList)-1] 147 | break 148 | } 149 | } 150 | //fmt.Println("FileNamer: outListInt: ", outListInt) 151 | outListStr := make([]string, 0, len(outListInt)) 152 | for _, v := range outListInt { 153 | outListStr = append(outListStr, strconv.Itoa(v)+"."+lst[1]) 154 | } 155 | //fmt.Println("FileNamer: outListStr: ", outListStr) 156 | return outListStr, nil 157 | } 158 | 159 | /* 160 | GetHalf - получить список файлов, номера которых больше или меньше того, что в аргументе 161 | */ 162 | func (f *FileNamer) GetHalf(last string, more bool) ([]string, error) { //TODO: тут названия файлов возвращаются БЕЗ директории 163 | f.m.Lock() 164 | defer f.m.Unlock() 165 | lstTemp := strings.Split(last, "/") // на случай, если аргумент прилетел вместе с путём (директорией) 166 | lst := strings.Split(lstTemp[len(lstTemp)-1], ".") 167 | lastInt, err := strconv.Atoi(lst[0]) // strconv.ParseInt(fNumStr, 10, 64) 168 | if err != nil || len(lst) != 2 { 169 | return nil, fmt.Errorf("Filenamer parse string (%s) error: %v", last, err) 170 | } 171 | 172 | fNamesList, err := f.getFilesByExtList(lst[1]) 173 | if err != nil { 174 | return nil, fmt.Errorf("Error finding files by ext: %v", err) 175 | } 176 | //fmt.Println("FileNamer: fNamesList: ", lastInt, lst, fNamesList) 177 | numList := make([]int, 0, len(fNamesList)) 178 | for _, fName := range fNamesList { 179 | fNumStr := strings.Replace(fName, "."+lst[1], "", 1) 180 | fNumInt, err := strconv.Atoi(fNumStr) // strconv.ParseInt(fNumStr, 10, 64) 181 | if err != nil { 182 | //TODO: info 183 | continue 184 | } 185 | numList = append(numList, fNumInt) 186 | } 187 | sort.Ints(numList) 188 | //fmt.Println("FileNamer: ============ numList: ", numList) 189 | outListInt := make([]int, 0, len(numList)) 190 | for i, fNum := range numList { 191 | if more && fNum > lastInt { 192 | outListInt = numList[i:] 193 | break 194 | } else if !more && fNum >= lastInt { 195 | outListInt = numList[0:i] 196 | break 197 | } 198 | } 199 | //fmt.Println("FileNamer: outListInt: ", outListInt) 200 | outListStr := make([]string, 0, len(outListInt)) 201 | for _, v := range outListInt { 202 | outListStr = append(outListStr, strconv.Itoa(v)+"."+lst[1]) 203 | } 204 | //fmt.Println("FileNamer: outListStr: ", outListStr) 205 | return outListStr, nil 206 | } 207 | 208 | func (f *FileNamer) findLatestNum(extList []string) (int64, error) { 209 | var max int64 210 | //extList := []string{".log", ".check", ".checkpoint"} //TODO: нужен ли ".check" ??? 211 | for _, ext := range extList { 212 | num, err := f.findMaxFile(ext) 213 | if err == nil && num > max { 214 | max = num 215 | } 216 | if err != nil { 217 | continue 218 | } 219 | 220 | //latestName, err := f.findLatestFile(ext) 221 | //fmt.Println("LATEST: ", num) 222 | 223 | // if err != nil { 224 | // return 0, err 225 | // } else if latestName == "" { 226 | // continue 227 | // } 228 | // strs := strings.Split(latestName, ".") 229 | // if len(strs) == 0 { 230 | // continue 231 | // } 232 | // num, err := strconv.ParseInt(strs[0], 10, 64) 233 | // if err == nil && num > max { 234 | // max = num 235 | // } 236 | } 237 | return max, nil 238 | } 239 | 240 | // func (f *FileNamer) findLatestFile(ext string) (string, error) { 241 | // fNamesList, err := f.getFilesByExtList(ext) 242 | // if err != nil { 243 | // return "", err 244 | // } 245 | // ln := len(fNamesList) 246 | // switch { 247 | // case ln == 0: 248 | // return "", nil 249 | // case ln == 1: 250 | // return fNamesList[0], nil 251 | // default: 252 | // sort.Strings(fNamesList) 253 | // return fNamesList[len(fNamesList)-1], nil 254 | // } 255 | // //return fNamesList, nil 256 | // } 257 | 258 | func (f *FileNamer) findMaxFile(ext string) (int64, error) { 259 | fNamesList, err := f.getFilesByExtList(ext) 260 | if err != nil { 261 | return 0, err 262 | } 263 | ln := len(fNamesList) 264 | switch { 265 | case ln == 0: 266 | return 0, nil 267 | // case ln == 1: 268 | // return fNamesList[0], nil 269 | default: 270 | var max int64 271 | for _, name := range fNamesList { 272 | strs := strings.Split(name, ".") 273 | if len(strs) == 0 { 274 | continue 275 | } 276 | num, err := strconv.ParseInt(strs[0], 10, 64) 277 | if err == nil && num > max { 278 | max = num 279 | } 280 | } 281 | return max, nil 282 | } 283 | //return fNamesList, nil 284 | } 285 | 286 | // func (f *FileNamer) findLatestLog(ext string) (string, error) { 287 | // fNamesList, err := f.getFilesByExtList(ext) 288 | // if err != nil { 289 | // return "", err 290 | // } 291 | // ln := len(fNamesList) 292 | // switch { 293 | // case ln == 0: 294 | // return "0" + ext, nil 295 | // case ln == 1: // последний лог мы никогда не берём чтобы не ткнуться в ещё наполняемый лог 296 | // return fNamesList[0], nil 297 | // default: 298 | // sort.Strings(fNamesList) 299 | // return fNamesList[len(fNamesList)-1], nil 300 | // } 301 | // //return fNamesList, nil 302 | // } 303 | 304 | func (f *FileNamer) getFilesByExtList(ext string) ([]string, error) { 305 | files, err := ioutil.ReadDir(f.dirPath) 306 | if err != nil { 307 | return nil, err 308 | } 309 | list := make([]string, 0, len(files)) 310 | for _, fl := range files { 311 | if strings.HasSuffix(fl.Name(), ext) { 312 | list = append(list, fl.Name()) 313 | } 314 | } 315 | return list, nil 316 | } 317 | -------------------------------------------------------------------------------- /services/journal/config.go: -------------------------------------------------------------------------------- 1 | package journal 2 | 3 | // Coffer 4 | // Journal (config) 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | /* 8 | Config - journal configuration. 9 | */ 10 | type Config struct { 11 | //DirPath string 12 | BatchSize int 13 | LimitRecordsPerLogfile int64 14 | } 15 | 16 | const ( 17 | stateStopped int64 = iota 18 | stateStarted 19 | statePanic 20 | ) 21 | -------------------------------------------------------------------------------- /services/journal/journal.go: -------------------------------------------------------------------------------- 1 | package journal 2 | 3 | // Coffer 4 | // Journal 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | "sync" 10 | "sync/atomic" 11 | "time" 12 | 13 | "github.com/claygod/coffer/services/batcher" 14 | "github.com/claygod/coffer/services/filenamer" 15 | //"github.com/claygod/tools/batcher" 16 | ) 17 | 18 | //const limitRecordsPerLogfile int64 = 100000 19 | 20 | /* 21 | Journal - transactions logs saver (WAL). 22 | */ 23 | type Journal struct { 24 | m sync.Mutex 25 | config *Config 26 | fileNamer *filenamer.FileNamer 27 | counter int64 28 | client *batcher.Client 29 | alarmFunc func(error) 30 | countBatchClients int64 31 | state int64 32 | } 33 | 34 | /* 35 | New - create new Journal. 36 | */ 37 | func New(cnf *Config, fn *filenamer.FileNamer, alarmFunc func(error)) (*Journal, error) { 38 | return &Journal{ 39 | config: cnf, 40 | fileNamer: fn, 41 | //client: clt, 42 | //dirPath: dirPath, 43 | alarmFunc: alarmFunc, 44 | //batchSize: batchSize, 45 | state: stateStarted, 46 | }, nil 47 | } 48 | 49 | /* 50 | Start - launch the journal. 51 | */ 52 | func (j *Journal) Start() error { 53 | j.m.Lock() 54 | defer j.m.Unlock() 55 | nName, err := j.fileNamer.GetNewFileName(".log") //dirPath 56 | if err != nil { 57 | return err 58 | } 59 | clt, err := batcher.Open(nName, j.config.BatchSize, j.alarmFunc) 60 | if err != nil { 61 | return err 62 | } 63 | j.client = clt 64 | return nil 65 | } 66 | 67 | /* 68 | Stop - stop the journal. 69 | */ 70 | func (j *Journal) Stop() { 71 | j.m.Lock() 72 | defer j.m.Unlock() 73 | atomic.CompareAndSwapInt64(&j.state, stateStarted, stateStopped) // if it doesn’t work, then we are already stopped or in a panic 74 | j.client.Close() 75 | for { 76 | if atomic.LoadInt64(&j.countBatchClients) == 0 { 77 | return 78 | } 79 | time.Sleep(1 * time.Millisecond) 80 | } 81 | } 82 | 83 | /* 84 | Restart - restart the Journal. The counter is set so that the next write is in a new file. 85 | */ 86 | func (j *Journal) Restart() { 87 | // j.m.Lock() 88 | // defer j.m.Unlock() 89 | //TODO: in theory, the state does not change here and you do not need to check it. 90 | atomic.StoreInt64(&j.counter, j.config.LimitRecordsPerLogfile+1) 91 | } 92 | 93 | /* 94 | Write - write data to log file. 95 | */ 96 | func (j *Journal) Write(toSave []byte) error { 97 | if st := atomic.LoadInt64(&j.state); st != stateStarted { 98 | return fmt.Errorf("State is `%d` (not started).", st) 99 | } 100 | clt, err := j.getClient() 101 | if err != nil { 102 | j.alarmFunc(err) 103 | atomic.StoreInt64(&j.state, statePanic) 104 | return err 105 | } 106 | clt.Write(toSave) 107 | 108 | return nil 109 | } 110 | 111 | func (j *Journal) getClient() (*batcher.Client, error) { 112 | j.m.Lock() 113 | defer j.m.Unlock() 114 | if j.counter > j.config.LimitRecordsPerLogfile { 115 | oldClt := j.client 116 | nName, err := j.fileNamer.GetNewFileName(".log") // j.dirPath 117 | if err != nil { 118 | return nil, err 119 | } 120 | clt, err := batcher.Open(nName, j.config.BatchSize, j.alarmFunc) 121 | if err != nil { 122 | return nil, err 123 | } 124 | j.client = clt 125 | j.counter = 0 126 | atomic.AddInt64(&j.countBatchClients, 1) 127 | j.clientBatchClose(oldClt) //TODO: del GO ? 128 | } 129 | j.counter++ 130 | return j.client, nil 131 | } 132 | 133 | func (j *Journal) clientBatchClose(clt *batcher.Client) { 134 | clt.Close() 135 | atomic.AddInt64(&j.countBatchClients, -1) 136 | } 137 | -------------------------------------------------------------------------------- /services/journal/journal_test.go: -------------------------------------------------------------------------------- 1 | package journal 2 | 3 | // Coffer 4 | // Journal (tests) 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "log" 9 | "os" 10 | "runtime/pprof" 11 | "testing" 12 | 13 | //"time" 14 | 15 | "github.com/claygod/coffer/services/filenamer" 16 | "github.com/claygod/tools/batcher" 17 | ) 18 | 19 | func BenchmarkClient(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 20 | b.StopTimer() 21 | clt, err := batcher.Open("./tmp.txt", 2000) 22 | if err != nil { 23 | b.Error("Error `stat` file") 24 | } 25 | defer clt.Close() 26 | dummy := forTestGetDummy(100) 27 | 28 | u := 0 29 | b.SetParallelism(256) 30 | f, err := os.Create("cpu.txt") 31 | if err != nil { 32 | b.Error("could not create CPU profile: ", err) 33 | } 34 | if err := pprof.StartCPUProfile(f); err != nil { 35 | b.Error("could not start CPU profile: ", err) 36 | } 37 | b.StartTimer() 38 | b.RunParallel(func(pb *testing.PB) { 39 | for pb.Next() { 40 | clt.Write(dummy) 41 | u++ 42 | } 43 | }) 44 | pprof.StopCPUProfile() 45 | 46 | // os.Remove(fileName) 47 | } 48 | 49 | func BenchmarkBatcherClient(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 50 | b.StopTimer() 51 | clt, err := batcher.Open("./tmp.txt", 2000) 52 | if err != nil { 53 | b.Error("Error `stat` file") 54 | } 55 | defer clt.Close() 56 | dummy := forTestGetDummy(100) 57 | 58 | u := 0 59 | b.SetParallelism(256) 60 | b.StartTimer() 61 | b.RunParallel(func(pb *testing.PB) { 62 | for pb.Next() { 63 | clt.Write(dummy) 64 | u++ 65 | } 66 | }) 67 | //pprof.StopCPUProfile() 68 | 69 | // os.Remove(fileName) 70 | } 71 | 72 | func BenchmarkNew1(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 73 | b.StopTimer() 74 | cnf := &Config{ 75 | //DirPath: "./", 76 | BatchSize: 2000, 77 | LimitRecordsPerLogfile: 100000, 78 | } 79 | j, err := New(cnf, filenamer.NewFileNamer("./"), forTestAlarmer) 80 | if err != nil { 81 | b.Error(err) 82 | } 83 | defer j.client.Close() 84 | dummy := forTestGetDummy(100) 85 | u := 0 86 | b.SetParallelism(256) 87 | b.StartTimer() 88 | b.RunParallel(func(pb *testing.PB) { 89 | for pb.Next() { 90 | j.client.Write(dummy) 91 | u++ 92 | } 93 | }) 94 | pprof.StopCPUProfile() 95 | 96 | // os.Remove(fileName) 97 | } 98 | 99 | func BenchmarkNew2(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 100 | b.StopTimer() 101 | cnf := &Config{ 102 | //DirPath: "./", 103 | BatchSize: 2000, 104 | LimitRecordsPerLogfile: 100000, 105 | } 106 | j, err := New(cnf, filenamer.NewFileNamer("./"), forTestAlarmer) 107 | if err != nil { 108 | b.Error(err) 109 | } 110 | defer j.client.Close() 111 | dummy := forTestGetDummy(100) 112 | u := 0 113 | b.SetParallelism(256) 114 | b.StartTimer() 115 | b.RunParallel(func(pb *testing.PB) { 116 | for pb.Next() { 117 | j.Write(dummy) 118 | u++ 119 | } 120 | }) 121 | pprof.StopCPUProfile() 122 | 123 | // os.Remove(fileName) 124 | } 125 | 126 | func forTestGetDummy(count int) []byte { 127 | dummy := make([]byte, count) 128 | for i := 0; i < count; i++ { 129 | dummy[i] = 105 130 | } 131 | return dummy 132 | } 133 | 134 | func forTestAlarmer(err error) { 135 | log.Println(err) 136 | } 137 | -------------------------------------------------------------------------------- /services/log.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | // Coffer 4 | // Log to out 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "log" 9 | ) 10 | 11 | /* 12 | Log -logger for joutnal. 13 | */ 14 | type Log struct { 15 | prefix string 16 | } 17 | 18 | /* 19 | NewLog - create new Log 20 | */ 21 | func NewLog(prefix string) *Log { 22 | return &Log{ 23 | prefix: prefix, 24 | } 25 | } 26 | 27 | /* 28 | Write - write to log. 29 | */ 30 | func (l *Log) Write(in []byte) (int, error) { 31 | go log.Print(string(in)) 32 | return len(in), nil 33 | } 34 | -------------------------------------------------------------------------------- /services/porter/README.md: -------------------------------------------------------------------------------- 1 | # Porter 2 | 3 | The porter gives out the keys (permissions) to work with resources at the specified key. 4 | 5 | ### Copyright © 2018-2019 Eduard Sesigin. All rights reserved. Contacts: 6 | -------------------------------------------------------------------------------- /services/porter/keeper.go: -------------------------------------------------------------------------------- 1 | package porter 2 | 3 | // Porter 4 | // Keys keeper 5 | // Copyright © 2018 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | type keeper struct { 8 | keys map[string]bool 9 | } 10 | 11 | func (k *keeper) catch(keys []string) { 12 | 13 | } 14 | 15 | func (k *keeper) throw(keys []string) { 16 | 17 | } 18 | -------------------------------------------------------------------------------- /services/porter/porter.go: -------------------------------------------------------------------------------- 1 | package porter 2 | 3 | // Porter 4 | // API 5 | // Copyright © 2018 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "runtime" 9 | "sort" 10 | "sync/atomic" 11 | "time" 12 | ) 13 | 14 | const timePause time.Duration = 100 * time.Microsecond 15 | 16 | const ( 17 | stateUnlocked int32 = iota 18 | stateLocked 19 | ) 20 | 21 | /* 22 | Porter - regulates access to resources by keys. 23 | */ 24 | type Porter struct { 25 | data [(1 << 24) - 1]int32 26 | } 27 | 28 | /* 29 | New - create new Porter. 30 | */ 31 | func New() *Porter { 32 | return &Porter{} 33 | } 34 | 35 | /* 36 | Catch - block certain resources. This function will infinitely try to block the necessary resources, 37 | so if the logic of the application using this library contains errors, deadlocks, etc., this can lead to FATAL errors. 38 | */ 39 | func (p *Porter) Catch(keys []string) { 40 | hashes := p.stringsToHashes(keys) 41 | for i, hash := range hashes { 42 | if !atomic.CompareAndSwapInt32(&p.data[hash], stateUnlocked, stateUnlocked) { 43 | p.throw(hashes[0:i]) 44 | runtime.Gosched() 45 | time.Sleep(timePause) 46 | } 47 | } 48 | } 49 | 50 | /* 51 | Throw - frees access to resources. Resources MUST be blocked before this, otherwise using this library will lead to errors. 52 | */ 53 | func (p *Porter) Throw(keys []string) { 54 | p.throw(p.stringsToHashes(keys)) 55 | } 56 | 57 | func (p *Porter) throw(hashes []int) { 58 | for _, hash := range hashes { 59 | atomic.StoreInt32(&p.data[hash], stateUnlocked) 60 | } 61 | } 62 | 63 | func (p *Porter) stringsToHashes(keys []string) []int { 64 | out := make([]int, 0, len(keys)) 65 | tempArr := make(map[int]bool) 66 | for _, key := range keys { 67 | tempArr[p.stringToHashe(key)] = true 68 | } 69 | for key := range tempArr { 70 | out = append(out, key) 71 | } 72 | sort.Ints(out) 73 | return out 74 | } 75 | 76 | func (p *Porter) stringToHashe(key string) int { 77 | switch len(key) { 78 | case 0: 79 | return 0 80 | case 1: 81 | return int(uint(key[0])) 82 | case 2: 83 | return int(uint(key[1])<<4) + int(uint(key[0])) 84 | case 3: 85 | return int(uint(key[2])<<8) + int(uint(key[1])<<4) + int(uint(key[0])) 86 | default: 87 | return int(uint(key[3])<<12) + int(uint(key[2])<<8) + int(uint(key[1])<<4) + int(uint(key[0])) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /services/repositories/handlers/handlers.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | // Coffer 4 | // Handlers repo 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | "sync" 10 | 11 | "github.com/claygod/coffer/domain" 12 | ) 13 | 14 | /* 15 | Handlers - parallel storage 16 | */ 17 | type Handlers struct { 18 | mtx sync.RWMutex 19 | handlers map[string]*domain.Handler 20 | } 21 | 22 | /* 23 | New - create new Handlers. 24 | */ 25 | func New() *Handlers { 26 | return &Handlers{ 27 | handlers: make(map[string]*domain.Handler), 28 | } 29 | } 30 | 31 | /* 32 | Get - get record from storage. 33 | */ 34 | func (h *Handlers) Get(handlerName string) (*domain.Handler, error) { 35 | h.mtx.RLock() 36 | hdl, ok := h.handlers[handlerName] 37 | h.mtx.RUnlock() 38 | if !ok { 39 | return nil, fmt.Errorf("Header with the name `%s` is not installed.", handlerName) 40 | } 41 | return hdl, nil 42 | } 43 | 44 | /* 45 | Set - add storage entry. 46 | */ 47 | func (h *Handlers) Set(handlerName string, handlerMethod *domain.Handler) { 48 | h.mtx.Lock() 49 | defer h.mtx.Unlock() 50 | // _, ok := h.handlers[handlerName] 51 | // if ok { 52 | // return fmt.Errorf("Header with the name `%s` is installed.", handlerName) 53 | // } 54 | h.handlers[handlerName] = handlerMethod 55 | //return nil 56 | } 57 | -------------------------------------------------------------------------------- /services/repositories/records/records.go: -------------------------------------------------------------------------------- 1 | package records 2 | 3 | // Coffer 4 | // In-Memory records repository 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "sync" 9 | 10 | "github.com/claygod/coffer/domain" 11 | ) 12 | 13 | /* 14 | Records - in-memory data repository. 15 | The store is moved to a separate entity so that, if necessary 16 | It would be possible to work not with one repository, but with their array. 17 | */ 18 | type Records struct { 19 | mtx sync.RWMutex 20 | store *storage 21 | } 22 | 23 | /* 24 | New - create new Records. 25 | */ 26 | func New() *Records { 27 | return &Records{ 28 | store: newStorage(), 29 | } 30 | } 31 | 32 | /* 33 | Reset - create new storage. 34 | */ 35 | func (s *Records) Reset() { 36 | s.mtx.Lock() 37 | defer s.mtx.Unlock() 38 | s.store = newStorage() 39 | } 40 | 41 | /* 42 | WriteList - write records list. 43 | */ 44 | func (s *Records) WriteList(list map[string][]byte) { 45 | s.mtx.Lock() 46 | defer s.mtx.Unlock() 47 | s.store.writeList(list) 48 | } 49 | 50 | /* 51 | WriteListStrict - write records list (strict mode). 52 | */ 53 | func (s *Records) WriteListStrict(list map[string][]byte) []string { 54 | s.mtx.Lock() 55 | defer s.mtx.Unlock() 56 | return s.store.writeListIfNot(list) 57 | } 58 | 59 | /* 60 | WriteListOptional - write records list (strict mode). 61 | */ 62 | func (s *Records) WriteListOptional(list map[string][]byte) []string { 63 | s.mtx.Lock() 64 | defer s.mtx.Unlock() 65 | return s.store.writeListOptional(list) 66 | } 67 | 68 | /* 69 | WriteListUnsafe - unsafe write records list. 70 | */ 71 | func (s *Records) WriteListUnsafe(list map[string][]byte) { 72 | s.store.writeList(list) 73 | } 74 | 75 | /* 76 | ReadList - read records list. 77 | */ 78 | func (s *Records) ReadList(list []string) (map[string][]byte, []string) { 79 | s.mtx.Lock() 80 | defer s.mtx.Unlock() 81 | return s.store.readList(list) 82 | } 83 | 84 | /* 85 | ReadListUnsafe - unsafe read records list. 86 | */ 87 | func (s *Records) ReadListUnsafe(list []string) (map[string][]byte, []string) { 88 | return s.store.readList(list) 89 | } 90 | 91 | /* 92 | DelListStrict - remove records list (strict mode). 93 | */ 94 | func (s *Records) DelListStrict(keys []string) []string { 95 | s.mtx.Lock() 96 | defer s.mtx.Unlock() 97 | return s.store.delAllOrNothing(keys) 98 | } 99 | 100 | /* 101 | DelListOptional - remove records list (optional mode). 102 | */ 103 | func (s *Records) DelListOptional(keys []string) ([]string, []string) { 104 | s.mtx.Lock() 105 | defer s.mtx.Unlock() 106 | return s.store.removeWhatIsPossible(keys) 107 | } 108 | 109 | /* 110 | Iterator - required when saving to file. 111 | */ 112 | func (s *Records) Iterator(chRecord chan *domain.Record) { // required when saving to file - требуется при сохранении в файл 113 | s.mtx.Lock() 114 | defer s.mtx.Unlock() 115 | chFinish := make(chan struct{}) 116 | s.store.iterator(chRecord, chFinish) 117 | <-chFinish 118 | close(chRecord) 119 | } 120 | 121 | /* 122 | CountRecords - total count records. 123 | */ 124 | func (s *Records) CountRecords() int { 125 | s.mtx.Lock() 126 | defer s.mtx.Unlock() 127 | return s.store.countRecords() 128 | } 129 | 130 | /* 131 | RecordsList - get total keys list. 132 | */ 133 | func (s *Records) RecordsList() []string { 134 | s.mtx.Lock() 135 | defer s.mtx.Unlock() 136 | return s.store.keysList() 137 | } 138 | 139 | /* 140 | RecordsListWithPrefix - get total keys list with prefix. 141 | */ 142 | func (s *Records) RecordsListWithPrefix(prefix string) []string { 143 | s.mtx.Lock() 144 | defer s.mtx.Unlock() 145 | return s.store.keysListWithPrefix(prefix) 146 | } 147 | -------------------------------------------------------------------------------- /services/repositories/records/storage.go: -------------------------------------------------------------------------------- 1 | package records 2 | 3 | // Coffer 4 | // Records storage 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | //"fmt" 9 | "strings" 10 | 11 | "github.com/claygod/coffer/domain" 12 | ) 13 | 14 | /* 15 | storage - easy data storage (not parallel mode). 16 | */ 17 | type storage struct { 18 | data map[string][]byte 19 | } 20 | 21 | func newStorage() *storage { 22 | return &storage{ 23 | data: make(map[string][]byte), 24 | } 25 | } 26 | 27 | func (r *storage) readList(keys []string) (map[string][]byte, []string) { 28 | notFound := make([]string, 0, len(keys)) 29 | list := make(map[string][]byte) 30 | for _, key := range keys { 31 | if value, ok := r.data[key]; ok { 32 | list[key] = value 33 | } else { 34 | notFound = append(notFound, key) 35 | } 36 | } 37 | return list, notFound 38 | } 39 | 40 | func (r *storage) writeList(list map[string][]byte) { 41 | for key, value := range list { 42 | r.data[key] = value 43 | } 44 | } 45 | 46 | func (r *storage) writeListIfNot(list map[string][]byte) []string { 47 | found := make([]string, 0, len(list)) 48 | for key := range list { // first check if all of these keys 49 | if _, ok := r.data[key]; ok { 50 | found = append(found, key) 51 | } 52 | } 53 | if len(found) == 0 { 54 | r.writeList(list) 55 | } 56 | return found 57 | } 58 | 59 | func (r *storage) writeListOptional(list map[string][]byte) []string { 60 | found := make([]string, 0, len(list)) 61 | for key, value := range list { // first check if all of these keys 62 | if _, ok := r.data[key]; ok { 63 | found = append(found, key) 64 | } 65 | r.data[key] = value 66 | } 67 | return found 68 | } 69 | 70 | func (r *storage) writeOne(key string, value []byte) { 71 | r.data[key] = value 72 | } 73 | 74 | func (r *storage) setOne(rec *domain.Record) { 75 | r.data[rec.Key] = rec.Value 76 | } 77 | 78 | func (r *storage) removeWhatIsPossible(keys []string) ([]string, []string) { 79 | removedList := make([]string, 0, len(keys)) 80 | notFound := make([]string, 0, len(keys)) 81 | for _, key := range keys { 82 | if _, ok := r.data[key]; ok { 83 | removedList = append(removedList, key) 84 | delete(r.data, key) 85 | } else { 86 | notFound = append(notFound, key) 87 | } 88 | } 89 | return removedList, notFound 90 | } 91 | 92 | func (r *storage) delAllOrNothing(keys []string) []string { 93 | notFound := make([]string, 0, len(keys)) 94 | for _, key := range keys { // first check if all of these keys 95 | if _, ok := r.data[key]; !ok { 96 | notFound = append(notFound, key) 97 | } 98 | } 99 | if len(notFound) != 0 { 100 | return notFound 101 | } 102 | for _, key := range keys { // now delete 103 | delete(r.data, key) 104 | } 105 | return notFound 106 | } 107 | 108 | func (r *storage) iterator(chRecord chan *domain.Record, chFinish chan struct{}) { 109 | for key, value := range r.data { 110 | chRecord <- &domain.Record{ 111 | Key: key, 112 | Value: value, 113 | } 114 | } 115 | close(chFinish) 116 | } 117 | 118 | func (r *storage) countRecords() int { 119 | return len(r.data) 120 | } 121 | 122 | func (r *storage) keysList() []string { 123 | list := make([]string, 0, len(r.data)) 124 | for key := range r.data { 125 | list = append(list, key) 126 | } 127 | return list 128 | } 129 | 130 | func (r *storage) keysListWithPrefix(prefix string) []string { 131 | list := make([]string, 0, len(r.data)) 132 | for key := range r.data { 133 | if strings.HasPrefix(key, prefix) { 134 | list = append(list, key) 135 | } 136 | } 137 | return list 138 | } 139 | -------------------------------------------------------------------------------- /services/resources/resources.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | // Coffer 4 | // Resources API 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | "os" 10 | "runtime" 11 | "sync/atomic" 12 | "time" 13 | 14 | "github.com/claygod/coffer/services/startstop" 15 | "github.com/shirou/gopsutil/v3/disk" 16 | "github.com/shirou/gopsutil/v3/mem" 17 | ) 18 | 19 | /* 20 | ResourcesControl - indicator of the status of the physical memory (and disk) of the device. 21 | if DiskPath == "" in config, then free disk space we do not control. 22 | */ 23 | type ResourcesControl struct { 24 | config *Config 25 | freeMemory int64 26 | freeDisk int64 27 | counter int64 28 | starter *startstop.StartStop 29 | //hasp int64 30 | } 31 | 32 | /* 33 | New - create ResourcesControl 34 | */ 35 | func New(cnf *Config) (*ResourcesControl, error) { 36 | m := &ResourcesControl{ 37 | config: cnf, 38 | starter: startstop.New(), 39 | } 40 | if m.config.DirPath != "" { 41 | if stat, err := os.Stat(m.config.DirPath); err != nil || !stat.IsDir() { 42 | return nil, fmt.Errorf("Invalid disk path: %s ", m.config.DirPath) 43 | } 44 | } 45 | if err := m.setFreeResources(); err != nil { 46 | return nil, err 47 | } 48 | if m.freeDisk < m.config.LimitDisk*2 { 49 | return nil, fmt.Errorf("Low available disk: %d bytes", m.freeDisk) 50 | } 51 | if m.freeMemory < m.config.LimitMemory*2 { 52 | return nil, fmt.Errorf("Low available memory: %d bytes", m.freeMemory) 53 | } 54 | //m.starter.Start() 55 | //atomic.StoreInt64(&m.hasp, 1) 56 | //m.setFreeResources() 57 | //go m.freeResourceSetter() 58 | return m, nil 59 | } 60 | 61 | /* 62 | Start - launch ResourcesControl 63 | */ 64 | func (r *ResourcesControl) Start() bool { 65 | res := r.starter.Start() 66 | if res { 67 | r.setFreeResources() 68 | go r.freeResourceSetter() 69 | } 70 | return res 71 | //atomic.StoreInt64(&r.hasp, 0) 72 | } 73 | 74 | /* 75 | Stop - ResourcesControl stop. 76 | */ 77 | func (r *ResourcesControl) Stop() bool { 78 | return r.starter.Stop() 79 | //atomic.StoreInt64(&r.hasp, 0) 80 | } 81 | 82 | /* 83 | GetPermission - get permission to use memory (and disk). 84 | */ 85 | func (r *ResourcesControl) GetPermission(size int64) bool { 86 | counterNew := atomic.AddInt64(&r.counter, 1) 87 | if int8(counterNew) == 0 { 88 | r.setFreeResources() 89 | } 90 | if r.getPermissionMemory(size) && r.getPermissionDisk(size) { 91 | return true 92 | } 93 | return false 94 | } 95 | 96 | func (r *ResourcesControl) setFreeResources() error { 97 | if err := r.setFreeDisk(); err != nil { 98 | return err 99 | } 100 | if err := r.setFreeMemory(); err != nil { 101 | return err 102 | } 103 | return nil 104 | } 105 | 106 | func (r *ResourcesControl) setFreeDisk() error { 107 | if r.config.DirPath == "" { 108 | return nil 109 | } 110 | us, err := disk.Usage(r.config.DirPath) 111 | if err != nil { 112 | atomic.StoreInt64(&r.freeDisk, 0) 113 | return err 114 | } 115 | atomic.StoreInt64(&r.freeDisk, int64(us.Free)) 116 | return nil 117 | } 118 | 119 | func (r *ResourcesControl) setFreeMemory() error { 120 | vms, err := mem.VirtualMemory() 121 | if err != nil { 122 | atomic.StoreInt64(&r.freeMemory, 0) 123 | return err 124 | } 125 | atomic.StoreInt64(&r.freeMemory, int64(vms.Available)) 126 | return nil 127 | } 128 | 129 | func (r *ResourcesControl) getPermissionDisk(size int64) bool { 130 | if r.config.DirPath == "" { 131 | return true 132 | } 133 | for { 134 | curFree := atomic.LoadInt64(&r.freeDisk) 135 | //fmt.Println("R:R:curFree: ", curFree, size, r.config.LimitDisk) 136 | if curFree-size > r.config.LimitDisk && 137 | atomic.CompareAndSwapInt64(&r.freeDisk, curFree, curFree-size) { 138 | //fmt.Println("R:R:curFree: ", true) 139 | return true 140 | } else if curFree-size <= r.config.LimitDisk { 141 | return false 142 | } 143 | runtime.Gosched() 144 | } 145 | } 146 | 147 | func (r *ResourcesControl) getPermissionMemory(size int64) bool { 148 | for { 149 | curFree := atomic.LoadInt64(&r.freeMemory) 150 | //fmt.Println("R:M:curFree: ", curFree, size, r.config.LimitMemory) 151 | if curFree-size > r.config.LimitMemory && 152 | atomic.CompareAndSwapInt64(&r.freeMemory, curFree, curFree-size) { 153 | //fmt.Println("R:M:curFree: ", true) 154 | return true 155 | } else if curFree-size <= r.config.LimitMemory { 156 | return false 157 | } 158 | runtime.Gosched() 159 | } 160 | } 161 | 162 | func (r *ResourcesControl) freeResourceSetter() { 163 | var counter int64 164 | ticker := time.NewTicker(timeRefresh) 165 | for range ticker.C { 166 | if r.starter.IsReady() { 167 | return 168 | } 169 | // if atomic.LoadInt64(&m.hasp) == 0 { 170 | // return 171 | // } 172 | if !r.starter.Add() { 173 | return 174 | } 175 | if byte(counter) == 0 { 176 | r.setFreeResources() 177 | } 178 | r.starter.Done() 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /services/resources/resources_config.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | // Coffer 4 | // Resources Config 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "time" 9 | ) 10 | 11 | /* 12 | Config - for ResourcesControl. 13 | */ 14 | type Config struct { 15 | LimitMemory int64 // minimum available memory (bytes) 16 | LimitDisk int64 // minimum free disk space 17 | DirPath string 18 | } 19 | 20 | const timeRefresh time.Duration = 1 * time.Millisecond 21 | -------------------------------------------------------------------------------- /services/resources/resources_test.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | // Coffer 4 | // Resources Tests 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "os" 9 | "runtime" 10 | "strconv" 11 | "testing" 12 | ) 13 | 14 | const overReq int64 = 1000000000000000000 15 | 16 | var badPathWin string = "c:\\qwertyzzzzzzzzzz" 17 | var badPathNix string = "/qwertyzzzzzzzzzzzzz" 18 | 19 | func TestGenBadPath(t *testing.T) { 20 | for i := 0; i < 100000000000; i++ { 21 | path := "" 22 | if runtime.GOOS == "windows" { 23 | path = "c:\\" + strconv.Itoa(i) 24 | } else { 25 | path = "/" + strconv.Itoa(i) 26 | } 27 | if stat, err := os.Stat(path); err != nil || !stat.IsDir() { 28 | if runtime.GOOS == "windows" { 29 | badPathWin = path 30 | } else { 31 | badPathNix = path 32 | } 33 | break 34 | } 35 | } 36 | } 37 | 38 | func TestGetPermissionWithoutDiskLimit100(t *testing.T) { 39 | cnf := &Config{ 40 | LimitMemory: 100, 41 | LimitDisk: 100, 42 | DirPath: "", 43 | } 44 | if runtime.GOOS == "windows" { 45 | cnf.DirPath = "c:\\" 46 | } else { 47 | cnf.DirPath = "/" 48 | } 49 | 50 | r, err := New(cnf) 51 | if err != nil { 52 | t.Error(err) 53 | } 54 | if !r.GetPermission(1) { 55 | t.Error("Could not get permission with minimum requirements") 56 | } 57 | if r.GetPermission(overReq) { 58 | t.Error("Permission received for too large requirements") 59 | } 60 | } 61 | 62 | func TestGetPermissionWithoutDiskLimit10000000000(t *testing.T) { 63 | cnf := &Config{ 64 | LimitMemory: 100, 65 | LimitDisk: 1000000000000, 66 | DirPath: "", 67 | } 68 | _, err := New(cnf) 69 | if err == nil { 70 | t.Error("Permission received for too large limit") 71 | } 72 | } 73 | 74 | func TestGetPermissionWithoutMemoryLimit10000000000(t *testing.T) { 75 | cnf := &Config{ 76 | LimitMemory: 1000000000000, 77 | LimitDisk: 100, 78 | DirPath: "", 79 | } 80 | _, err := New(cnf) 81 | if err == nil { 82 | t.Error("Permission received for too large limit") 83 | } 84 | } 85 | 86 | func TestGetPermissionWithDisk(t *testing.T) { 87 | cnf := &Config{ 88 | LimitMemory: 100, 89 | //AddRatioMemory: 5, 90 | LimitDisk: 100, 91 | //AddRatioDisk: 5, 92 | } 93 | if runtime.GOOS == "windows" { 94 | cnf.DirPath = "c:\\" 95 | } else { 96 | cnf.DirPath = "/" 97 | } 98 | r, err := New(cnf) 99 | if err != nil { 100 | t.Error(err) 101 | } 102 | if !r.GetPermission(1) { 103 | t.Error("Could not get permission with minimum requirements") 104 | } 105 | if r.GetPermission(overReq) { 106 | t.Error("Permission received for too large requirements") 107 | } 108 | } 109 | 110 | func TestGetPermissionWithDiskBadPath(t *testing.T) { 111 | cnf := &Config{ 112 | LimitMemory: 100, 113 | //AddRatioMemory: 5, 114 | LimitDisk: 100, 115 | //AddRatioDisk: 5, 116 | } 117 | if runtime.GOOS == "windows" { 118 | cnf.DirPath = badPathWin 119 | } else { 120 | cnf.DirPath = badPathNix 121 | } 122 | _, err := New(cnf) 123 | if err == nil { 124 | t.Errorf("Wrong path %s should have caused an error", cnf.DirPath) 125 | } 126 | } 127 | 128 | func BenchmarkSetFreeMemory(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 129 | b.StopTimer() 130 | cnf := &Config{ 131 | LimitMemory: 100, 132 | //AddRatioMemory: 5, 133 | LimitDisk: 100, 134 | //AddRatioDisk: 5, 135 | } 136 | if runtime.GOOS == "windows" { 137 | cnf.DirPath = "c:\\" 138 | } else { 139 | cnf.DirPath = "/" 140 | } 141 | r, err := New(cnf) 142 | if err != nil { 143 | b.Error(err) 144 | } 145 | b.StartTimer() 146 | for i := 0; i < b.N; i++ { 147 | r.setFreeMemory() 148 | } 149 | } 150 | 151 | func BenchmarkSetFreeDisk(b *testing.B) { // go tool pprof -web ./batcher.test ./cpu.txt 152 | b.StopTimer() 153 | cnf := &Config{ 154 | LimitMemory: 100, 155 | //AddRatioMemory: 5, 156 | LimitDisk: 100, 157 | //AddRatioDisk: 5, 158 | } 159 | if runtime.GOOS == "windows" { 160 | cnf.DirPath = "c:\\" 161 | } else { 162 | cnf.DirPath = "/" 163 | } 164 | r, err := New(cnf) 165 | if err != nil { 166 | b.Error(err) 167 | } 168 | b.StartTimer() 169 | for i := 0; i < b.N; i++ { 170 | r.setFreeDisk() 171 | } 172 | } 173 | -------------------------------------------------------------------------------- /services/startstop/config.go: -------------------------------------------------------------------------------- 1 | package startstop 2 | 3 | // Coffer 4 | // StartStop (config) 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "time" 9 | ) 10 | 11 | const ( 12 | stateBlocked int64 = -2 13 | stateReady int64 = -1 14 | stateRun int64 = 0 15 | maxIterations int64 = 1e10 16 | blockedBarrier int64 = 1e15 17 | ) 18 | 19 | const pauseDefault time.Duration = 10 * time.Microsecond 20 | -------------------------------------------------------------------------------- /services/startstop/startstop.go: -------------------------------------------------------------------------------- 1 | package startstop 2 | 3 | // Coffer 4 | // StartStop (API) 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "runtime" 9 | "sync/atomic" 10 | "time" 11 | ) 12 | 13 | /* 14 | StartStop - counter to start and stop applications 15 | */ 16 | type StartStop struct { 17 | enumerator int64 18 | pause time.Duration 19 | } 20 | 21 | /* 22 | New - create new StartStop 23 | */ 24 | func New(args ...time.Duration) *StartStop { 25 | pause := pauseDefault 26 | if len(args) == 1 { 27 | pause = args[0] 28 | } 29 | return &StartStop{ 30 | enumerator: stateReady, 31 | pause: pause, 32 | } 33 | } 34 | 35 | /* 36 | Start - launch. 37 | */ 38 | func (s *StartStop) Start() bool { 39 | for i := int64(0); i < maxIterations; i++ { 40 | if atomic.LoadInt64(&s.enumerator) == stateRun || atomic.CompareAndSwapInt64(&s.enumerator, stateReady, stateRun) { 41 | return true 42 | } 43 | runtime.Gosched() 44 | time.Sleep(s.pause) 45 | } 46 | return false 47 | } 48 | 49 | /* 50 | Stop - stopped. 51 | */ 52 | func (s *StartStop) Stop() bool { 53 | for i := int64(0); i < maxIterations; i++ { 54 | curNum := atomic.LoadInt64(&s.enumerator) 55 | switch { 56 | case curNum == -blockedBarrier: // after blocking all tasks finally completed 57 | atomic.CompareAndSwapInt64(&s.enumerator, -blockedBarrier, stateReady) 58 | case curNum < stateBlocked: // not all tasks are completed 59 | // We are waiting and hoping for all the tasks to be completed, but new ones will definitely not appear here 60 | case curNum == stateBlocked: // blocked but also stopped 61 | return true 62 | case curNum == stateReady: // the best way 63 | return true 64 | case curNum == stateRun: 65 | atomic.CompareAndSwapInt64(&s.enumerator, stateRun, stateReady) 66 | case curNum >= stateRun: // disable the ability to start new tasks 67 | atomic.CompareAndSwapInt64(&s.enumerator, curNum, curNum-blockedBarrier) 68 | } 69 | runtime.Gosched() 70 | time.Sleep(s.pause) 71 | } 72 | return false 73 | } 74 | 75 | /* 76 | Block - block access. 77 | */ 78 | func (s *StartStop) Block() bool { 79 | for i := int64(0); i < maxIterations; i++ { 80 | if s.Stop() && atomic.CompareAndSwapInt64(&s.enumerator, stateReady, stateBlocked) { 81 | return true 82 | } 83 | runtime.Gosched() 84 | time.Sleep(s.pause) 85 | } 86 | return false 87 | } 88 | 89 | /* 90 | Unblock - unblock access. 91 | */ 92 | func (s *StartStop) Unblock() bool { 93 | for i := int64(0); i < maxIterations; i++ { 94 | if atomic.CompareAndSwapInt64(&s.enumerator, stateBlocked, stateReady) { 95 | return true 96 | } 97 | runtime.Gosched() 98 | time.Sleep(s.pause) 99 | } 100 | return false 101 | } 102 | 103 | /* 104 | Add - add task to list. 105 | */ 106 | func (s *StartStop) Add() bool { 107 | for { 108 | curNum := atomic.LoadInt64(&s.enumerator) 109 | if curNum <= stateReady { // blocked 110 | return false 111 | } else if atomic.CompareAndSwapInt64(&s.enumerator, curNum, curNum+1) { 112 | return true 113 | } 114 | runtime.Gosched() 115 | } 116 | } 117 | 118 | /* 119 | Done - deltask from list. 120 | */ 121 | func (s *StartStop) Done() bool { 122 | for { 123 | curNum := atomic.LoadInt64(&s.enumerator) 124 | if curNum == stateReady { 125 | return false 126 | } else if atomic.CompareAndSwapInt64(&s.enumerator, curNum, curNum-1) { 127 | return true 128 | } 129 | runtime.Gosched() 130 | } 131 | } 132 | 133 | /* 134 | Total - count tasks. 135 | */ 136 | func (s *StartStop) Total() int64 { 137 | return atomic.LoadInt64(&s.enumerator) 138 | } 139 | 140 | /* 141 | IsReady - check is ready. 142 | */ 143 | func (s *StartStop) IsReady() bool { 144 | return atomic.LoadInt64(&s.enumerator) == stateReady 145 | } 146 | -------------------------------------------------------------------------------- /test/README.md: -------------------------------------------------------------------------------- 1 | # Dir for tests -------------------------------------------------------------------------------- /usecases/checkpoint.go: -------------------------------------------------------------------------------- 1 | package usecases 2 | 3 | // Coffer 4 | // Checkpoint helper 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | "io" 10 | "os" 11 | "sync" 12 | 13 | "github.com/claygod/coffer/domain" 14 | ) 15 | 16 | type checkpoint struct { 17 | m sync.Mutex 18 | config *Config 19 | } 20 | 21 | /* 22 | NewCheckpoint - create new checkpoint. 23 | */ 24 | func NewCheckpoint(config *Config) *checkpoint { 25 | return &checkpoint{ 26 | config: config, 27 | } 28 | } 29 | 30 | func (c *checkpoint) save(repo domain.RecordsRepository, chpName string) error { 31 | c.m.Lock() 32 | defer c.m.Unlock() 33 | 34 | f, err := os.Create(chpName) 35 | if err != nil { 36 | return err 37 | } 38 | 39 | err = c.saveToFile(repo, f) 40 | f.Close() 41 | if err != nil { 42 | os.Remove(chpName) 43 | 44 | return err 45 | } 46 | 47 | if err := os.Rename(chpName, chpName+extPoint); err != nil { 48 | return fmt.Errorf("%v %v", err, os.Remove(chpName)) 49 | } 50 | 51 | return nil 52 | } 53 | 54 | func (c *checkpoint) saveToFile(repo domain.RecordsRepository, f *os.File) error { 55 | chRecord := make(chan *domain.Record, 10) //TODO: size? 56 | 57 | go repo.Iterator(chRecord) 58 | 59 | for { 60 | rec := <-chRecord 61 | if rec == nil { 62 | break 63 | } 64 | 65 | prb, err := c.prepareRecordToCheckpoint(rec.Key, rec.Value) 66 | if err != nil { 67 | return err 68 | } 69 | 70 | if _, err := f.Write(prb); err != nil { 71 | return err 72 | } 73 | } 74 | 75 | return nil 76 | } 77 | 78 | func (c *checkpoint) load(repo domain.RecordsRepository, fileName string) error { 79 | f, err := os.Open(fileName) // c.config.DirPath + "/" + 80 | 81 | if err != nil { 82 | return err 83 | } 84 | 85 | defer f.Close() 86 | 87 | if err := c.loadFromFile(repo, f); err != nil { 88 | return err 89 | } 90 | 91 | return c.loadFromFile(repo, f) 92 | } 93 | 94 | func (c *checkpoint) loadFromFile(repo domain.RecordsRepository, f *os.File) error { 95 | rSize := make([]byte, 8) 96 | recs := make(map[string][]byte) 97 | 98 | for { 99 | _, err := f.Read(rSize) 100 | if err != nil { 101 | if err == io.EOF { 102 | break 103 | } 104 | 105 | repo.Reset() 106 | 107 | return err 108 | } 109 | 110 | rSuint64 := bytesToUint64(rSize) 111 | sizeKey := int16(rSuint64) 112 | sizeValue := rSuint64 >> 16 113 | 114 | key := make([]byte, sizeKey) 115 | 116 | n, err := f.Read(key) 117 | if err != nil { 118 | // if err == io.EOF { // EOF ? 119 | // break 120 | // } 121 | 122 | return err 123 | } else if n != int(sizeKey) { 124 | repo.Reset() 125 | 126 | return fmt.Errorf("The key is not fully loaded (%v)", key) 127 | } 128 | 129 | value := make([]byte, int(sizeValue)) 130 | 131 | n, err = f.Read(value) 132 | if err != nil { 133 | // if err == io.EOF { // EOF ? 134 | // break 135 | // } 136 | repo.Reset() 137 | 138 | return err 139 | } else if n != int(sizeValue) { 140 | repo.Reset() 141 | 142 | return fmt.Errorf("The value is not fully loaded, (%v)", value) 143 | } 144 | 145 | recs[string(key)] = value 146 | } 147 | 148 | repo.WriteListUnsafe(recs) 149 | 150 | return nil 151 | } 152 | 153 | func (c *checkpoint) prepareRecordToCheckpoint(key string, value []byte) ([]byte, error) { 154 | if len(key) > c.config.MaxKeyLength { 155 | return nil, fmt.Errorf("Key length %d is greater than permissible %d", len(key), c.config.MaxKeyLength) 156 | } 157 | 158 | if len(value) > c.config.MaxValueLength { 159 | return nil, fmt.Errorf("Value length %d is greater than permissible %d", len(value), c.config.MaxValueLength) 160 | } 161 | 162 | var size uint64 = uint64(len([]byte(value))) 163 | size = size << 16 164 | size += uint64(len(key)) 165 | 166 | return append(uint64ToBytes(size), (append([]byte(key), value...))...), nil 167 | } 168 | -------------------------------------------------------------------------------- /usecases/config.go: -------------------------------------------------------------------------------- 1 | package usecases 2 | 3 | // Coffer 4 | // Config 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "time" 9 | ) 10 | 11 | /* 12 | Config - usecases universal config. 13 | */ 14 | type Config struct { 15 | FollowPause time.Duration 16 | LogsByCheckpoint int64 17 | DirPath string 18 | AllowStartupErrLoadLogs bool 19 | RemoveUnlessLogs bool // deleting logs after they hit the checkpoint 20 | MaxKeyLength int // = int(uint64(1)<<16) - 1 21 | MaxValueLength int // = int(uint64(1)<<48) - 1 22 | } 23 | 24 | const ( 25 | stateStopped int64 = iota 26 | stateStarted 27 | statePanic 28 | ) 29 | 30 | const ( 31 | extLog string = ".log" 32 | extCheck string = ".check" 33 | extPoint string = "point" 34 | megabyte int64 = 1024 * 1024 35 | ) 36 | 37 | const ( 38 | codeWriteList byte = iota //codeWrite 39 | codeTransaction 40 | codeDeleteListStrict 41 | codeDeleteListOptional 42 | ) 43 | -------------------------------------------------------------------------------- /usecases/follow_interactor.go: -------------------------------------------------------------------------------- 1 | package usecases 2 | 3 | // Coffer 4 | // Follow interactor 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | //"fmt" 9 | "io/ioutil" 10 | "os" 11 | "strings" 12 | "sync/atomic" 13 | "time" 14 | 15 | "github.com/claygod/coffer/domain" 16 | ) 17 | 18 | /* 19 | FollowInteractor - after the database is launched, it writes all operations to the log. As a result, 20 | the log can grow very much. If in the end, at the end of the application, the database is correctly stopped, 21 | a new checkpoint will appear, and at the next start, the data will be taken from it. 22 | However, the stop may not be correct, and a new checkpoint will not be created. 23 | 24 | In this case, at a new start, the database will be forced to load the old checkpoint, and re-perform all operations 25 | that were completed and recorded in the log. This can turn out to be quite significant in time, and as a result, 26 | the database will take longer to load, which is not always acceptable for applications. 27 | 28 | That is why there is a follower mechanism in the database that methodically goes through the logs in the process of 29 | the database and periodically creates checkpoints that are much closer to the current moment. 30 | Also, the follower has the function of cleaning old logs and checkpoints to free up space on your hard drive. 31 | */ 32 | type FollowInteractor struct { 33 | logger Logger 34 | loader *Loader 35 | config *Config 36 | chp *checkpoint 37 | repo domain.RecordsRepository 38 | filenamer FileNamer 39 | changesCounter int64 40 | lastFileNameLog string 41 | hasp Starter 42 | } 43 | 44 | /* 45 | NewFollowInteractor - create new interactor. 46 | */ 47 | func NewFollowInteractor( 48 | logger Logger, 49 | loader *Loader, 50 | config *Config, 51 | chp *checkpoint, 52 | repo domain.RecordsRepository, 53 | filenamer FileNamer, 54 | hasp Starter, 55 | 56 | ) (*FollowInteractor, error) { 57 | fi := &FollowInteractor{ 58 | logger: logger, 59 | loader: loader, 60 | config: config, 61 | chp: chp, 62 | repo: repo, 63 | filenamer: filenamer, 64 | lastFileNameLog: "-1.log", //TODO: in config 65 | hasp: hasp, 66 | } 67 | 68 | chpList, err := fi.filenamer.GetHalf("-1"+extCheck+extPoint, true) 69 | 70 | if err != nil { 71 | return nil, err 72 | } 73 | 74 | fChName, err := fi.loader.LoadLatestValidCheckpoint(chpList, fi.repo) // загрузить последнюю валидную версию checkpoint 75 | 76 | if err != nil { 77 | fi.logger.Warning(err) 78 | fChName = "-1" + extCheck + extPoint 79 | } 80 | 81 | fi.lastFileNameLog = strings.Replace(fChName, extCheck+extPoint, extLog, 1) // и выставить его номер 82 | 83 | return fi, nil 84 | } 85 | 86 | /* 87 | Start - launch FollowInteractor. 88 | */ 89 | func (f *FollowInteractor) Start() bool { 90 | if !f.hasp.Start() { 91 | return false 92 | } 93 | 94 | go f.worker() 95 | 96 | return true 97 | } 98 | 99 | /* 100 | Stop - stop FollowInteractor. 101 | */ 102 | func (f *FollowInteractor) Stop() bool { 103 | if !f.hasp.Stop() { 104 | return false 105 | } 106 | 107 | return true 108 | } 109 | 110 | /* 111 | worker - cyclic approximation of checkpoints to the current state. 112 | If any error occurs, operation stops (at least until a reboot). 113 | */ 114 | func (f *FollowInteractor) worker() { 115 | for { 116 | if f.hasp.IsReady() { 117 | return 118 | } 119 | 120 | f.hasp.Add() 121 | 122 | if err := f.follow(); err != nil { 123 | f.hasp.Done() 124 | f.Stop() 125 | f.hasp.Block() 126 | f.logger.Error(err, "Method=worker", "Follow interactor is STOPPED!") 127 | 128 | return 129 | } 130 | 131 | f.hasp.Done() 132 | time.Sleep(f.config.FollowPause) 133 | } 134 | } 135 | 136 | func (f *FollowInteractor) follow() error { 137 | list, err := f.findLatestLogs() 138 | if err != nil { 139 | return err 140 | } else if len(list) == 0 { 141 | return nil 142 | } 143 | 144 | err, wrn := f.loader.LoadLogs(list, f.repo) 145 | if err != nil { 146 | return err 147 | } else if wrn != nil { // we also stop on broken files 148 | return wrn 149 | } 150 | 151 | atomic.AddInt64(&f.changesCounter, int64(len(list))) 152 | logFileName := f.config.DirPath + list[len(list)-1] 153 | 154 | if atomic.LoadInt64((&f.changesCounter)) > f.config.LogsByCheckpoint && logFileName != f.lastFileNameLog { 155 | if err := f.newCheckpoint(logFileName); err != nil { 156 | return err 157 | } 158 | 159 | if f.config.RemoveUnlessLogs { 160 | f.removingUselessLogs(logFileName) 161 | } 162 | 163 | atomic.StoreInt64(&f.changesCounter, 0) 164 | //f.changesCounter = 0 165 | } 166 | 167 | f.lastFileNameLog = logFileName 168 | 169 | return nil 170 | } 171 | 172 | func (f *FollowInteractor) removingUselessLogs(lastLogPath string) { 173 | list1, err := f.filenamer.GetHalf(lastLogPath, false) 174 | if err != nil { 175 | f.logger.Warning(err) 176 | } 177 | 178 | for _, lgName := range list1 { 179 | err := os.Remove(f.config.DirPath + lgName) // we don’t look at errors if some file is not deleted accidentally, it’s not scary 180 | 181 | if err != nil { 182 | f.logger.Warning(err) 183 | } 184 | } 185 | 186 | list2, err := f.filenamer.GetHalf(strings.Replace(lastLogPath, extLog, extCheck+extPoint, 1), false) 187 | if err != nil { 188 | f.logger.Warning(err) 189 | } 190 | 191 | for _, lgName := range list2 { 192 | err := os.Remove(f.config.DirPath + lgName) // we don’t look at errors if some file is not deleted accidentally, it’s not scary 193 | 194 | if err != nil { 195 | f.logger.Warning(err) 196 | } 197 | } 198 | } 199 | 200 | func (f *FollowInteractor) findLatestLogs() ([]string, error) { 201 | fNamesList, err := f.filenamer.GetHalf(f.lastFileNameLog, true) 202 | if err != nil { 203 | return nil, err 204 | } 205 | 206 | ln := len(fNamesList) 207 | if ln <= 1 { // we don’t take the last log so as not to stumble into the log that is still being filled 208 | return make([]string, 0), nil 209 | } 210 | 211 | return fNamesList[0 : ln-2], nil 212 | } 213 | 214 | func (f *FollowInteractor) getFilesByExtList(ext string) ([]string, error) { 215 | files, err := ioutil.ReadDir(f.config.DirPath) 216 | 217 | if err != nil { 218 | return nil, err 219 | } 220 | 221 | list := make([]string, 0, len(files)) 222 | 223 | for _, fl := range files { 224 | if strings.HasSuffix(fl.Name(), ext) { 225 | list = append(list, f.config.DirPath+fl.Name()) 226 | } 227 | } 228 | 229 | return list, nil 230 | } 231 | 232 | func (f *FollowInteractor) newCheckpoint(logFileName string) error { 233 | if err := f.chp.save(f.repo, f.getNewCheckpointName(logFileName)); err != nil { 234 | return err 235 | } 236 | 237 | return nil 238 | } 239 | 240 | func (f *FollowInteractor) getNewCheckpointName(logFileName string) string { 241 | return strings.Replace(logFileName, extLog, extCheck, 1) 242 | } 243 | -------------------------------------------------------------------------------- /usecases/helpers.go: -------------------------------------------------------------------------------- 1 | package usecases 2 | 3 | // Coffer 4 | // Checkpoint 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "bytes" 9 | //"encoding/gob" 10 | "unsafe" 11 | ) 12 | 13 | func uint64ToBytes(i uint64) []byte { 14 | x := (*[8]byte)(unsafe.Pointer(&i)) 15 | out := make([]byte, 0, 8) 16 | out = append(out, x[:]...) 17 | 18 | return out 19 | } 20 | 21 | func bytesToUint64(b []byte) uint64 { 22 | var x [8]byte 23 | copy(x[:], b[:]) 24 | 25 | return *(*uint64)(unsafe.Pointer(&x)) 26 | } 27 | 28 | func prepareOperatToLog(code byte, value []byte) ([]byte, error) { 29 | var buf bytes.Buffer 30 | 31 | if _, err := buf.Write(uint64ToBytes(uint64(len(value) + 1))); err != nil { 32 | return nil, err 33 | } 34 | 35 | if err := buf.WriteByte(code); err != nil { 36 | return nil, err 37 | } 38 | 39 | if _, err := buf.Write(value); err != nil { 40 | return nil, err 41 | } 42 | 43 | return buf.Bytes(), nil 44 | } 45 | 46 | func (r *RecordsInteractor) checkPanic() { 47 | if rcvr := recover(); rcvr != nil { 48 | r.logger.Error(rcvr) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /usecases/interfaces.go: -------------------------------------------------------------------------------- 1 | package usecases 2 | 3 | // Coffer 4 | // Interfaces 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "github.com/claygod/coffer/domain" 9 | //"github.com/claygod/coffer/services/logger" 10 | ) 11 | 12 | /* 13 | Resourcer - interface for indicator of the status of the physical memory (and disk) of the device. 14 | */ 15 | type Resourcer interface { 16 | GetPermission(int64) bool 17 | } 18 | 19 | /* 20 | Porter - interface for regulates access to resources by keys. 21 | */ 22 | type Porter interface { 23 | Catch([]string) 24 | Throw([]string) 25 | } 26 | 27 | /* 28 | Logger - interface for logs. 29 | */ 30 | type Logger interface { 31 | //Fatal(...interface{}) 32 | Error(...interface{}) //*logger.Logger 33 | Warning(...interface{}) // *logger.Logger 34 | Info(...interface{}) //*logger.Logger 35 | //Context(string, interface{})// *logger.Logger 36 | //Send() (int, error) 37 | //Debug(...interface{}) 38 | } 39 | 40 | /* 41 | Journaler - interface for journal. 42 | */ 43 | type Journaler interface { 44 | Write([]byte) error 45 | Start() error 46 | Stop() 47 | //Close() 48 | Restart() 49 | } 50 | 51 | /* 52 | Starter - interface for StartStop. 53 | */ 54 | type Starter interface { 55 | Start() bool 56 | Stop() bool 57 | Add() bool 58 | Done() bool 59 | Total() int64 60 | IsReady() bool 61 | Block() bool 62 | Unblock() bool 63 | } 64 | 65 | /* 66 | HandleStore - interface for handlers store. 67 | */ 68 | type HandleStore interface { 69 | Get(string) (*domain.Handler, error) 70 | Set(string, *domain.Handler) 71 | } 72 | 73 | /* 74 | FileNamer - interface for logs names creator. 75 | */ 76 | type FileNamer interface { 77 | GetNewFileName(ext string) (string, error) 78 | GetAfterLatest(last string) ([]string, error) 79 | GetHalf(last string, more bool) ([]string, error) 80 | GetLatestFileName(ext string) (string, error) 81 | } 82 | -------------------------------------------------------------------------------- /usecases/loader.go: -------------------------------------------------------------------------------- 1 | package usecases 2 | 3 | // Coffer 4 | // Checkpoint loader 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | "os" 10 | 11 | "github.com/claygod/coffer/domain" 12 | ) 13 | 14 | /* 15 | Loader - assistant for loading logs and checkpoints. 16 | */ 17 | type Loader struct { 18 | config *Config 19 | logger Logger 20 | chp *checkpoint 21 | opr *Operations 22 | } 23 | 24 | /* 25 | NewLoader - create new Loader 26 | */ 27 | func NewLoader(config *Config, lgr Logger, chp *checkpoint, reqCoder *ReqCoder, resControl Resourcer, trn *Transaction) *Loader { 28 | return &Loader{ 29 | config: config, 30 | logger: lgr, 31 | chp: chp, 32 | opr: NewOperations(config, reqCoder, resControl, trn), 33 | } 34 | } 35 | 36 | /* 37 | LoadLatestValidCheckpoint - download the last valid checkpoint (from the list). 38 | */ 39 | func (l *Loader) LoadLatestValidCheckpoint(chpList []string, repo domain.RecordsRepository) (string, error) { 40 | for i := len(chpList) - 1; i >= 0; i-- { 41 | fChName := chpList[i] 42 | 43 | if fChName != extCheck+extPoint && fChName != "" { 44 | if err := l.loadCheckpoint(fChName, repo); err != nil { // load the last checkpoint 45 | l.logger.Info(err) 46 | } else { 47 | return fChName, nil 48 | } 49 | } 50 | } 51 | 52 | return "-1" + extCheck + extPoint, nil 53 | } 54 | 55 | func (l *Loader) loadCheckpoint(chpName string, repo domain.RecordsRepository) error { 56 | if err := l.chp.load(repo, l.config.DirPath+chpName); err != nil { // load the last checkpoint 57 | repo.Reset() //TODO: this is done in checkpoints, but you can duplicate (for now) 58 | 59 | return err 60 | } 61 | 62 | return nil 63 | } 64 | 65 | /* 66 | LoadLogs - loading logs from the files from the list. 67 | */ 68 | func (l *Loader) LoadLogs(fList []string, repo domain.RecordsRepository) (error, error) { 69 | counter := 0 70 | var wr error 71 | 72 | for _, fName := range fList { 73 | brk := false 74 | counter++ 75 | ops, err, wrn := l.opr.loadFromFile(l.config.DirPath + fName) 76 | if err != nil { 77 | return err, wrn 78 | } else if wrn != nil { 79 | wr = wrn 80 | switch counter { // two options, as sometimes there will be a log with zero content last 81 | case len(fList): 82 | if !l.config.AllowStartupErrLoadLogs { 83 | return fmt.Errorf("The spoiled log. l.config.AllowStartupErrLoadLogs == false"), wrn 84 | } 85 | 86 | brk = true 87 | 88 | case len(fList) - 1: 89 | stat, err := os.Stat(l.config.DirPath + fList[len(fList)-1]) 90 | 91 | if err != nil { 92 | return err, wrn 93 | } 94 | 95 | if stat.Size() != 0 { 96 | return fmt.Errorf("The spoiled log (%s) is not the last, after it there is one more log file.", 97 | l.config.DirPath+fName), wrn 98 | } 99 | 100 | if !l.config.AllowStartupErrLoadLogs { 101 | return fmt.Errorf("The spoiled log. l.config.AllowStartupErrLoadLogs == false"), wrn 102 | } 103 | 104 | brk = true 105 | 106 | default: 107 | return fmt.Errorf("The spoiled log (%s) .", l.config.DirPath+fName), wrn 108 | } 109 | } 110 | 111 | if err := l.opr.DoOperations(ops, repo); err != nil { 112 | return err, wrn 113 | } 114 | 115 | if brk { 116 | break 117 | } 118 | } 119 | 120 | return nil, wr 121 | } 122 | -------------------------------------------------------------------------------- /usecases/operations.go: -------------------------------------------------------------------------------- 1 | package usecases 2 | 3 | // Coffer 4 | // Operations helper 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "bytes" 9 | "fmt" 10 | "io" 11 | "os" 12 | "strings" 13 | 14 | "github.com/claygod/coffer/domain" 15 | "github.com/claygod/coffer/reports/codes" 16 | ) 17 | 18 | /* 19 | Operations - uneversal operations structure. 20 | */ 21 | type Operations struct { 22 | config *Config 23 | reqCoder *ReqCoder 24 | resControl Resourcer 25 | trn *Transaction 26 | } 27 | 28 | /* 29 | NewOperations - create new Operations. 30 | */ 31 | func NewOperations(config *Config, reqCoder *ReqCoder, resControl Resourcer, trn *Transaction) *Operations { 32 | return &Operations{ 33 | config: config, 34 | reqCoder: reqCoder, 35 | resControl: resControl, 36 | trn: trn, 37 | } 38 | } 39 | 40 | /* 41 | DoOperations - do operations (write, transaction, delete). 42 | */ 43 | func (o *Operations) DoOperations(ops []*domain.Operation, repo domain.RecordsRepository) error { 44 | for _, op := range ops { 45 | if !o.resControl.GetPermission(int64(len(op.Body))) { 46 | return fmt.Errorf("Operation code %d, len(body)=%d, Not permission!", op.Code, len(op.Body)) 47 | } 48 | //TODO: пока не проверяем результаты операций, считаем, что раз он были ок в первый раз, должны быть ок и сейчас 49 | // если не ок, то надо всё останавливать, т.к. все записанные операции раньше были успешными 50 | switch op.Code { 51 | case codeWriteList: 52 | reqWL, err := o.reqCoder.ReqWriteListDecode(op.Body) 53 | if err != nil { 54 | return err 55 | } 56 | repo.WriteList(reqWL.List) 57 | 58 | case codeTransaction: 59 | reqTr, err := o.reqCoder.ReqTransactionDecode(op.Body) 60 | if err != nil { 61 | return err 62 | } 63 | 64 | if rep := o.trn.doOperationTransaction(reqTr, repo); rep.Code != codes.Ok { 65 | return rep.Error 66 | } 67 | 68 | case codeDeleteListStrict: 69 | reqDL, err := o.reqCoder.ReqDeleteListDecode(op.Body) 70 | if err != nil { 71 | return err 72 | } else if notFound := repo.DelListStrict(reqDL.Keys); len(notFound) != 0 { 73 | return fmt.Errorf("Operations:DoOperations:DeleteList:Keys not found: %s", strings.Join(notFound, ", ")) 74 | } 75 | 76 | case codeDeleteListOptional: 77 | reqDL, err := o.reqCoder.ReqDeleteListDecode(op.Body) 78 | if err != nil { 79 | return err 80 | } 81 | 82 | repo.DelListOptional(reqDL.Keys) 83 | 84 | default: 85 | return fmt.Errorf("Unknown operation `%d`", op.Code) 86 | } 87 | } 88 | 89 | return nil 90 | } 91 | 92 | func (o *Operations) loadFromFile(filePath string) ([]*domain.Operation, error, error) { 93 | opFile, err := os.Open(filePath) 94 | if err != nil { 95 | return nil, err, nil 96 | } 97 | 98 | defer opFile.Close() 99 | 100 | fInfo, err := opFile.Stat() 101 | if err != nil || fInfo.Size() == 0 { 102 | return make([]*domain.Operation, 0), nil, nil // here you can return nil, but it’s better to still have an empty list 103 | } 104 | 105 | ops, wrn := o.loadOperationsFromFile(opFile) 106 | 107 | return ops, nil, wrn 108 | } 109 | 110 | /* 111 | loadOperationsFromFile - download operations from a file, return error 112 | most likely means that some operation was not completely recorded and it was impossible to read it. 113 | Accordingly, errors are not critical, and are rather needed for logs. 114 | (Since it’s critical, if it were impossible to open the file, there was no directory, 115 | and in this case the file is already open in the arguments, it remains only to read it.) 116 | */ 117 | func (o *Operations) loadOperationsFromFile(fl *os.File) ([]*domain.Operation, error) { 118 | counReadedBytes := 0 119 | ops := make([]*domain.Operation, 0, 16) 120 | rSize := make([]byte, 8) 121 | var errOut error 122 | 123 | for { 124 | _, err := fl.Read(rSize) 125 | if err != nil { 126 | if err != io.EOF { 127 | errOut = err 128 | } 129 | 130 | break 131 | } 132 | 133 | counReadedBytes += 8 134 | rSuint64 := bytesToUint64(rSize) 135 | bTotal := make([]byte, int(rSuint64)) 136 | n, err := fl.Read(bTotal) 137 | if err != nil { 138 | // if err == io.EOF { // EOF ? 139 | // break 140 | // } 141 | errOut = err 142 | 143 | break 144 | 145 | } else if n != int(rSuint64) { 146 | errOut = fmt.Errorf("The operation is not fully loaded: %d from %d )", n, rSuint64) 147 | 148 | break 149 | } 150 | 151 | op, err := o.logToOperat(bTotal) 152 | 153 | if err != nil { 154 | errOut = err 155 | 156 | break 157 | } 158 | 159 | ops = append(ops, op) 160 | } 161 | 162 | return ops, errOut 163 | } 164 | 165 | func (o *Operations) operatToLog(op *domain.Operation) ([]byte, error) { 166 | var buf bytes.Buffer 167 | 168 | if _, err := buf.Write(uint64ToBytes(uint64(len(op.Body) + 1))); err != nil { //TODO +1 169 | return nil, err 170 | } 171 | 172 | if err := buf.WriteByte(op.Code); err != nil { 173 | return nil, err 174 | } 175 | 176 | if _, err := buf.Write(op.Body); err != nil { 177 | return nil, err 178 | } 179 | 180 | return buf.Bytes(), nil 181 | } 182 | 183 | func (o *Operations) logToOperat(in []byte) (*domain.Operation, error) { 184 | if len(in) < 3 { //TODO: deal with the minimum number (through tests) 185 | return nil, fmt.Errorf("Len of input operation array == %d", len(in)) 186 | } 187 | 188 | op := &domain.Operation{ 189 | Code: in[0], 190 | Body: in[1:], 191 | } 192 | 193 | return op, nil 194 | } 195 | -------------------------------------------------------------------------------- /usecases/operations_test.go: -------------------------------------------------------------------------------- 1 | package usecases 2 | 3 | // Coffer 4 | // Operations tests 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | //"fmt" 9 | "testing" 10 | "time" 11 | 12 | "github.com/claygod/coffer/domain" 13 | "github.com/claygod/coffer/services/resources" 14 | //"github.com/sirupsen/logrus" 15 | ) 16 | 17 | func TestNewOperations(t *testing.T) { 18 | ucCnf := &Config{ 19 | FollowPause: 400 * time.Millisecond, 20 | LogsByCheckpoint: 2, 21 | DirPath: "../test/", 22 | AllowStartupErrLoadLogs: true, 23 | MaxKeyLength: 100, 24 | MaxValueLength: 10000, 25 | } 26 | rcCnf := &resources.Config{ 27 | LimitMemory: 1000 * megabyte, // minimum available memory (bytes) 28 | LimitDisk: 1000 * megabyte, // minimum free disk space 29 | DirPath: "../test/", 30 | } 31 | resControl, err := resources.New(rcCnf) 32 | if err != nil { 33 | t.Error(err) 34 | return 35 | } 36 | hdl := newMockHandler() 37 | trn := NewTransaction(hdl) 38 | //logger := logrus.New() // logger.New(services.NewLog("Coffer ")) 39 | reqCoder := NewReqCoder() 40 | NewOperations(ucCnf, reqCoder, resControl, trn) 41 | } 42 | 43 | type mockHandler struct { 44 | } 45 | 46 | func newMockHandler() *mockHandler { 47 | return &mockHandler{} 48 | } 49 | 50 | func (m *mockHandler) Get(handlerName string) (*domain.Handler, error) { 51 | hdl := domain.Handler(func(params []byte, inMap map[string][]byte) (map[string][]byte, error) { 52 | return inMap, nil 53 | }) 54 | return &hdl, nil //TODO 55 | } 56 | func (m *mockHandler) Set(handlerName string, handler *domain.Handler) { 57 | return //TODO 58 | } 59 | -------------------------------------------------------------------------------- /usecases/records_interactor.go: -------------------------------------------------------------------------------- 1 | package usecases 2 | 3 | // Coffer 4 | // Records interactor 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | "io/ioutil" 10 | "sort" 11 | "strconv" 12 | "strings" 13 | 14 | "github.com/claygod/coffer/domain" 15 | "github.com/claygod/coffer/reports" 16 | "github.com/claygod/coffer/reports/codes" 17 | ) 18 | 19 | /* 20 | RecordsInteractor - the main request handler for operations with database records. 21 | */ 22 | type RecordsInteractor struct { 23 | config *Config 24 | logger Logger 25 | loader *Loader 26 | chp *checkpoint 27 | opr *Operations 28 | trs *Transaction 29 | coder *ReqCoder 30 | repo domain.RecordsRepository 31 | handlers domain.HandlersRepository 32 | resControl Resourcer 33 | journal Journaler 34 | filenamer FileNamer 35 | hasp Starter 36 | } 37 | 38 | /* 39 | NewRecordsInteractor - create new RecordsInteractor 40 | */ 41 | func NewRecordsInteractor( 42 | config *Config, 43 | logger Logger, 44 | loader *Loader, 45 | chp *checkpoint, 46 | trs *Transaction, 47 | reqCoder *ReqCoder, 48 | repo domain.RecordsRepository, 49 | handlers domain.HandlersRepository, 50 | resControl Resourcer, 51 | journal Journaler, 52 | filenamer FileNamer, 53 | hasp Starter) (*RecordsInteractor, error, error) { 54 | 55 | r := &RecordsInteractor{ 56 | config: config, 57 | logger: logger, 58 | loader: loader, 59 | chp: chp, 60 | opr: NewOperations(config, reqCoder, resControl, trs), 61 | trs: trs, 62 | coder: reqCoder, 63 | repo: repo, 64 | handlers: handlers, 65 | resControl: resControl, 66 | journal: journal, 67 | filenamer: filenamer, 68 | hasp: hasp, 69 | } 70 | 71 | chpList, err := r.filenamer.GetHalf("-1"+extCheck+extPoint, true) 72 | if err != nil { 73 | return nil, err, nil 74 | } 75 | 76 | fChName, err := r.loader.LoadLatestValidCheckpoint(chpList, r.repo) // download the latest valid version of checkpoint 77 | if err != nil { 78 | r.logger.Warning(err) 79 | fChName = "-1" + extCheck + extPoint 80 | } 81 | 82 | // download all available subsequent logs 83 | logsList, err := r.filenamer.GetHalf(strings.Replace(fChName, extCheck+extPoint, extLog, -1), true) 84 | if err != nil { 85 | return nil, err, nil 86 | } 87 | 88 | // execute all available subsequent logs 89 | if len(logsList) > 0 { 90 | // eсли последний по номеру не `checkpoint`, значит была аварийная остановка, 91 | // и нужно загрузить всё, что можно, сохранить, и только потом продолжить 92 | err, wrn := r.loader.LoadLogs(logsList, r.repo) 93 | if err != nil { 94 | return nil, err, wrn 95 | } 96 | 97 | if err := r.save(); err != nil { 98 | return nil, err, wrn 99 | } 100 | 101 | r.journal.Restart() 102 | } 103 | 104 | return r, nil, nil 105 | } 106 | 107 | /* 108 | Start - start the interactor. 109 | */ 110 | func (r *RecordsInteractor) Start() bool { 111 | if err := r.journal.Start(); err != nil { 112 | return false 113 | } 114 | 115 | return r.hasp.Start() 116 | } 117 | 118 | /* 119 | Stop - stop the interactor. 120 | */ 121 | func (r *RecordsInteractor) Stop() bool { 122 | if !r.hasp.Block() { 123 | return false 124 | } 125 | 126 | defer r.hasp.Unblock() 127 | r.journal.Stop() 128 | 129 | if err := r.save(); err != nil { 130 | r.logger.Error(err, "Method=Stop") 131 | 132 | return false 133 | } else if r.config.RemoveUnlessLogs { 134 | //TODO: here you can delete all junk except the last checkpoint 135 | } 136 | 137 | return true 138 | } 139 | 140 | func (r *RecordsInteractor) save(args ...string) error { 141 | var novName string 142 | 143 | if len(args) == 1 { 144 | novName = args[0] 145 | } else { 146 | nm, err := r.filenamer.GetNewFileName(extCheck + extPoint) 147 | if err != nil { 148 | return err 149 | } 150 | novName = nm 151 | } 152 | 153 | novName = strings.Replace(novName, extCheck+extPoint, extCheck, 1) 154 | 155 | if err := r.chp.save(r.repo, novName); err != nil { 156 | return err 157 | } 158 | 159 | return nil 160 | } 161 | 162 | /* 163 | WriteListOptional - set a few records in safe mode. 164 | */ 165 | func (r *RecordsInteractor) WriteListOptional(req *ReqWriteList) *reports.ReportWriteList { 166 | rep := &reports.ReportWriteList{Report: reports.Report{}} 167 | 168 | if !r.hasp.Add() { 169 | rep.Code = codes.PanicStopped 170 | rep.Error = fmt.Errorf("RecordsInteractor is stopped") 171 | 172 | return rep 173 | } 174 | 175 | defer r.hasp.Done() 176 | 177 | // prepare the byte version of the operation for the log 178 | opBytes, err := r.reqWriteListToLog(req) 179 | if err != nil { 180 | rep.Code = codes.ErrParseRequest 181 | rep.Error = err 182 | 183 | return rep 184 | } 185 | 186 | // check if there are enough resources (memory, disk) to complete the task 187 | if !r.resControl.GetPermission(int64(len(opBytes))) { 188 | rep.Code = codes.ErrResources 189 | rep.Error = fmt.Errorf("Insufficient resources (memory, disk)") 190 | 191 | return rep 192 | } 193 | 194 | // execute 195 | rep.Found = r.repo.WriteListOptional(req.List) 196 | 197 | if err := r.journal.Write(opBytes); err != nil { 198 | defer r.hasp.Stop() 199 | rep.Code = codes.PanicWAL 200 | rep.Error = err 201 | 202 | return rep 203 | } 204 | 205 | rep.Code = codes.Ok 206 | 207 | return rep 208 | } 209 | 210 | /* 211 | WriteListStrict - set a few records in strict mode. 212 | */ 213 | func (r *RecordsInteractor) WriteListStrict(req *ReqWriteList) *reports.ReportWriteList { 214 | rep := &reports.ReportWriteList{Report: reports.Report{}} 215 | 216 | if !r.hasp.Add() { 217 | rep.Code = codes.PanicStopped 218 | rep.Error = fmt.Errorf("RecordsInteractor is stopped") 219 | 220 | return rep 221 | } 222 | 223 | defer r.hasp.Done() 224 | 225 | // prepare the byte version of the operation for the log 226 | opBytes, err := r.reqWriteListToLog(req) 227 | if err != nil { 228 | rep.Code = codes.ErrParseRequest 229 | rep.Error = err 230 | 231 | return rep 232 | } 233 | 234 | // check if there are enough resources (memory, disk) to complete the task 235 | if !r.resControl.GetPermission(int64(len(opBytes))) { 236 | rep.Code = codes.ErrResources 237 | rep.Error = fmt.Errorf("Insufficient resources (memory, disk)") 238 | 239 | return rep 240 | } 241 | 242 | // execute 243 | rep.Found = r.repo.WriteListStrict(req.List) 244 | if err := r.journal.Write(opBytes); err != nil { 245 | defer r.hasp.Stop() 246 | rep.Code = codes.PanicWAL 247 | rep.Error = err 248 | 249 | return rep 250 | } 251 | 252 | if len(rep.Found) == 0 { 253 | rep.Code = codes.Ok 254 | } else { 255 | rep.Code = codes.ErrRecordsFound 256 | } 257 | 258 | return rep 259 | } 260 | 261 | /* 262 | WriteListUnsafe - set a few records in unsafe mode. 263 | */ 264 | func (r *RecordsInteractor) WriteListUnsafe(req *ReqWriteList) *reports.Report { 265 | rep := &reports.Report{} 266 | 267 | // prepare the byte version of the operation for the log 268 | opBytes, err := r.reqWriteListToLog(req) 269 | 270 | if err != nil { 271 | rep.Code = codes.ErrParseRequest 272 | rep.Error = err 273 | 274 | return rep 275 | } 276 | 277 | // execute 278 | r.repo.WriteListUnsafe(req.List) 279 | 280 | if err := r.journal.Write(opBytes); err != nil { 281 | defer r.hasp.Stop() 282 | rep.Code = codes.PanicWAL 283 | rep.Error = err 284 | 285 | return rep 286 | } 287 | 288 | rep.Code = codes.Ok 289 | 290 | return rep 291 | } 292 | 293 | /* 294 | ReadList - get a few records in safe mode. 295 | */ 296 | func (r *RecordsInteractor) ReadList(req *ReqLoadList) *reports.ReportReadList { 297 | rep := &reports.ReportReadList{Report: reports.Report{}} 298 | defer r.checkPanic() 299 | 300 | if !r.hasp.Add() { 301 | rep.Code = codes.PanicStopped 302 | rep.Error = fmt.Errorf("RecordsInteractor is stopped") 303 | 304 | return rep 305 | } 306 | 307 | defer r.hasp.Done() 308 | // execute 309 | data, notFound := r.repo.ReadList(req.Keys) 310 | 311 | if len(notFound) != 0 { 312 | rep.Code = codes.ErrReadRecords 313 | } 314 | 315 | rep.Data = data 316 | rep.NotFound = notFound 317 | 318 | return rep 319 | } 320 | 321 | /* 322 | ReadListUnsafe - get a few records in unsafe mode. 323 | */ 324 | func (r *RecordsInteractor) ReadListUnsafe(req *ReqLoadList) *reports.ReportReadList { 325 | rep := &reports.ReportReadList{Report: reports.Report{}} 326 | defer r.checkPanic() 327 | 328 | // выполняем 329 | data, notFound := r.repo.ReadListUnsafe(req.Keys) 330 | 331 | if len(notFound) != 0 { 332 | rep.Code = codes.ErrReadRecords 333 | } 334 | 335 | rep.Data = data 336 | rep.NotFound = notFound 337 | 338 | return rep 339 | } 340 | 341 | /* 342 | DeleteList - delete multiple records in the database. 343 | */ 344 | func (r *RecordsInteractor) DeleteList(req *ReqDeleteList, strictMode bool) *reports.ReportDeleteList { 345 | defer r.checkPanic() 346 | 347 | rep := &reports.ReportDeleteList{Report: reports.Report{}} 348 | if !r.hasp.Add() { 349 | rep.Code = codes.PanicStopped 350 | rep.Error = fmt.Errorf("RecordsInteractor is stopped") 351 | 352 | return rep 353 | } 354 | 355 | defer r.hasp.Done() 356 | 357 | // prepare the byte version of the operation for the log 358 | opBytes, err := r.reqDeleteListToLog(req) 359 | if err != nil { 360 | rep.Code = codes.ErrParseRequest 361 | rep.Error = err 362 | 363 | return rep 364 | } 365 | 366 | // check if there are enough resources (memory, disk) to complete the task 367 | if !r.resControl.GetPermission(int64(len(opBytes))) { 368 | rep.Code = codes.ErrResources 369 | rep.Error = fmt.Errorf("Insufficient resources (memory, disk)") 370 | 371 | return rep 372 | } 373 | 374 | // execute 375 | if strictMode { 376 | rep = r.deleteListStrict(req.Keys, opBytes) 377 | } else { 378 | rep = r.deleteListOptional(req.Keys, opBytes) 379 | } 380 | 381 | if rep.Code >= codes.Panic { 382 | defer r.hasp.Stop() 383 | } 384 | 385 | return rep 386 | } 387 | 388 | func (r *RecordsInteractor) deleteListStrict(keys []string, opBytes []byte) *reports.ReportDeleteList { 389 | rep := &reports.ReportDeleteList{Report: reports.Report{}} 390 | 391 | // execute 392 | notFound := r.repo.DelListStrict(keys) // during warning - not everything was deleted (some keys are not in the database) 393 | rep.NotFound = notFound 394 | 395 | if len(notFound) != 0 { 396 | rep.Code = codes.ErrNotFound 397 | rep.Error = fmt.Errorf("Keys not found: %s", strings.Join(notFound, ", ")) 398 | 399 | return rep 400 | } 401 | 402 | if err := r.journal.Write(opBytes); err != nil { 403 | rep.Code = codes.PanicWAL 404 | rep.Error = err 405 | 406 | return rep 407 | } 408 | 409 | rep.Code = codes.Ok 410 | rep.Removed = keys 411 | 412 | return rep 413 | } 414 | 415 | func (r *RecordsInteractor) deleteListOptional(keys []string, opBytes []byte) *reports.ReportDeleteList { 416 | rep := &reports.ReportDeleteList{Report: reports.Report{}} 417 | 418 | // execute 419 | removedList, notFound := r.repo.DelListOptional(keys) // during warning - not everything was deleted (some keys are not in the database) 420 | rep.Removed = removedList 421 | rep.NotFound = notFound 422 | 423 | if err := r.journal.Write(opBytes); err != nil { 424 | rep.Code = codes.PanicWAL 425 | rep.Error = err 426 | 427 | return rep 428 | } 429 | 430 | rep.Code = codes.Ok 431 | 432 | return rep 433 | } 434 | 435 | func (r *RecordsInteractor) reqWriteListToLog(req *ReqWriteList) ([]byte, error) { 436 | // req marshall to bytes 437 | reqBytes, err := r.coder.ReqWriteListEncode(req) 438 | 439 | if err != nil { 440 | return nil, err 441 | } 442 | 443 | // form the operation 444 | op := &domain.Operation{ 445 | Code: codeWriteList, 446 | Body: reqBytes, 447 | } 448 | 449 | // marshall operation in bytes 450 | return r.opr.operatToLog(op) 451 | } 452 | 453 | func (r *RecordsInteractor) reqDeleteListToLog(req *ReqDeleteList) ([]byte, error) { 454 | // req marshall to bytes 455 | reqBytes, err := r.coder.ReqDeleteListEncode(req) 456 | 457 | if err != nil { 458 | return nil, err 459 | } 460 | 461 | // form the operation 462 | op := &domain.Operation{ 463 | Code: codeWriteList, 464 | Body: reqBytes, 465 | } 466 | 467 | // marshall operation in bytes 468 | return r.opr.operatToLog(op) 469 | } 470 | 471 | func (r *RecordsInteractor) reqTransactionToLog(req *ReqTransaction) ([]byte, error) { 472 | // req marshall to bytes 473 | reqBytes, err := r.coder.ReqTransactionEncode(req) 474 | 475 | if err != nil { 476 | return nil, err 477 | } 478 | 479 | // form the operation 480 | op := &domain.Operation{ 481 | Code: codeTransaction, 482 | Body: reqBytes, 483 | } 484 | 485 | // marshall operation in bytes 486 | return r.opr.operatToLog(op) 487 | } 488 | 489 | /* 490 | Transaction - complete a transaction. 491 | */ 492 | func (r *RecordsInteractor) Transaction(req *ReqTransaction) *reports.ReportTransaction { // interface{}, map[string][]byte, *domain.Handler 493 | //tStart := time.Now().UnixNano() 494 | //defer fmt.Println("Operation time ", time.Now().UnixNano()-tStart) 495 | 496 | rep := &reports.ReportTransaction{Report: reports.Report{}} 497 | 498 | if !r.hasp.Add() { 499 | rep.Code = codes.PanicStopped 500 | rep.Error = fmt.Errorf("RecordsInteractor is stopped") 501 | 502 | return rep 503 | } 504 | 505 | defer r.hasp.Done() 506 | 507 | // prepare the byte version of the operation for the log 508 | opBytes, err := r.reqTransactionToLog(req) 509 | 510 | if err != nil { 511 | rep.Code = codes.ErrParseRequest 512 | rep.Error = err 513 | 514 | return rep 515 | } 516 | 517 | // check if there are enough resources (memory, disk) to complete the task 518 | if !r.resControl.GetPermission(int64(len(opBytes))) { 519 | rep.Code = codes.ErrResources 520 | rep.Error = fmt.Errorf("Insufficient resources (memory, disk)") 521 | 522 | return rep 523 | } 524 | 525 | // execute a transaction 526 | rep = r.trs.doOperationTransaction(req, r.repo) 527 | 528 | if rep.Code >= codes.Panic { 529 | defer r.hasp.Stop() 530 | } 531 | 532 | if rep.Code >= codes.Error { 533 | return rep 534 | } 535 | 536 | // записываем результат 537 | if err := r.journal.Write(opBytes); err != nil { 538 | defer r.hasp.Stop() 539 | rep.Code = codes.PanicWAL 540 | rep.Error = err 541 | 542 | return rep 543 | } 544 | 545 | rep.Code = codes.Ok 546 | 547 | return rep 548 | } 549 | 550 | /* 551 | RecordsCount - get the total number of records in the database. 552 | */ 553 | func (r *RecordsInteractor) RecordsCount() *reports.ReportRecordsCount { 554 | rep := &reports.ReportRecordsCount{Report: reports.Report{}} 555 | 556 | if !r.hasp.Add() { 557 | rep.Code = codes.PanicStopped 558 | rep.Error = fmt.Errorf("RecordsInteractor is stopped") 559 | 560 | return rep 561 | } 562 | 563 | defer r.hasp.Done() 564 | 565 | // execute 566 | rep.Count = r.repo.CountRecords() 567 | rep.Code = codes.Ok 568 | 569 | return rep 570 | } 571 | 572 | /* 573 | RecordsList - get records list 574 | */ 575 | func (r *RecordsInteractor) RecordsList() *reports.ReportRecordsList { 576 | rep := &reports.ReportRecordsList{Report: reports.Report{}} 577 | 578 | if !r.hasp.Add() { 579 | rep.Code = codes.PanicStopped 580 | rep.Error = fmt.Errorf("RecordsInteractor is stopped") 581 | 582 | return rep 583 | } 584 | 585 | defer r.hasp.Done() 586 | 587 | // execute 588 | rep.Data = r.repo.RecordsList() 589 | rep.Code = codes.Ok 590 | 591 | return rep 592 | } 593 | 594 | /* 595 | RecordsListWithPrefix - get records list with prefix. 596 | */ 597 | func (r *RecordsInteractor) RecordsListWithPrefix(prefix string) *reports.ReportRecordsList { 598 | rep := &reports.ReportRecordsList{Report: reports.Report{}} 599 | 600 | if !r.hasp.Add() { 601 | rep.Code = codes.PanicStopped 602 | rep.Error = fmt.Errorf("RecordsInteractor is stopped") 603 | 604 | return rep 605 | } 606 | 607 | defer r.hasp.Done() 608 | 609 | // execute 610 | rep.Data = r.repo.RecordsListWithPrefix(prefix) 611 | rep.Code = codes.Ok 612 | 613 | return rep 614 | } 615 | 616 | func (r *RecordsInteractor) findExtraKeys(writeList map[string][]byte, curMap map[string][]byte) error { 617 | extraKeys := make([]string, 0, len(writeList)) 618 | 619 | for key := range writeList { 620 | if _, ok := curMap[key]; !ok { 621 | extraKeys = append(extraKeys, key) 622 | } 623 | } 624 | 625 | if len(extraKeys) > 0 { 626 | return fmt.Errorf("Found extra keys: %s", strings.Join(extraKeys, " , ")) 627 | } 628 | 629 | return nil 630 | } 631 | 632 | func (r *RecordsInteractor) findLogsAfterCheckpoint(chpName string) ([]string, error) { 633 | logBarrier, err := strconv.ParseInt(strings.Replace(chpName, extCheck+extPoint, "", 1), 10, 64) 634 | 635 | if err != nil { 636 | return nil, err 637 | } 638 | 639 | logsNames, err := r.getFilesByExtList(extLog) 640 | if err != nil { 641 | return nil, err 642 | } 643 | 644 | for i, logName := range logsNames { 645 | num, err := strconv.ParseInt(strings.Replace(logName, extLog, "", 1), 10, 64) 646 | 647 | if err != nil { 648 | return nil, err 649 | } 650 | 651 | if num > logBarrier { 652 | return logsNames[i : len(logsNames)-1], nil 653 | } 654 | } 655 | 656 | return make([]string, 0), nil 657 | } 658 | 659 | func (r *RecordsInteractor) getFilesByExtList(ext string) ([]string, error) { 660 | files, err := ioutil.ReadDir(r.config.DirPath) 661 | 662 | if err != nil { 663 | return nil, err 664 | } 665 | 666 | list := make([]string, 0, len(files)) 667 | 668 | for _, fl := range files { 669 | if strings.HasSuffix(fl.Name(), ext) { 670 | list = append(list, fl.Name()) 671 | } 672 | } 673 | 674 | sort.Strings(list) 675 | 676 | return list, nil 677 | } 678 | 679 | func (r *RecordsInteractor) getKeysFromMap(arr map[string][]byte) []string { 680 | keys := make([]string, 0, len(arr)) 681 | 682 | for key := range arr { 683 | keys = append(keys, key) 684 | } 685 | 686 | return keys 687 | } 688 | -------------------------------------------------------------------------------- /usecases/requests.go: -------------------------------------------------------------------------------- 1 | package usecases 2 | 3 | // Coffer 4 | // Requests 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "bytes" 9 | "encoding/gob" 10 | "time" 11 | ) 12 | 13 | /* 14 | ReqCoder - requests encoder and decoder. 15 | */ 16 | type ReqCoder struct { 17 | } 18 | 19 | /* 20 | NewReqCoder - create new ReqCoder. 21 | */ 22 | func NewReqCoder() *ReqCoder { 23 | return &ReqCoder{} 24 | } 25 | 26 | /* 27 | ReqWriteListEncode - encode ReqWriteList 28 | */ 29 | func (r *ReqCoder) ReqWriteListEncode(req *ReqWriteList) ([]byte, error) { 30 | var buf bytes.Buffer 31 | enc := gob.NewEncoder(&buf) 32 | err := enc.Encode(req) 33 | 34 | return buf.Bytes(), err 35 | } 36 | 37 | /* 38 | ReqDeleteListEncode - encode ReqDeleteList 39 | */ 40 | func (r *ReqCoder) ReqDeleteListEncode(req *ReqDeleteList) ([]byte, error) { 41 | var buf bytes.Buffer 42 | enc := gob.NewEncoder(&buf) 43 | err := enc.Encode(req) 44 | 45 | return buf.Bytes(), err 46 | } 47 | 48 | /* 49 | ReqTransactionEncode - encode ReqTransaction 50 | */ 51 | func (r *ReqCoder) ReqTransactionEncode(req *ReqTransaction) ([]byte, error) { 52 | var buf bytes.Buffer 53 | enc := gob.NewEncoder(&buf) 54 | err := enc.Encode(req) 55 | 56 | return buf.Bytes(), err 57 | } 58 | 59 | /* 60 | ReqWriteListDecode - decode ReqWriteList 61 | */ 62 | func (r *ReqCoder) ReqWriteListDecode(body []byte) (*ReqWriteList, error) { 63 | dec := gob.NewDecoder(bytes.NewBuffer(body)) 64 | var req ReqWriteList 65 | err := dec.Decode(&req) 66 | 67 | return &req, err 68 | } 69 | 70 | /* 71 | ReqDeleteListDecode - decode ReqDeleteList 72 | */ 73 | func (r *ReqCoder) ReqDeleteListDecode(body []byte) (*ReqDeleteList, error) { 74 | dec := gob.NewDecoder(bytes.NewBuffer(body)) 75 | var req ReqDeleteList 76 | err := dec.Decode(&req) 77 | 78 | return &req, err 79 | } 80 | 81 | /* 82 | ReqTransactionDecode - decode ReqTransaction 83 | */ 84 | func (r *ReqCoder) ReqTransactionDecode(body []byte) (*ReqTransaction, error) { 85 | dec := gob.NewDecoder(bytes.NewBuffer(body)) 86 | var req ReqTransaction 87 | err := dec.Decode(&req) 88 | 89 | return &req, err 90 | } 91 | 92 | /* 93 | ReqWriteList - write list request 94 | */ 95 | type ReqWriteList struct { 96 | Time time.Time 97 | List map[string][]byte 98 | } 99 | 100 | /* 101 | ReqLoadList - load list request 102 | */ 103 | type ReqLoadList struct { 104 | Time time.Time 105 | Keys []string 106 | } 107 | 108 | /* 109 | ReqDeleteList - delete list request 110 | */ 111 | type ReqDeleteList struct { 112 | Time time.Time 113 | Keys []string 114 | } 115 | 116 | /* 117 | ReqTransaction - transaction request 118 | */ 119 | type ReqTransaction struct { 120 | Time time.Time 121 | HandlerName string 122 | Keys []string 123 | Value []byte 124 | } 125 | -------------------------------------------------------------------------------- /usecases/transaction.go: -------------------------------------------------------------------------------- 1 | package usecases 2 | 3 | // Coffer 4 | // Transaction helper 5 | // Copyright © 2019 Eduard Sesigin. All rights reserved. Contacts: 6 | 7 | import ( 8 | "fmt" 9 | "strings" 10 | 11 | //"time" 12 | 13 | "github.com/claygod/coffer/domain" 14 | "github.com/claygod/coffer/reports" 15 | "github.com/claygod/coffer/reports/codes" 16 | ) 17 | 18 | /* 19 | Transaction - starts processing transaction handlers. 20 | */ 21 | type Transaction struct { 22 | //repo domain.RecordsRepository 23 | handlers HandleStore 24 | } 25 | 26 | /* 27 | NewTransaction - create new Transaction. 28 | */ 29 | func NewTransaction(handlers HandleStore) *Transaction { 30 | return &Transaction{ 31 | handlers: handlers, 32 | } 33 | } 34 | 35 | func (t *Transaction) doOperationTransaction(reqTr *ReqTransaction, repo domain.RecordsRepository) *reports.ReportTransaction { 36 | rep := &reports.ReportTransaction{Report: reports.Report{}} 37 | 38 | // find handler 39 | hdlx, err := t.handlers.Get(reqTr.HandlerName) 40 | if err != nil { 41 | rep.Code = codes.ErrHandlerNotFound 42 | rep.Error = err 43 | 44 | return rep 45 | } 46 | 47 | hdl := *hdlx 48 | 49 | // read the current values 50 | curRecsMap, notFound := repo.ReadList(reqTr.Keys) 51 | 52 | if len(notFound) != 0 { 53 | rep.Code = codes.ErrReadRecords 54 | rep.Error = fmt.Errorf("Records not found: %s", strings.Join(notFound, ", ")) 55 | 56 | return rep 57 | } 58 | 59 | // we carry out the operation with the values obtained from the repo 60 | novRecsMap, err := hdl(reqTr.Value, curRecsMap) 61 | if err != nil { 62 | rep.Code = codes.ErrHandlerResponse 63 | rep.Error = err 64 | 65 | return rep 66 | } 67 | 68 | // saving modified records (obtained as a result of a transaction) 69 | repo.WriteList(novRecsMap) 70 | rep.Code = codes.Ok 71 | rep.Data = novRecsMap 72 | 73 | return rep 74 | } 75 | --------------------------------------------------------------------------------