├── rename.go
├── channel_test.go
├── samples
│   └── main.go
├── readme.md
├── channel.go
└── diskqueue.go

/rename.go:
--------------------------------------------------------------------------------
// +build !windows

package durable

// Original: https://github.com/nsqio/nsq/blob/master/nsqd/rename.go

import (
	"os"
)

func atomicRename(sourceFile, targetFile string) error {
	return os.Rename(sourceFile, targetFile)
}

--------------------------------------------------------------------------------
/channel_test.go:
--------------------------------------------------------------------------------
package durable

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"testing"
	"time"
)

func TestInfiniteChannel(t *testing.T) {
	// use a throwaway directory so the test does not depend on ./data
	// existing or on leftovers from previous runs
	dir, err := ioutil.TempDir("", "durable-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	c := make(chan interface{})

	d := Channel(c, &Config{
		DataPath:        dir,
		MaxBytesPerFile: 102400,
		MaxMsgSize:      1000,
		SyncEvery:       10,
		SyncTimeout:     time.Second * 10,
		Logger:          log.New(ioutil.Discard, "", 0),
	})

	for i := 0; i < 1000; i++ {
		c <- fmt.Sprintf("%d", i)
	}

	// read back exactly as many items as were written; the original
	// loop never terminated, so the test could never finish
	for i := 0; i < 1000; i++ {
		item := <-d

		if want := fmt.Sprintf("%d", i); item != want {
			t.Fatalf("expected %q, got %#v", want, item)
		}
	}
}

--------------------------------------------------------------------------------
/samples/main.go:
--------------------------------------------------------------------------------
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/dutchcoders/durable"
)

func main() {
	// the disk queue opens its files inside DataPath, so the directory must exist
	if err := os.MkdirAll("./data", 0755); err != nil {
		log.Fatal(err)
	}

	writer := make(chan interface{})
	c := durable.Channel(writer, &durable.Config{
		Name:            "",
		DataPath:        "./data",
		MaxBytesPerFile: 102400,
		MinMsgSize:      0,
		MaxMsgSize:      1000,
		SyncEvery:       10,
		SyncTimeout:     time.Second * 10,
		Logger:          log.New(os.Stdout, "", 0),
	})

	for i := 0; i < 10000; i++ {
		writer <- fmt.Sprintf("%d", i)
	}

	// read everything back; a long-running consumer would simply keep receiving here
	for i := 0; i < 10000; i++ {
		item := <-c
		fmt.Printf("%#v\n", item)
	}
}

--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
# Durable Channel

The durable channel persists its items on disk while being used like a normal Go channel: items sent into it are stored in a disk-backed FIFO queue and received back in the same order. Its capacity is limited only by the available storage.

# Use cases

* sending data over unreliable connections

# Sample

```
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/dutchcoders/durable"
)

func main() {
	// DataPath must exist before the channel starts writing to it
	if err := os.MkdirAll("./data", 0755); err != nil {
		log.Fatal(err)
	}

	writer := make(chan interface{})
	c := durable.Channel(writer, &durable.Config{
		Name:            "",
		DataPath:        "./data",
		MaxBytesPerFile: 102400,
		MinMsgSize:      0,
		MaxMsgSize:      1000,
		SyncEvery:       10,
		SyncTimeout:     time.Second * 10,
		Logger:          log.New(os.Stdout, "", 0),
	})

	for i := 0; i < 10000; i++ {
		writer <- fmt.Sprintf("%d", i)
	}

	for i := 0; i < 10000; i++ {
		item := <-c
		fmt.Printf("%#v\n", item)
	}
}
```

# Disclaimer

The durable channel is based on the disk queue of [nsqd](https://github.com/nsqio/nsq/blob/master/nsqd/diskqueue.go).
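# Storage format

Each item sent into the channel is JSON-encoded and appended to a data file named `<Name>.diskqueue.<number>.dat` inside `DataPath`. Every record is a 4-byte big-endian length followed by the JSON payload, files roll over once they grow past `MaxBytesPerFile`, and the current read/write positions are tracked in `<Name>.diskqueue.meta.dat`. The snippet below is not part of the package; it is a minimal sketch of how a single data file could be decoded by hand, assuming the layout described above (the path assumes the empty `Name` used in the sample).

```
package main

import (
	"bufio"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io"
	"os"
)

func main() {
	// with Name "" the first data file is <DataPath>/.diskqueue.000000.dat
	f, err := os.Open("./data/.diskqueue.000000.dat")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	r := bufio.NewReader(f)
	for {
		// each record starts with its payload length as a big-endian int32
		var size int32
		if err := binary.Read(r, binary.BigEndian, &size); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}

		payload := make([]byte, size)
		if _, err := io.ReadFull(r, payload); err != nil {
			panic(err)
		}

		var item interface{}
		if err := json.Unmarshal(payload, &item); err != nil {
			panic(err)
		}

		fmt.Printf("%#v\n", item)
	}
}
```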
--------------------------------------------------------------------------------
/channel.go:
--------------------------------------------------------------------------------
package durable

import (
	"encoding/json"
	"io/ioutil"
	"log"
	"time"
)

// Config controls where and how a durable channel persists its items.
type Config struct {
	Name            string
	DataPath        string
	MaxBytesPerFile int64
	MinMsgSize      int32
	MaxMsgSize      int32
	SyncEvery       int64
	SyncTimeout     time.Duration
	Logger          *log.Logger
}

func defaultConfig() *Config {
	return &Config{
		Name:            "",
		DataPath:        "./data",
		MaxBytesPerFile: 102400,
		MinMsgSize:      0,
		MaxMsgSize:      1000,
		SyncEvery:       10,
		SyncTimeout:     time.Second * 10,
		Logger:          log.New(ioutil.Discard, "", 0),
	}
}

type channel struct {
	in     chan interface{}
	out    chan interface{}
	dq     *diskQueue
	config *Config
}

func newChannel(c chan interface{}, config *Config) chan interface{} {
	out := make(chan interface{})

	b := channel{
		in:     c,
		out:    out,
		config: config,
	}

	b.dq = newDiskQueue(config)

	go b.reader()
	go b.writer()

	return out
}

func (b channel) reader() {
	for data := range b.dq.ReadChan() {
		var item interface{}

		if err := json.Unmarshal(data, &item); err != nil {
			b.config.Logger.Printf("Error unmarshalling json object: %s\n", err.Error())
			continue
		}

		b.out <- item
	}
}

func (b channel) writer() {
	// ranging over the input channel stops the writer once the caller closes it
	for item := range b.in {
		if data, err := json.Marshal(item); err != nil {
			b.config.Logger.Printf("Error marshalling json object: %s\n", err.Error())
		} else if err := b.dq.Put(data); err != nil {
			b.config.Logger.Printf("Error putting object: %s\n", err.Error())
		}
	}
}

// Channel returns a channel that yields the items sent on c after they have
// been persisted to disk. A nil config selects defaultConfig().
func Channel(c chan interface{}, config *Config) chan interface{} {
	if config == nil {
		config = defaultConfig()
	}

	return newChannel(c, config)
}

--------------------------------------------------------------------------------
/diskqueue.go:
--------------------------------------------------------------------------------
1 | package durable
2 |
3 | // Original: https://github.com/nsqio/nsq/blob/master/nsqd/diskqueue.go
4 |
5 | import (
6 | 	"bufio"
7 | 	"bytes"
8 | 	"encoding/binary"
9 | 	"errors"
10 | 	"fmt"
11 | 	"io"
12 | 	"log"
13 | 	"math/rand"
14 | 	"os"
15 | 	"path"
16 | 	"sync"
17 | 	"sync/atomic"
18 | 	"time"
19 | )
20 |
21 | // diskQueue implements the BackendQueue interface
22 | // providing a filesystem backed FIFO queue
23 | type diskQueue struct {
24 | 	// 64bit atomic vars need to be first for proper alignment on 32bit platforms
25 |
26 | 	// run-time state (also persisted to disk)
27 | 	readPos      int64
28 | 	writePos     int64
29 | 	readFileNum  int64
30 | 	writeFileNum int64
31 | 	depth        int64
32 |
33 | 	sync.RWMutex
34 |
35 | 	// instantiation time metadata
36 | 	name            string
37 | 	dataPath        string
38 | 	maxBytesPerFile int64 // currently this cannot change once created
39 | 	minMsgSize      int32
40 | 	maxMsgSize      int32
41 | 	syncEvery       int64         // number of writes per fsync
42 | 	syncTimeout     time.Duration // duration of time per fsync
43 | 	exitFlag        int32
44 | 	needSync        bool
45 |
46 | 	// keeps track of the position where we have read
47 | 	// (but not yet sent over readChan)
48 | 	nextReadPos     int64
49 | 	nextReadFileNum int64
50 |
51 | 	readFile  *os.File
52 | 	writeFile *os.File
53 | 	reader    *bufio.Reader
54 | 	writeBuf  bytes.Buffer
55
| 56 | // exposed via ReadChan() 57 | readChan chan []byte 58 | 59 | // internal channels 60 | writeChan chan []byte 61 | writeResponseChan chan error 62 | emptyChan chan int 63 | emptyResponseChan chan error 64 | exitChan chan int 65 | exitSyncChan chan int 66 | 67 | logger *log.Logger 68 | } 69 | 70 | // newDiskQueue instantiates a new instance of diskQueue, retrieving metadata 71 | // from the filesystem and starting the read ahead goroutine 72 | func newDiskQueue(config *Config) *diskQueue { 73 | d := diskQueue{ 74 | name: config.Name, 75 | dataPath: config.DataPath, 76 | maxBytesPerFile: config.MaxBytesPerFile, 77 | minMsgSize: config.MinMsgSize, 78 | maxMsgSize: config.MaxMsgSize, 79 | readChan: make(chan []byte), 80 | writeChan: make(chan []byte), 81 | writeResponseChan: make(chan error), 82 | emptyChan: make(chan int), 83 | emptyResponseChan: make(chan error), 84 | exitChan: make(chan int), 85 | exitSyncChan: make(chan int), 86 | syncEvery: config.SyncEvery, 87 | syncTimeout: config.SyncTimeout, 88 | logger: config.Logger, 89 | } 90 | 91 | // no need to lock here, nothing else could possibly be touching this instance 92 | err := d.retrieveMetaData() 93 | if err != nil && !os.IsNotExist(err) { 94 | d.logf("ERROR: diskqueue(%s) failed to retrieveMetaData - %s", d.name, err) 95 | } 96 | 97 | go d.ioLoop() 98 | 99 | return &d 100 | } 101 | 102 | func (d *diskQueue) logf(f string, args ...interface{}) { 103 | d.logger.Printf(f, args...) 104 | } 105 | 106 | // Depth returns the depth of the queue 107 | func (d *diskQueue) Depth() int64 { 108 | return atomic.LoadInt64(&d.depth) 109 | } 110 | 111 | // ReadChan returns the []byte channel for reading data 112 | func (d *diskQueue) ReadChan() chan []byte { 113 | return d.readChan 114 | } 115 | 116 | // Put writes a []byte to the queue 117 | func (d *diskQueue) Put(data []byte) error { 118 | d.RLock() 119 | defer d.RUnlock() 120 | 121 | if d.exitFlag == 1 { 122 | return errors.New("exiting") 123 | } 124 | 125 | d.writeChan <- data 126 | return <-d.writeResponseChan 127 | } 128 | 129 | // Close cleans up the queue and persists metadata 130 | func (d *diskQueue) Close() error { 131 | err := d.exit(false) 132 | if err != nil { 133 | return err 134 | } 135 | return d.sync() 136 | } 137 | 138 | func (d *diskQueue) Delete() error { 139 | return d.exit(true) 140 | } 141 | 142 | func (d *diskQueue) exit(deleted bool) error { 143 | d.Lock() 144 | defer d.Unlock() 145 | 146 | d.exitFlag = 1 147 | 148 | if deleted { 149 | d.logf("DISKQUEUE(%s): deleting", d.name) 150 | } else { 151 | d.logf("DISKQUEUE(%s): closing", d.name) 152 | } 153 | 154 | close(d.exitChan) 155 | // ensure that ioLoop has exited 156 | <-d.exitSyncChan 157 | 158 | if d.readFile != nil { 159 | d.readFile.Close() 160 | d.readFile = nil 161 | } 162 | 163 | if d.writeFile != nil { 164 | d.writeFile.Close() 165 | d.writeFile = nil 166 | } 167 | 168 | return nil 169 | } 170 | 171 | // Empty destructively clears out any pending data in the queue 172 | // by fast forwarding read positions and removing intermediate files 173 | func (d *diskQueue) Empty() error { 174 | d.RLock() 175 | defer d.RUnlock() 176 | 177 | if d.exitFlag == 1 { 178 | return errors.New("exiting") 179 | } 180 | 181 | d.logf("DISKQUEUE(%s): emptying", d.name) 182 | 183 | d.emptyChan <- 1 184 | return <-d.emptyResponseChan 185 | } 186 | 187 | func (d *diskQueue) deleteAllFiles() error { 188 | err := d.skipToNextRWFile() 189 | 190 | innerErr := os.Remove(d.metaDataFileName()) 191 | if innerErr != nil && 
!os.IsNotExist(innerErr) { 192 | d.logf("ERROR: diskqueue(%s) failed to remove metadata file - %s", d.name, innerErr) 193 | return innerErr 194 | } 195 | 196 | return err 197 | } 198 | 199 | func (d *diskQueue) skipToNextRWFile() error { 200 | var err error 201 | 202 | if d.readFile != nil { 203 | d.readFile.Close() 204 | d.readFile = nil 205 | } 206 | 207 | if d.writeFile != nil { 208 | d.writeFile.Close() 209 | d.writeFile = nil 210 | } 211 | 212 | for i := d.readFileNum; i <= d.writeFileNum; i++ { 213 | fn := d.fileName(i) 214 | innerErr := os.Remove(fn) 215 | if innerErr != nil && !os.IsNotExist(innerErr) { 216 | d.logf("ERROR: diskqueue(%s) failed to remove data file - %s", d.name, innerErr) 217 | err = innerErr 218 | } 219 | } 220 | 221 | d.writeFileNum++ 222 | d.writePos = 0 223 | d.readFileNum = d.writeFileNum 224 | d.readPos = 0 225 | d.nextReadFileNum = d.writeFileNum 226 | d.nextReadPos = 0 227 | atomic.StoreInt64(&d.depth, 0) 228 | 229 | return err 230 | } 231 | 232 | // readOne performs a low level filesystem read for a single []byte 233 | // while advancing read positions and rolling files, if necessary 234 | func (d *diskQueue) readOne() ([]byte, error) { 235 | var err error 236 | var msgSize int32 237 | 238 | if d.readFile == nil { 239 | curFileName := d.fileName(d.readFileNum) 240 | d.readFile, err = os.OpenFile(curFileName, os.O_RDONLY, 0600) 241 | if err != nil { 242 | return nil, err 243 | } 244 | 245 | d.logf("DISKQUEUE(%s): readOne() opened %s", d.name, curFileName) 246 | 247 | if d.readPos > 0 { 248 | _, err = d.readFile.Seek(d.readPos, 0) 249 | if err != nil { 250 | d.readFile.Close() 251 | d.readFile = nil 252 | return nil, err 253 | } 254 | } 255 | 256 | d.reader = bufio.NewReader(d.readFile) 257 | } 258 | 259 | err = binary.Read(d.reader, binary.BigEndian, &msgSize) 260 | if err != nil { 261 | d.readFile.Close() 262 | d.readFile = nil 263 | return nil, err 264 | } 265 | 266 | if msgSize < d.minMsgSize || msgSize > d.maxMsgSize { 267 | // this file is corrupt and we have no reasonable guarantee on 268 | // where a new message should begin 269 | d.readFile.Close() 270 | d.readFile = nil 271 | return nil, fmt.Errorf("invalid message read size (%d)", msgSize) 272 | } 273 | 274 | readBuf := make([]byte, msgSize) 275 | _, err = io.ReadFull(d.reader, readBuf) 276 | if err != nil { 277 | d.readFile.Close() 278 | d.readFile = nil 279 | return nil, err 280 | } 281 | 282 | totalBytes := int64(4 + msgSize) 283 | 284 | // we only advance next* because we have not yet sent this to consumers 285 | // (where readFileNum, readPos will actually be advanced) 286 | d.nextReadPos = d.readPos + totalBytes 287 | d.nextReadFileNum = d.readFileNum 288 | 289 | // TODO: each data file should embed the maxBytesPerFile 290 | // as the first 8 bytes (at creation time) ensuring that 291 | // the value can change without affecting runtime 292 | if d.nextReadPos > d.maxBytesPerFile { 293 | if d.readFile != nil { 294 | d.readFile.Close() 295 | d.readFile = nil 296 | } 297 | 298 | d.nextReadFileNum++ 299 | d.nextReadPos = 0 300 | } 301 | 302 | return readBuf, nil 303 | } 304 | 305 | // writeOne performs a low level filesystem write for a single []byte 306 | // while advancing write positions and rolling files, if necessary 307 | func (d *diskQueue) writeOne(data []byte) error { 308 | var err error 309 | 310 | if d.writeFile == nil { 311 | curFileName := d.fileName(d.writeFileNum) 312 | d.writeFile, err = os.OpenFile(curFileName, os.O_RDWR|os.O_CREATE, 0600) 313 | if err != nil { 314 | return err 
315 | } 316 | 317 | d.logf("DISKQUEUE(%s): writeOne() opened %s", d.name, curFileName) 318 | 319 | if d.writePos > 0 { 320 | _, err = d.writeFile.Seek(d.writePos, 0) 321 | if err != nil { 322 | d.writeFile.Close() 323 | d.writeFile = nil 324 | return err 325 | } 326 | } 327 | } 328 | 329 | dataLen := int32(len(data)) 330 | 331 | if dataLen < d.minMsgSize || dataLen > d.maxMsgSize { 332 | return fmt.Errorf("invalid message write size (%d) maxMsgSize=%d", dataLen, d.maxMsgSize) 333 | } 334 | 335 | d.writeBuf.Reset() 336 | err = binary.Write(&d.writeBuf, binary.BigEndian, dataLen) 337 | if err != nil { 338 | return err 339 | } 340 | 341 | _, err = d.writeBuf.Write(data) 342 | if err != nil { 343 | return err 344 | } 345 | 346 | // only write to the file once 347 | _, err = d.writeFile.Write(d.writeBuf.Bytes()) 348 | if err != nil { 349 | d.writeFile.Close() 350 | d.writeFile = nil 351 | return err 352 | } 353 | 354 | totalBytes := int64(4 + dataLen) 355 | d.writePos += totalBytes 356 | atomic.AddInt64(&d.depth, 1) 357 | 358 | if d.writePos > d.maxBytesPerFile { 359 | d.writeFileNum++ 360 | d.writePos = 0 361 | 362 | // sync every time we start writing to a new file 363 | err = d.sync() 364 | if err != nil { 365 | d.logf("ERROR: diskqueue(%s) failed to sync - %s", d.name, err) 366 | } 367 | 368 | if d.writeFile != nil { 369 | d.writeFile.Close() 370 | d.writeFile = nil 371 | } 372 | } 373 | 374 | return err 375 | } 376 | 377 | // sync fsyncs the current writeFile and persists metadata 378 | func (d *diskQueue) sync() error { 379 | if d.writeFile != nil { 380 | err := d.writeFile.Sync() 381 | if err != nil { 382 | d.writeFile.Close() 383 | d.writeFile = nil 384 | return err 385 | } 386 | } 387 | 388 | err := d.persistMetaData() 389 | if err != nil { 390 | return err 391 | } 392 | 393 | d.needSync = false 394 | return nil 395 | } 396 | 397 | // retrieveMetaData initializes state from the filesystem 398 | func (d *diskQueue) retrieveMetaData() error { 399 | var f *os.File 400 | var err error 401 | 402 | fileName := d.metaDataFileName() 403 | f, err = os.OpenFile(fileName, os.O_RDONLY, 0600) 404 | if err != nil { 405 | return err 406 | } 407 | defer f.Close() 408 | 409 | var depth int64 410 | _, err = fmt.Fscanf(f, "%d\n%d,%d\n%d,%d\n", 411 | &depth, 412 | &d.readFileNum, &d.readPos, 413 | &d.writeFileNum, &d.writePos) 414 | if err != nil { 415 | return err 416 | } 417 | atomic.StoreInt64(&d.depth, depth) 418 | d.nextReadFileNum = d.readFileNum 419 | d.nextReadPos = d.readPos 420 | 421 | return nil 422 | } 423 | 424 | // persistMetaData atomically writes state to the filesystem 425 | func (d *diskQueue) persistMetaData() error { 426 | var f *os.File 427 | var err error 428 | 429 | fileName := d.metaDataFileName() 430 | tmpFileName := fmt.Sprintf("%s.%d.tmp", fileName, rand.Int()) 431 | 432 | // write to tmp file 433 | f, err = os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE, 0600) 434 | if err != nil { 435 | return err 436 | } 437 | 438 | _, err = fmt.Fprintf(f, "%d\n%d,%d\n%d,%d\n", 439 | atomic.LoadInt64(&d.depth), 440 | d.readFileNum, d.readPos, 441 | d.writeFileNum, d.writePos) 442 | if err != nil { 443 | f.Close() 444 | return err 445 | } 446 | f.Sync() 447 | f.Close() 448 | 449 | // atomically rename 450 | return atomicRename(tmpFileName, fileName) 451 | } 452 | 453 | func (d *diskQueue) metaDataFileName() string { 454 | return fmt.Sprintf(path.Join(d.dataPath, "%s.diskqueue.meta.dat"), d.name) 455 | } 456 | 457 | func (d *diskQueue) fileName(fileNum int64) string { 458 | return 
fmt.Sprintf(path.Join(d.dataPath, "%s.diskqueue.%06d.dat"), d.name, fileNum) 459 | } 460 | 461 | func (d *diskQueue) checkTailCorruption(depth int64) { 462 | if d.readFileNum < d.writeFileNum || d.readPos < d.writePos { 463 | return 464 | } 465 | 466 | // we've reached the end of the diskqueue 467 | // if depth isn't 0 something went wrong 468 | if depth != 0 { 469 | if depth < 0 { 470 | d.logf( 471 | "ERROR: diskqueue(%s) negative depth at tail (%d), metadata corruption, resetting 0...", 472 | d.name, depth) 473 | } else if depth > 0 { 474 | d.logf( 475 | "ERROR: diskqueue(%s) positive depth at tail (%d), data loss, resetting 0...", 476 | d.name, depth) 477 | } 478 | // force set depth 0 479 | atomic.StoreInt64(&d.depth, 0) 480 | d.needSync = true 481 | } 482 | 483 | if d.readFileNum != d.writeFileNum || d.readPos != d.writePos { 484 | if d.readFileNum > d.writeFileNum { 485 | d.logf( 486 | "ERROR: diskqueue(%s) readFileNum > writeFileNum (%d > %d), corruption, skipping to next writeFileNum and resetting 0...", 487 | d.name, d.readFileNum, d.writeFileNum) 488 | } 489 | 490 | if d.readPos > d.writePos { 491 | d.logf( 492 | "ERROR: diskqueue(%s) readPos > writePos (%d > %d), corruption, skipping to next writeFileNum and resetting 0...", 493 | d.name, d.readPos, d.writePos) 494 | } 495 | 496 | d.skipToNextRWFile() 497 | d.needSync = true 498 | } 499 | } 500 | 501 | func (d *diskQueue) moveForward() { 502 | oldReadFileNum := d.readFileNum 503 | d.readFileNum = d.nextReadFileNum 504 | d.readPos = d.nextReadPos 505 | depth := atomic.AddInt64(&d.depth, -1) 506 | 507 | // see if we need to clean up the old file 508 | if oldReadFileNum != d.nextReadFileNum { 509 | // sync every time we start reading from a new file 510 | d.needSync = true 511 | 512 | fn := d.fileName(oldReadFileNum) 513 | err := os.Remove(fn) 514 | if err != nil { 515 | d.logf("ERROR: failed to Remove(%s) - %s", fn, err) 516 | } 517 | } 518 | 519 | d.checkTailCorruption(depth) 520 | } 521 | 522 | func (d *diskQueue) handleReadError() { 523 | // jump to the next read file and rename the current (bad) file 524 | if d.readFileNum == d.writeFileNum { 525 | // if you can't properly read from the current write file it's safe to 526 | // assume that something is fucked and we should skip the current file too 527 | if d.writeFile != nil { 528 | d.writeFile.Close() 529 | d.writeFile = nil 530 | } 531 | d.writeFileNum++ 532 | d.writePos = 0 533 | } 534 | 535 | badFn := d.fileName(d.readFileNum) 536 | badRenameFn := badFn + ".bad" 537 | 538 | d.logf( 539 | "NOTICE: diskqueue(%s) jump to next file and saving bad file as %s", 540 | d.name, badRenameFn) 541 | 542 | err := atomicRename(badFn, badRenameFn) 543 | if err != nil { 544 | d.logf( 545 | "ERROR: diskqueue(%s) failed to rename bad diskqueue file %s to %s", 546 | d.name, badFn, badRenameFn) 547 | } 548 | 549 | d.readFileNum++ 550 | d.readPos = 0 551 | d.nextReadFileNum = d.readFileNum 552 | d.nextReadPos = 0 553 | 554 | // significant state change, schedule a sync on the next iteration 555 | d.needSync = true 556 | } 557 | 558 | // ioLoop provides the backend for exposing a go channel (via ReadChan()) 559 | // in support of multiple concurrent queue consumers 560 | // 561 | // it works by looping and branching based on whether or not the queue has data 562 | // to read and blocking until data is either read or written over the appropriate 563 | // go channels 564 | // 565 | // conveniently this also means that we're asynchronously reading from the filesystem 566 | func (d *diskQueue) 
ioLoop() { 567 | var dataRead []byte 568 | var err error 569 | var count int64 570 | var r chan []byte 571 | 572 | syncTicker := time.NewTicker(d.syncTimeout) 573 | 574 | for { 575 | // dont sync all the time :) 576 | if count == d.syncEvery { 577 | d.needSync = true 578 | } 579 | 580 | if d.needSync { 581 | err = d.sync() 582 | if err != nil { 583 | d.logf("ERROR: diskqueue(%s) failed to sync - %s", d.name, err) 584 | } 585 | count = 0 586 | } 587 | 588 | if (d.readFileNum < d.writeFileNum) || (d.readPos < d.writePos) { 589 | if d.nextReadPos == d.readPos { 590 | dataRead, err = d.readOne() 591 | if err != nil { 592 | d.logf("ERROR: reading from diskqueue(%s) at %d of %s - %s", 593 | d.name, d.readPos, d.fileName(d.readFileNum), err) 594 | d.handleReadError() 595 | continue 596 | } 597 | } 598 | r = d.readChan 599 | } else { 600 | r = nil 601 | } 602 | 603 | select { 604 | // the Go channel spec dictates that nil channel operations (read or write) 605 | // in a select are skipped, we set r to d.readChan only when there is data to read 606 | case r <- dataRead: 607 | count++ 608 | // moveForward sets needSync flag if a file is removed 609 | d.moveForward() 610 | case <-d.emptyChan: 611 | d.emptyResponseChan <- d.deleteAllFiles() 612 | count = 0 613 | case dataWrite := <-d.writeChan: 614 | count++ 615 | d.writeResponseChan <- d.writeOne(dataWrite) 616 | case <-syncTicker.C: 617 | if count == 0 { 618 | // avoid sync when there's no activity 619 | continue 620 | } 621 | d.needSync = true 622 | case <-d.exitChan: 623 | goto exit 624 | } 625 | } 626 | 627 | exit: 628 | d.logf("DISKQUEUE(%s): closing ... ioLoop", d.name) 629 | syncTicker.Stop() 630 | d.exitSyncChan <- 1 631 | } 632 | --------------------------------------------------------------------------------
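Note on the metadata file: `persistMetaData` writes plain text (the depth on the first line, then `readFileNum,readPos` and `writeFileNum,writePos` on the next two lines), and `retrieveMetaData` reads it back with the same `fmt.Fscanf` format string. The following standalone sketch parses such a file the same way; the path is only an example (it assumes the empty `Name` and `./data` used in the sample) and the helper is not part of the package:

```
package main

import (
	"fmt"
	"os"
)

func main() {
	// with Name "" the metadata file is <DataPath>/.diskqueue.meta.dat
	f, err := os.Open("./data/.diskqueue.meta.dat")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var depth, readFileNum, readPos, writeFileNum, writePos int64

	// same format string persistMetaData uses when writing the file
	if _, err := fmt.Fscanf(f, "%d\n%d,%d\n%d,%d\n",
		&depth,
		&readFileNum, &readPos,
		&writeFileNum, &writePos); err != nil {
		panic(err)
	}

	fmt.Printf("depth=%d read=%d,%d write=%d,%d\n",
		depth, readFileNum, readPos, writeFileNum, writePos)
}
```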