├── .gitignore ├── segment ├── pkg.go └── v1 │ ├── segment_writer.go │ ├── segment_reader.go │ ├── segment_writer_test.go │ ├── segment_reader_test.go │ ├── segment.go │ └── segment_test.go ├── .github ├── dependabot.yml └── workflows │ └── go.yml ├── go.mod ├── segment_header_test.go ├── segment_header.go ├── example └── main.go ├── pkg.go ├── utils_test.go ├── common └── pkg.go ├── utils.go ├── go.sum ├── entry ├── entry.go └── entry_test.go ├── queue_bench_test.go ├── queue.go ├── README.md ├── queue_test.go └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | -------------------------------------------------------------------------------- /segment/pkg.go: -------------------------------------------------------------------------------- 1 | package segment 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/linxGnu/pqueue/common" 7 | "github.com/linxGnu/pqueue/entry" 8 | ) 9 | 10 | // Segment interface. 11 | type Segment interface { 12 | io.Closer 13 | Reading(io.ReadSeekCloser) (int, error) 14 | ReadEntry(*entry.Entry) (common.ErrCode, int, error) 15 | WriteEntry(entry.Entry) (common.ErrCode, error) 16 | WriteBatch(entry.Batch) (common.ErrCode, error) 17 | SeekToRead(int64) error 18 | } 19 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "weekly" 12 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/linxGnu/pqueue 2 | 3 | go 1.17 4 | 5 | require ( 6 | github.com/grandecola/bigqueue v0.5.0 7 | github.com/hashicorp/go-multierror v1.1.1 8 | github.com/stretchr/testify v1.9.0 9 | ) 10 | 11 | require ( 12 | github.com/davecgh/go-spew v1.1.1 // indirect 13 | github.com/grandecola/mmap v0.6.0 // indirect 14 | github.com/hashicorp/errwrap v1.1.0 // indirect 15 | github.com/kr/pretty v0.2.0 // indirect 16 | github.com/pmezard/go-difflib v1.0.0 // indirect 17 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect 18 | gopkg.in/yaml.v3 v3.0.1 // indirect 19 | ) 20 | -------------------------------------------------------------------------------- /segment_header_test.go: -------------------------------------------------------------------------------- 1 | package pqueue 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestSegmentHeadRW(t *testing.T) { 11 | var sh segmentHeader 12 | 13 | t.Run("Happy", func(t *testing.T) { 14 | buf := bytes.NewBuffer([]byte{}) 15 | require.NoError(t, sh.WriteHeader(&mockWriterErr{buf: buf}, 123)) 16 | require.Equal(t, []byte{0, 0, 0, 123}, buf.Bytes()) 17 | }) 18 | 19 | t.Run("Error", func(t *testing.T) { 20 | buf := bytes.NewBuffer([]byte{}) 21 | require.Error(t, sh.WriteHeader(&mockWriterErr{buf: buf, onWrite: true}, 123)) 22 | }) 23 | } 24 | 
-------------------------------------------------------------------------------- /segment_header.go: -------------------------------------------------------------------------------- 1 | package pqueue 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/linxGnu/pqueue/common" 7 | ) 8 | 9 | type segmentHeadWriter interface { 10 | WriteHeader(io.WriteCloser, common.SegmentFormat) error 11 | ReadHeader(io.ReadCloser) (common.SegmentFormat, error) 12 | } 13 | 14 | type segmentHeader struct{} 15 | 16 | func (s *segmentHeader) WriteHeader(w io.WriteCloser, format common.SegmentFormat) (err error) { 17 | var buf [4]byte 18 | common.Endianese.PutUint32(buf[:], format) 19 | if _, err = w.Write(buf[:]); err != nil { 20 | _ = w.Close() 21 | } 22 | return 23 | } 24 | 25 | func (s *segmentHeader) ReadHeader(r io.ReadCloser) (format common.SegmentFormat, err error) { 26 | // read segment header 27 | var buf [4]byte 28 | if _, err = io.ReadFull(r, buf[:]); err != nil { 29 | _ = r.Close() 30 | } else { 31 | format = common.Endianese.Uint32(buf[:]) 32 | } 33 | return 34 | } 35 | -------------------------------------------------------------------------------- /example/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | 7 | "github.com/linxGnu/pqueue" 8 | "github.com/linxGnu/pqueue/entry" 9 | ) 10 | 11 | func main() { 12 | // 1000 entries per segment 13 | q, err := pqueue.New("/tmp", 1000) // anywhere you want, instead of /tmp 14 | if err != nil { 15 | log.Fatal(err) 16 | } 17 | defer func() { 18 | _ = q.Close() // it's important to close the queue before exit 19 | }() 20 | 21 | // enqueue 22 | if err = q.Enqueue([]byte{1, 2, 3, 4}); err != nil { 23 | log.Fatal(err) 24 | } 25 | if err = q.Enqueue([]byte{5, 6, 7, 8}); err != nil { 26 | log.Fatal(err) 27 | } 28 | 29 | // peek 30 | var v entry.Entry 31 | if hasItem := q.Peek(&v); hasItem { 32 | fmt.Println(v) // print: [1 2 3 4] 33 | } 34 | 35 | // 
dequeue 36 | if hasItem := q.Dequeue(&v); hasItem { 37 | fmt.Println(v) // print: [1 2 3 4] 38 | } 39 | if hasItem := q.Dequeue(&v); hasItem { 40 | fmt.Println(v) // print: [5 6 7 8] 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /pkg.go: -------------------------------------------------------------------------------- 1 | package pqueue 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/linxGnu/pqueue/common" 7 | "github.com/linxGnu/pqueue/entry" 8 | ) 9 | 10 | const ( 11 | // DefaultMaxEntriesPerSegment is default value for max entries per segment. 12 | DefaultMaxEntriesPerSegment = 1000 13 | ) 14 | 15 | // QueueSettings are settings for queue. 16 | type QueueSettings struct { 17 | DataDir string 18 | SegmentFormat common.SegmentFormat 19 | EntryFormat common.EntryFormat 20 | MaxEntriesPerSegment uint32 21 | } 22 | 23 | // Queue interface. 24 | type Queue interface { 25 | io.Closer 26 | Enqueue(entry.Entry) error 27 | EnqueueBatch(entry.Batch) error 28 | Dequeue(*entry.Entry) bool 29 | Peek(*entry.Entry) bool 30 | } 31 | 32 | // New queue from directory. 
33 | func New(dataDir string, maxEntriesPerSegment uint32) (Queue, error) { 34 | return load(QueueSettings{ 35 | DataDir: dataDir, 36 | MaxEntriesPerSegment: maxEntriesPerSegment, 37 | SegmentFormat: common.SegmentV1, 38 | EntryFormat: common.EntryV1, 39 | }, &segmentHeader{}) 40 | } 41 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | build: 11 | name: Build 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Set up Go 1.x 15 | uses: actions/setup-go@v2 16 | with: 17 | go-version: ^1.16 18 | id: go 19 | 20 | - name: Check out code into the Go module directory 21 | uses: actions/checkout@v2 22 | 23 | - name: Get dependencies 24 | run: | 25 | go get -v -t -d ./... 26 | if [ -f Gopkg.toml ]; then 27 | curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh 28 | dep ensure 29 | fi 30 | 31 | - name: Linter 32 | uses: golangci/golangci-lint-action@v2 33 | with: 34 | version: latest 35 | 36 | - name: Test Coverage 37 | run: go test -v -race -count=1 -coverprofile=coverage.out ./... 
38 | 39 | - name: Convert coverage to lcov 40 | uses: jandelgado/gcov2lcov-action@v1.0.4 41 | with: 42 | infile: coverage.out 43 | outfile: coverage.lcov 44 | 45 | - name: Coveralls 46 | uses: coverallsapp/github-action@v1.1.0 47 | with: 48 | github-token: ${{ secrets.github_token }} 49 | path-to-lcov: coverage.lcov 50 | -------------------------------------------------------------------------------- /segment/v1/segment_writer.go: -------------------------------------------------------------------------------- 1 | package segv1 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | 7 | "github.com/linxGnu/pqueue/common" 8 | "github.com/linxGnu/pqueue/entry" 9 | 10 | "github.com/hashicorp/go-multierror" 11 | ) 12 | 13 | var segmentEnding = []byte{0, 0, 0, 0, 0, 0, 0, 0} 14 | 15 | type segmentWriter struct { 16 | w *bufio.Writer 17 | underlying io.WriteCloser 18 | entryFormat common.EntryFormat 19 | } 20 | 21 | func newSegmentWriter(w io.WriteCloser, entryFormat common.EntryFormat) *segmentWriter { 22 | return &segmentWriter{ 23 | w: bufio.NewWriter(w), 24 | underlying: w, 25 | entryFormat: entryFormat, 26 | } 27 | } 28 | 29 | func (s *segmentWriter) Close() (err error) { 30 | _, err = s.w.Write(segmentEnding) 31 | err = multierror.Append(err, s.w.Flush(), s.underlying.Close()).ErrorOrNil() 32 | return 33 | } 34 | 35 | // WriteEntry to underlying writer. 36 | func (s *segmentWriter) WriteEntry(e entry.Entry) (common.ErrCode, error) { 37 | _, err := e.Marshal(s.w, s.entryFormat) 38 | if err == nil { 39 | err = s.w.Flush() 40 | } 41 | if err == nil { 42 | return common.NoError, nil 43 | } 44 | return common.SegmentCorrupted, err 45 | } 46 | 47 | // WriteBatch to underlying writer. 
48 | func (s *segmentWriter) WriteBatch(b entry.Batch) (common.ErrCode, error) { 49 | _, err := b.Marshal(s.w, s.entryFormat) 50 | if err == nil { 51 | err = s.w.Flush() 52 | } 53 | if err == nil { 54 | return common.NoError, nil 55 | } 56 | return common.SegmentCorrupted, err 57 | } 58 | -------------------------------------------------------------------------------- /utils_test.go: -------------------------------------------------------------------------------- 1 | package pqueue 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "testing" 7 | 8 | "github.com/linxGnu/pqueue/common" 9 | 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestLoadInfos(t *testing.T) { 14 | t.Run("Invalid", func(t *testing.T) { 15 | _, err := loadFileInfos("/abc", nil) 16 | require.Error(t, err) 17 | }) 18 | 19 | t.Run("Happy", func(t *testing.T) { 20 | f1, err := os.CreateTemp(tmpDir, "seg_") 21 | require.NoError(t, err) 22 | require.NoError(t, f1.Close()) 23 | 24 | f2, err := os.CreateTemp(tmpDir, "seg_") 25 | require.NoError(t, err) 26 | require.NoError(t, f2.Close()) 27 | 28 | _, err = loadFileInfos(tmpDir, func(os.DirEntry) (os.FileInfo, error) { 29 | return nil, fmt.Errorf("fake error") 30 | }) 31 | require.Error(t, err) 32 | 33 | files, err := loadFileInfos(tmpDir, fileInfoExtractor) 34 | require.NoError(t, err) 35 | for i := range files { 36 | _ = os.Remove(files[i].path) 37 | } 38 | }) 39 | } 40 | 41 | func TestLoading(t *testing.T) { 42 | t.Run("Error", func(t *testing.T) { 43 | _, err := load(QueueSettings{ 44 | DataDir: "/abc", 45 | }, nil) 46 | require.Error(t, err) 47 | 48 | _, err = load(QueueSettings{ 49 | DataDir: tmpDir, 50 | EntryFormat: 123, 51 | }, &segmentHeader{}) 52 | require.Error(t, err) 53 | 54 | _, err = load(QueueSettings{ 55 | DataDir: tmpDir, 56 | SegmentFormat: 123, 57 | }, &segmentHeader{}) 58 | require.Error(t, err) 59 | }) 60 | 61 | t.Run("OK", func(t *testing.T) { 62 | q, err := load(QueueSettings{ 63 | DataDir: tmpDir, 64 | SegmentFormat: 
common.SegmentV1, 65 | EntryFormat: common.EntryV1, 66 | }, &segmentHeader{}) 67 | require.NoError(t, err) 68 | for q.segments.Len() > 0 { 69 | front := q.segments.Front() 70 | _ = os.Remove(front.Value.(*segment).path) 71 | q.segments.Remove(front) 72 | } 73 | }) 74 | } 75 | -------------------------------------------------------------------------------- /segment/v1/segment_reader.go: -------------------------------------------------------------------------------- 1 | package segv1 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | 7 | "github.com/linxGnu/pqueue/common" 8 | "github.com/linxGnu/pqueue/entry" 9 | ) 10 | 11 | const ( 12 | bufferingSize = 16 << 10 // 16KB 13 | ) 14 | 15 | type bufferReader struct { 16 | *bufio.Reader 17 | r io.ReadSeekCloser // underlying reader (i.e *os.File) 18 | } 19 | 20 | func newBufferReader(r io.ReadSeekCloser) *bufferReader { 21 | return &bufferReader{ 22 | Reader: bufio.NewReaderSize(r, bufferingSize), 23 | r: r, 24 | } 25 | } 26 | 27 | func (r *bufferReader) Seek(offset int64, whence int) (int64, error) { 28 | ret, err := r.r.Seek(offset, whence) 29 | if err == nil { 30 | r.Reader.Reset(r.r) 31 | } 32 | return ret, err 33 | } 34 | 35 | func (r *bufferReader) Close() error { 36 | return r.r.Close() 37 | } 38 | 39 | type segmentReader struct { 40 | r io.ReadSeekCloser 41 | entryFormat common.EntryFormat 42 | } 43 | 44 | func newSegmentReader(r io.ReadSeekCloser, entryFormat common.EntryFormat) *segmentReader { 45 | return &segmentReader{r: r, entryFormat: entryFormat} 46 | } 47 | 48 | func (s *segmentReader) Seek(offset int64, whence int) (int64, error) { 49 | return s.r.Seek(offset, whence) 50 | } 51 | 52 | func (s *segmentReader) Close() error { 53 | return s.r.Close() 54 | } 55 | 56 | // ReadEntry into destination. 
57 | func (s *segmentReader) ReadEntry(dst *entry.Entry) (common.ErrCode, int, error) { 58 | code, n, err := dst.Unmarshal(s.r, s.entryFormat) 59 | switch code { 60 | case common.NoError: 61 | return common.NoError, n, nil 62 | 63 | case common.EntryNoMore: 64 | return common.SegmentNoMoreReadWeak, 0, nil 65 | 66 | case common.EntryZeroSize: 67 | return common.SegmentNoMoreReadStrong, 0, nil 68 | 69 | case common.EntryTooBig: 70 | return common.SegmentCorrupted, n, common.ErrEntryTooBig 71 | 72 | default: 73 | return common.SegmentCorrupted, n, err 74 | } 75 | } 76 | 77 | func (s *segmentReader) SeekToRead(offset int64) (err error) { 78 | _, err = s.r.Seek(offset, 0) 79 | return err 80 | } 81 | -------------------------------------------------------------------------------- /common/pkg.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "encoding/binary" 5 | "fmt" 6 | "math" 7 | ) 8 | 9 | const ( 10 | // MaxEntrySize indicates max size for Entry. 11 | MaxEntrySize = math.MaxInt32 >> 1 12 | ) 13 | 14 | // EntryFormat layout. 15 | type EntryFormat = uint32 16 | 17 | const ( 18 | // EntryV1 layout: 19 | // 20 | // [Length - uint32][Checksum - uint32][Payload - bytes] 21 | // 22 | // Note: 23 | // - `Payload` has size of `Length` 24 | // - `Checksum` is crc32_IEEE(`Payload`) 25 | // - `Entry` always starts with non-zero `Length` header 26 | // - `Length` == 0 means ending, Payload won't be written in this case. 27 | EntryV1 EntryFormat = iota 28 | ) 29 | 30 | var ( 31 | // ErrEntryTooBig indicates entry size is bigger than 1GB. 32 | ErrEntryTooBig = fmt.Errorf("entry size is bigger than limitation of 1GB") 33 | 34 | // ErrEntryUnsupportedFormat indicates unsupported format for entry on disk. 35 | ErrEntryUnsupportedFormat = fmt.Errorf("unsupported entry format") 36 | 37 | // ErrEntryInvalidCheckSum indicates entry invalid checksum. 
38 | ErrEntryInvalidCheckSum = fmt.Errorf("invalid checksum") 39 | ) 40 | 41 | // SegmentFormat layout. 42 | type SegmentFormat = uint32 43 | 44 | const ( 45 | // SegmentV1 layout: 46 | // 47 | // [Segment Format - uint32][Entry Format - uint32][Entries] 48 | SegmentV1 SegmentFormat = iota 49 | ) 50 | 51 | var ( 52 | // ErrSegmentUnsupportedFormat indicates invalid segment format. 53 | ErrSegmentUnsupportedFormat = fmt.Errorf("unsupported segment format") 54 | ) 55 | 56 | var ( 57 | // Endianese for all. 58 | Endianese = binary.BigEndian 59 | ) 60 | 61 | // ErrCode for internal use. 62 | type ErrCode int 63 | 64 | // Codes for internal use. 65 | const ( 66 | NoError ErrCode = iota 67 | 68 | EntryCorrupted 69 | EntryZeroSize 70 | EntryTooBig 71 | EntryUnsupportedFormat 72 | EntryWriteErr 73 | EntryNoMore 74 | 75 | SegmentNoMoreReadWeak 76 | SegmentNoMoreReadStrong 77 | SegmentNoMoreWrite 78 | SegmentCorrupted 79 | ) 80 | 81 | var ( 82 | // ErrQueueCorrupted indicates queue corrupted. 83 | ErrQueueCorrupted = fmt.Errorf("queue corrupted") 84 | ) 85 | -------------------------------------------------------------------------------- /utils.go: -------------------------------------------------------------------------------- 1 | package pqueue 2 | 3 | import ( 4 | "container/list" 5 | "fmt" 6 | "os" 7 | "path" 8 | "path/filepath" 9 | "sort" 10 | "strconv" 11 | "strings" 12 | "time" 13 | ) 14 | 15 | type file struct { 16 | modTime time.Time 17 | path string 18 | } 19 | 20 | func load(settings QueueSettings, segHeader segmentHeadWriter) (*queue, error) { 21 | if settings.MaxEntriesPerSegment <= 0 { 22 | settings.MaxEntriesPerSegment = DefaultMaxEntriesPerSegment 23 | } 24 | 25 | files, err := loadFileInfos(settings.DataDir, fileInfoExtractor) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | segments := list.New() 31 | for i := range files { 32 | segments.PushBack(&segment{ 33 | readable: false, 34 | path: files[i].path, 35 | }) 36 | } 37 | 38 | // create new 
segment for upcoming entries 39 | q := &queue{ 40 | settings: settings, 41 | segHeadWriter: segHeader, 42 | } 43 | 44 | seg, err := q.newSegment() 45 | if err != nil { 46 | return nil, err 47 | } 48 | segments.PushBack(seg) 49 | 50 | q.segments = segments 51 | return q, nil 52 | } 53 | 54 | func loadFileInfos(dir string, infoExtractor func(os.DirEntry) (os.FileInfo, error)) ([]file, error) { 55 | fileList, err := os.ReadDir(dir) 56 | if err != nil { 57 | return nil, err 58 | } 59 | 60 | files := make([]file, 0, len(fileList)) 61 | for i := range fileList { 62 | fileName := fileList[i].Name() 63 | 64 | if strings.HasPrefix(fileName, segPrefix) && 65 | !strings.HasSuffix(fileName, segOffsetFileSuffix) { 66 | // extract file info 67 | info, e := infoExtractor(fileList[i]) 68 | if e != nil { 69 | return nil, e 70 | } 71 | 72 | // add to list 73 | files = append(files, file{ 74 | path: filepath.Join(dir, fileList[i].Name()), 75 | modTime: info.ModTime(), 76 | }) 77 | } 78 | } 79 | 80 | sort.Slice(files, func(i, j int) bool { 81 | return files[i].modTime.Before(files[j].modTime) 82 | }) 83 | 84 | return files, nil 85 | } 86 | 87 | func fileInfoExtractor(f os.DirEntry) (os.FileInfo, error) { 88 | return f.Info() 89 | } 90 | 91 | func createFile(dir, prefix string) (f *os.File, err error) { 92 | prefix = path.Join(dir, prefix) 93 | 94 | for attempt := 0; attempt < 10_000; attempt++ { 95 | name := prefix + strconv.FormatInt(time.Now().UnixNano(), 10) 96 | 97 | f, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o600) 98 | if !os.IsExist(err) { 99 | return 100 | } 101 | } 102 | 103 | err = fmt.Errorf("creating file but fail. 
path: %s", dir) 104 | return 105 | } 106 | -------------------------------------------------------------------------------- /segment/v1/segment_writer_test.go: -------------------------------------------------------------------------------- 1 | package segv1 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "testing" 7 | 8 | "github.com/linxGnu/pqueue/common" 9 | "github.com/linxGnu/pqueue/entry" 10 | 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | type mockWriter struct { 15 | *bytes.Buffer 16 | } 17 | 18 | func (m *mockWriter) Sync() error { return nil } 19 | func (m *mockWriter) Close() error { return nil } 20 | 21 | type mockWriterErr struct { 22 | onWrite bool 23 | onSync bool 24 | onClose bool 25 | } 26 | 27 | func (m *mockWriterErr) Write([]byte) (int, error) { 28 | if m.onWrite { 29 | return 0, fmt.Errorf("fake error") 30 | } 31 | return 0, nil 32 | } 33 | func (m *mockWriterErr) Sync() error { 34 | if m.onSync { 35 | return fmt.Errorf("fake error") 36 | } 37 | return nil 38 | } 39 | func (m *mockWriterErr) Close() error { 40 | if m.onClose { 41 | return fmt.Errorf("fake error") 42 | } 43 | return nil 44 | } 45 | 46 | func TestSegmentWriter(t *testing.T) { 47 | t.Run("Happy", func(t *testing.T) { 48 | w := newSegmentWriter( 49 | &mockWriter{Buffer: bytes.NewBuffer(make([]byte, 0, 128))}, 50 | common.EntryV1) 51 | 52 | code, err := w.WriteEntry([]byte{1, 2, 3}) 53 | require.NoError(t, err) 54 | require.Equal(t, common.NoError, code) 55 | 56 | code, err = w.WriteEntry(nil) 57 | require.NoError(t, err) 58 | require.Equal(t, common.NoError, code) 59 | 60 | require.NoError(t, w.Close()) 61 | }) 62 | 63 | t.Run("HappyBatch", func(t *testing.T) { 64 | w := newSegmentWriter( 65 | &mockWriter{Buffer: bytes.NewBuffer(make([]byte, 0, 128))}, 66 | common.EntryV1) 67 | 68 | b := entry.NewBatch(2) 69 | b.Append([]byte{1, 2, 3}) 70 | 71 | code, err := w.WriteBatch(b) 72 | require.NoError(t, err) 73 | require.Equal(t, common.NoError, code) 74 | 75 | code, err = 
w.WriteBatch(entry.Batch{}) 76 | require.NoError(t, err) 77 | require.Equal(t, common.NoError, code) 78 | 79 | require.NoError(t, w.Close()) 80 | }) 81 | 82 | t.Run("CloseError", func(t *testing.T) { 83 | m := newSegmentWriter(&mockWriterErr{onWrite: true}, common.EntryV1) 84 | require.Error(t, m.Close()) 85 | }) 86 | 87 | t.Run("WriteError", func(t *testing.T) { 88 | w := newSegmentWriter(&mockWriterErr{onWrite: true}, common.EntryV1) 89 | 90 | code, err := w.WriteEntry([]byte{1, 2, 3}) 91 | require.Error(t, err) 92 | require.Equal(t, common.SegmentCorrupted, code) 93 | 94 | b := entry.NewBatch(2) 95 | b.Append([]byte{1, 2, 3}) 96 | 97 | code, err = w.WriteBatch(b) 98 | require.Error(t, err) 99 | require.Equal(t, common.SegmentCorrupted, code) 100 | }) 101 | } 102 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/grandecola/bigqueue v0.5.0 h1:Ln02FkusfWav4FDeIs+7D+VhnC1aBZBqZIUN7f6HRXo= 5 | github.com/grandecola/bigqueue v0.5.0/go.mod h1:2gzUVpbn5/aaGa9f45OWQvClbohutIIqztheSS5w4u8= 6 | github.com/grandecola/mmap v0.6.0 h1:dYrZWLay1rDlmlsGsSIoXGQ+JMu/t2ZnKt8vT1h+1o0= 7 | github.com/grandecola/mmap v0.6.0/go.mod h1:q5v9jpm393rcp5PXE6biArHKc2SWJBpXjfxSRtQMtNU= 8 | github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 9 | github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 10 | github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 11 | github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 12 | 
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= 13 | github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= 14 | github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 15 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 16 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 17 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 18 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 19 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 20 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 21 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 22 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 23 | github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 24 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 25 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 26 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 27 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 28 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 29 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 30 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= 31 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 32 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 33 | 
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 34 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 35 | -------------------------------------------------------------------------------- /segment/v1/segment_reader_test.go: -------------------------------------------------------------------------------- 1 | package segv1 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/linxGnu/pqueue/common" 8 | "github.com/linxGnu/pqueue/entry" 9 | 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | type mockReadSeeker struct { 14 | *bytes.Buffer 15 | } 16 | 17 | func newMockReadSeeker(buf *bytes.Buffer) *mockReadSeeker { 18 | return &mockReadSeeker{Buffer: buf} 19 | } 20 | func (m *mockReadSeeker) Seek(int64, int) (int64, error) { return 0, nil } 21 | func (m *mockReadSeeker) Close() error { return nil } 22 | 23 | func TestSegmentReader(t *testing.T) { 24 | t.Run("Corrupted", func(t *testing.T) { 25 | { 26 | w := newSegmentReader(newMockReadSeeker(bytes.NewBuffer([]byte{1, 2})), common.EntryV1) 27 | 28 | var e entry.Entry 29 | code, n, err := w.ReadEntry(&e) 30 | require.Equal(t, common.SegmentCorrupted, code) 31 | require.Error(t, err) 32 | require.Equal(t, 2, n) 33 | } 34 | 35 | { 36 | w := newSegmentReader(newMockReadSeeker(bytes.NewBuffer([]byte{255, 255, 0, 0, 0, 0, 0, 0})), common.EntryV1) 37 | 38 | var e entry.Entry 39 | code, n, err := w.ReadEntry(&e) 40 | require.Equal(t, common.SegmentCorrupted, code) 41 | require.Equal(t, common.ErrEntryTooBig, err) 42 | require.Equal(t, 8, n) 43 | } 44 | 45 | { 46 | w := newSegmentReader(newMockReadSeeker(bytes.NewBuffer([]byte{0, 0, 0, 0, 0, 0, 0, 0})), common.EntryV1) 47 | 48 | var e entry.Entry 49 | code, n, err := w.ReadEntry(&e) 50 | require.Equal(t, common.SegmentNoMoreReadStrong, code) 51 | require.NoError(t, err) 52 | require.Equal(t, 0, n) 53 | } 54 | 55 | { 56 | w := newSegmentReader(newMockReadSeeker(bytes.NewBuffer([]byte{})), common.EntryV1) 57 | 
58 | var e entry.Entry 59 | code, n, err := w.ReadEntry(&e) 60 | require.Equal(t, common.SegmentNoMoreReadWeak, code) 61 | require.NoError(t, err) 62 | require.Equal(t, 0, n) 63 | } 64 | }) 65 | 66 | t.Run("Happy", func(t *testing.T) { 67 | underlying := newMockReadSeeker(bytes.NewBuffer([]byte{ 68 | 0, 0, 0, 2, 173, 62, 94, 152, 19, 31, 69 | 0, 0, 0, 2, 173, 62, 94, 152, 19, 31, 70 | })) 71 | w := newSegmentReader(newBufferReader(underlying), common.EntryV1) 72 | require.NoError(t, w.SeekToRead(0)) 73 | 74 | { 75 | var e entry.Entry 76 | code, n, err := w.ReadEntry(&e) 77 | require.Equal(t, common.NoError, code) 78 | require.NoError(t, err) 79 | require.EqualValues(t, []byte{19, 31}, e) 80 | require.Equal(t, 10, n) 81 | } 82 | 83 | { 84 | var e entry.Entry 85 | code, n, err := w.ReadEntry(&e) 86 | require.Equal(t, common.NoError, code) 87 | require.NoError(t, err) 88 | require.EqualValues(t, []byte{19, 31}, e) 89 | require.Equal(t, 10, n) 90 | 91 | } 92 | 93 | { 94 | var e entry.Entry 95 | code, n, err := w.ReadEntry(&e) 96 | require.Equal(t, common.SegmentNoMoreReadWeak, code) 97 | require.NoError(t, err) 98 | require.Equal(t, 0, n) 99 | } 100 | 101 | require.NoError(t, w.Close()) 102 | }) 103 | } 104 | -------------------------------------------------------------------------------- /entry/entry.go: -------------------------------------------------------------------------------- 1 | package entry 2 | 3 | import ( 4 | "errors" 5 | "hash/crc32" 6 | "io" 7 | 8 | "github.com/linxGnu/pqueue/common" 9 | ) 10 | 11 | // Reader interface. 12 | type Reader interface { 13 | io.Closer 14 | io.Seeker 15 | ReadEntry(*Entry) (common.ErrCode, int, error) 16 | } 17 | 18 | // Writer interface. 19 | type Writer interface { 20 | io.Closer 21 | WriteEntry(Entry) (common.ErrCode, error) 22 | WriteBatch(Batch) (common.ErrCode, error) 23 | } 24 | 25 | // Entry represents queue entry. 26 | type Entry []byte 27 | 28 | // Marshal writes entry to writer. 
29 | func (e Entry) Marshal(w io.Writer, format common.EntryFormat) (code common.ErrCode, err error) { 30 | switch format { 31 | case common.EntryV1: 32 | return e.marshalV1(w) 33 | 34 | default: 35 | return common.EntryUnsupportedFormat, common.ErrEntryUnsupportedFormat 36 | } 37 | } 38 | 39 | // [Length - uint32][Checksum - uint32][Payload - bytes] 40 | func (e Entry) marshalV1(w io.Writer) (code common.ErrCode, err error) { 41 | var buf [8]byte 42 | common.Endianese.PutUint64(buf[:], uint64(len(e))<<32|uint64(crc32.ChecksumIEEE(e))) 43 | if _, err = w.Write(buf[:]); err == nil { 44 | _, err = w.Write(e) 45 | } 46 | 47 | if err != nil { 48 | code = common.EntryWriteErr 49 | } else { 50 | code = common.NoError 51 | } 52 | 53 | return 54 | } 55 | 56 | // Unmarshal from reader. 57 | func (e *Entry) Unmarshal(r io.Reader, format common.EntryFormat) (common.ErrCode, int, error) { 58 | switch format { 59 | case common.EntryV1: 60 | return e.unmarshalV1(r) 61 | 62 | default: 63 | return common.EntryUnsupportedFormat, 0, common.ErrEntryUnsupportedFormat 64 | } 65 | } 66 | 67 | // [Length - uint32][Checksum - uint32][Payload - bytes] 68 | func (e *Entry) unmarshalV1(r io.Reader) (code common.ErrCode, n int, err error) { 69 | var buffer [8]byte 70 | 71 | // read length 72 | n, err = io.ReadFull(r, buffer[:]) 73 | if errors.Is(err, io.EOF) { 74 | code, err = common.EntryNoMore, nil 75 | return 76 | } 77 | if err != nil { 78 | code = common.EntryCorrupted 79 | return 80 | } 81 | 82 | // check length 83 | sizeAndSum := common.Endianese.Uint64(buffer[:]) 84 | size := sizeAndSum >> 32 85 | if size == 0 { 86 | code = common.EntryZeroSize 87 | return 88 | } 89 | if size > common.MaxEntrySize { 90 | code = common.EntryTooBig 91 | return 92 | } 93 | 94 | // read payload 95 | data := e.alloc(int(size)) 96 | 97 | n_, err := io.ReadFull(r, data) 98 | n += n_ 99 | 100 | if err != nil { 101 | code = common.EntryCorrupted 102 | return 103 | } 104 | 105 | // checksum 106 | if 
crc32.ChecksumIEEE(data) != uint32(sizeAndSum) { // downcast to get lower 32-bit 107 | code, err = common.EntryCorrupted, common.ErrEntryInvalidCheckSum 108 | } else { 109 | *e = data 110 | code = common.NoError 111 | } 112 | 113 | return 114 | } 115 | 116 | // CloneFrom other entry. 117 | func (e *Entry) CloneFrom(other Entry) { 118 | data := e.alloc(len(other)) 119 | copy(data, other) 120 | *e = data 121 | } 122 | 123 | // alloc slice from entry if capable. If not, create new one. 124 | func (e *Entry) alloc(expected int) (data []byte) { 125 | data = *e 126 | if cap(data) >= expected { 127 | data = data[:expected] 128 | } else { 129 | data = make([]byte, expected) 130 | } 131 | return 132 | } 133 | 134 | // Batch of entries. 135 | type Batch struct { 136 | entries []Entry 137 | } 138 | 139 | // NewBatch with capacity. 140 | func NewBatch(capacity int) Batch { 141 | return Batch{ 142 | entries: make([]Entry, 0, capacity), 143 | } 144 | } 145 | 146 | // ValidateSize checks size of entries. 147 | func (b *Batch) ValidateSize(size int) bool { 148 | for _, e := range b.entries { 149 | if len(e) > size { 150 | return false 151 | } 152 | } 153 | return true 154 | } 155 | 156 | // Len is number of entries inside Batch. 157 | func (b *Batch) Len() int { 158 | return len(b.entries) 159 | } 160 | 161 | // Append an entry. 162 | func (b *Batch) Append(e Entry) { 163 | if len(e) > 0 { 164 | b.entries = append(b.entries, e) 165 | } 166 | } 167 | 168 | // Marshal into writer. 169 | func (b *Batch) Marshal(w io.Writer, format common.EntryFormat) (code common.ErrCode, err error) { 170 | if b.Len() > 0 { 171 | for _, e := range b.entries { 172 | if code, err = e.Marshal(w, format); err != nil { 173 | return code, err 174 | } 175 | } 176 | } 177 | return 178 | } 179 | 180 | // Reset batch. 
func (b *Batch) Reset() {
	// Nil out each entry so its backing array can be garbage-collected,
	// then truncate while keeping the slice's capacity for reuse.
	if b.Len() > 0 {
		for i := range b.entries {
			b.entries[i] = nil
		}
		b.entries = b.entries[:0]
	}
}
--------------------------------------------------------------------------------
/segment/v1/segment.go:
--------------------------------------------------------------------------------
package segv1

import (
	"io"
	"sync/atomic"

	"github.com/linxGnu/pqueue/common"
	"github.com/linxGnu/pqueue/entry"

	"github.com/hashicorp/go-multierror"
)

// Segment represents a portion (segment) of a persistent queue.
type Segment struct {
	readOnly bool // true when created via NewReadOnlySegment

	entryFormat common.EntryFormat
	w           entry.Writer

	offset     uint32 // number of entries read so far
	numEntries uint32 // number of entries written (read atomically by readers)
	maxEntries uint32 // write capacity of this segment
	r          entry.Reader
}

// NewReadOnlySegment creates new Segment for readonly.
// It reads and validates the 4-byte entry-format header from source and
// returns the number of header bytes consumed.
func NewReadOnlySegment(source io.ReadSeekCloser) (*Segment, int, error) {
	// get entry format
	var buf [4]byte
	n, err := io.ReadFull(source, buf[:])
	if err != nil {
		return nil, n, err
	}

	// check entry format
	entryFormat := common.Endianese.Uint32(buf[:])
	switch entryFormat {
	case common.EntryV1:

	default:
		return nil, n, common.ErrEntryUnsupportedFormat
	}

	// ok now
	return &Segment{
		readOnly:    true,
		entryFormat: entryFormat,
		r:           newSegmentReader(newBufferReader(source), entryFormat),
	}, n, nil
}

// NewSegment creates a writable Segment backed by w.
func NewSegment(w io.WriteCloser, entryFormat common.EntryFormat, maxEntries uint32) (*Segment, error) {
	switch entryFormat {
	case common.EntryV1:

	default:
		return nil, common.ErrEntryUnsupportedFormat
	}

	// write header: [EntryFormat]
	var buf [4]byte
	common.Endianese.PutUint32(buf[:], uint32(entryFormat))
	_, err := w.Write(buf[:])
	if err != nil {
		// Ownership of w transfers to the segment; close it on failure.
		_ = w.Close()
		return nil, err
	}

	// ok now
	return &Segment{
		readOnly:    false,
		entryFormat: entryFormat,
		maxEntries:  maxEntries,
		w:           newSegmentWriter(w, entryFormat),
	}, nil
}

// Close segment. Safe on a nil receiver; closes reader and/or writer
// and combines their errors.
func (s *Segment) Close() (err error) {
	if s == nil {
		return
	}
	if s.r != nil {
		err = s.r.Close()
	}
	if s.w != nil {
		err = multierror.Append(err, s.w.Close()).ErrorOrNil()
	}
	return
}

// Reading from source.
// Skips the 4-byte entry-format header (assumed already validated when the
// segment was created) and attaches a reader for subsequent ReadEntry calls.
func (s *Segment) Reading(source io.ReadSeekCloser) (n int, err error) {
	// should bypass entryFormat
	var dummy [4]byte
	n, err = io.ReadFull(source, dummy[:])

	// no problem?
	if err == nil {
		s.r = newSegmentReader(newBufferReader(source), s.entryFormat)
	}

	return
}

// WriteEntry to segment.
108 | func (s *Segment) WriteEntry(e entry.Entry) (common.ErrCode, error) { 109 | // check entry size 110 | if len(e) == 0 { 111 | return common.NoError, nil 112 | } 113 | if len(e) > common.MaxEntrySize { 114 | return common.EntryTooBig, common.ErrEntryTooBig 115 | } 116 | 117 | return s.writeEntry(e) 118 | } 119 | 120 | func (s *Segment) writeEntry(e entry.Entry) (common.ErrCode, error) { 121 | if s.numEntries >= s.maxEntries { 122 | return common.SegmentNoMoreWrite, nil 123 | } 124 | 125 | code, err := s.w.WriteEntry(e) 126 | if (code == common.NoError && atomic.AddUint32(&s.numEntries, 1) >= s.maxEntries) || 127 | code == common.SegmentCorrupted { 128 | _ = s.w.Close() 129 | } 130 | 131 | return code, err 132 | } 133 | 134 | // WriteBatch to segment. 135 | func (s *Segment) WriteBatch(b entry.Batch) (common.ErrCode, error) { 136 | // check entry size 137 | if !b.ValidateSize(common.MaxEntrySize) { 138 | return common.EntryTooBig, common.ErrEntryTooBig 139 | } 140 | return s.writeBatch(b) 141 | } 142 | 143 | func (s *Segment) writeBatch(b entry.Batch) (common.ErrCode, error) { 144 | if s.numEntries >= s.maxEntries { 145 | return common.SegmentNoMoreWrite, nil 146 | } 147 | 148 | code, err := s.w.WriteBatch(b) 149 | if (code == common.NoError && atomic.AddUint32(&s.numEntries, uint32(b.Len())) >= s.maxEntries) || 150 | code == common.SegmentCorrupted { 151 | _ = s.w.Close() 152 | } 153 | 154 | return code, err 155 | } 156 | 157 | // ReadEntry from segment. 158 | func (s *Segment) ReadEntry(e *entry.Entry) (common.ErrCode, int, error) { 159 | if !s.readOnly { 160 | // readable? 
161 | if s.offset == atomic.LoadUint32(&s.numEntries) { 162 | if s.offset >= s.maxEntries { 163 | _ = s.r.Close() 164 | return common.SegmentNoMoreReadStrong, 0, nil 165 | } 166 | 167 | return common.SegmentNoMoreReadWeak, 0, nil 168 | } 169 | 170 | s.offset++ 171 | } 172 | 173 | return s.readEntry(e) 174 | } 175 | 176 | func (s *Segment) readEntry(e *entry.Entry) (common.ErrCode, int, error) { 177 | code, n, err := s.r.ReadEntry(e) 178 | 179 | switch code { 180 | case common.NoError: 181 | return common.NoError, n, nil 182 | 183 | case common.SegmentNoMoreReadStrong: 184 | _ = s.r.Close() 185 | return common.SegmentNoMoreReadStrong, 0, nil 186 | 187 | case common.SegmentNoMoreReadWeak: 188 | if s.readOnly { 189 | _ = s.r.Close() 190 | return common.SegmentNoMoreReadStrong, 0, nil 191 | } 192 | return common.SegmentNoMoreReadWeak, 0, nil 193 | 194 | default: // corrupted 195 | _ = s.r.Close() 196 | return common.SegmentCorrupted, n, err 197 | } 198 | } 199 | 200 | // SeekToRead - offset from beginning of Segment. 
201 | func (s *Segment) SeekToRead(offset int64) error { 202 | _, err := s.r.Seek(offset, 0) 203 | return err 204 | } 205 | -------------------------------------------------------------------------------- /entry/entry_test.go: -------------------------------------------------------------------------------- 1 | package entry 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io" 7 | "testing" 8 | 9 | "github.com/linxGnu/pqueue/common" 10 | 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | func TestEntryUnmarshal(t *testing.T) { 15 | t.Run("UnsupportedFormat", func(t *testing.T) { 16 | var e Entry 17 | code, n, err := e.Unmarshal(nil, 123) 18 | require.Equal(t, common.EntryUnsupportedFormat, code) 19 | require.Equal(t, common.ErrEntryUnsupportedFormat, err) 20 | require.Equal(t, 0, n) 21 | }) 22 | 23 | t.Run("Corrupted", func(t *testing.T) { 24 | var e Entry 25 | 26 | // length missing 27 | { 28 | code, n, err := e.Unmarshal(bytes.NewBuffer([]byte{1, 2}), common.EntryV1) 29 | require.Equal(t, common.EntryCorrupted, code) 30 | require.Error(t, err) 31 | require.Equal(t, 2, n) 32 | } 33 | 34 | // length zero 35 | { 36 | code, n, err := e.Unmarshal(bytes.NewBuffer([]byte{0, 0, 0, 0, 0, 0, 0, 0}), common.EntryV1) 37 | require.Equal(t, common.EntryZeroSize, code) 38 | require.NoError(t, err) 39 | require.Equal(t, 8, n) 40 | } 41 | 42 | // too big 43 | { 44 | code, n, err := e.Unmarshal(bytes.NewBuffer([]byte{255, 255, 0, 0, 0, 0, 0, 0}), common.EntryV1) 45 | require.Equal(t, common.EntryTooBig, code) 46 | require.NoError(t, err) 47 | require.Equal(t, 8, n) 48 | } 49 | 50 | // missing payload 51 | { 52 | code, n, err := e.Unmarshal(bytes.NewBuffer([]byte{0, 0, 0, 2, 1}), common.EntryV1) 53 | require.Equal(t, common.EntryCorrupted, code) 54 | require.Error(t, err) 55 | require.Equal(t, 5, n) 56 | } 57 | 58 | // missing sum 59 | { 60 | code, n, err := e.Unmarshal(bytes.NewBuffer([]byte{0, 0, 0, 2, 1, 1}), common.EntryV1) 61 | require.Equal(t, common.EntryCorrupted, code) 
62 | require.Error(t, err) 63 | require.Equal(t, 6, n) 64 | } 65 | 66 | // invalid sum 67 | { 68 | code, n, err := e.Unmarshal(bytes.NewBuffer([]byte{0, 0, 0, 2, 1, 1, 1, 2, 3, 4}), common.EntryV1) 69 | require.Equal(t, common.EntryCorrupted, code) 70 | require.Equal(t, common.ErrEntryInvalidCheckSum, err) 71 | require.Equal(t, 10, n) 72 | } 73 | 74 | // corrupt 75 | { 76 | code, n, err := e.Unmarshal(bytes.NewBuffer([]byte{0, 0, 0, 2, 1, 1, 1, 2, 3}), common.EntryV1) 77 | require.Equal(t, common.EntryCorrupted, code) 78 | require.Error(t, err) 79 | require.Equal(t, 9, n) 80 | } 81 | }) 82 | 83 | t.Run("Happy", func(t *testing.T) { 84 | var e Entry = make([]byte, 32) 85 | 86 | buf := bytes.NewBuffer([]byte{0, 0, 0, 2, 173, 62, 94, 152, 19, 31}) 87 | 88 | code, n, err := e.Unmarshal(buf, common.EntryV1) 89 | require.Equal(t, common.NoError, code) 90 | require.NoError(t, err) 91 | require.Equal(t, 10, n) 92 | 93 | code, n, err = e.Unmarshal(buf, common.EntryV1) 94 | require.Equal(t, common.EntryNoMore, code) 95 | require.NoError(t, err) 96 | require.Equal(t, 0, n) 97 | }) 98 | 99 | t.Run("Clone", func(t *testing.T) { 100 | var e1 Entry = make([]byte, 16, 32) 101 | var e2 Entry 102 | e2.CloneFrom(e1) 103 | 104 | require.True(t, &e1[0] != &e2[0]) 105 | require.EqualValues(t, e1, e2) 106 | 107 | origin := &e2[0] 108 | e1[0] = 12 109 | e2.CloneFrom(e1) 110 | require.True(t, &e1[0] != &e2[0]) 111 | require.EqualValues(t, e1, e2) 112 | require.True(t, &e2[0] == origin) 113 | 114 | e1 = e1[:31] 115 | e2.CloneFrom(e1) 116 | require.True(t, &e1[0] != &e2[0]) 117 | require.EqualValues(t, e1, e2) 118 | require.True(t, &e2[0] != origin) 119 | }) 120 | } 121 | 122 | type errorWriter struct{} 123 | 124 | func (e *errorWriter) Write([]byte) (int, error) { return 0, fmt.Errorf("fake error") } 125 | 126 | func (e *errorWriter) Flush() error { return nil } 127 | 128 | type noopFlusher struct{ io.Writer } 129 | 130 | func (f *noopFlusher) Flush() error { return nil } 131 | 132 | func 
TestEntryMarshal(t *testing.T) { 133 | t.Run("UnsupportedFormat", func(t *testing.T) { 134 | var e Entry = []byte{1, 2, 3, 4} 135 | 136 | code, err := e.Marshal(nil, 123) 137 | require.Equal(t, common.ErrEntryUnsupportedFormat, err) 138 | require.Equal(t, common.EntryUnsupportedFormat, code) 139 | }) 140 | 141 | t.Run("ErrorHandling", func(t *testing.T) { 142 | var e Entry = []byte{1, 2, 3, 4} 143 | 144 | code, err := e.Marshal(&errorWriter{}, common.EntryV1) 145 | require.Equal(t, common.EntryWriteErr, code) 146 | require.Error(t, err) 147 | 148 | batch := NewBatch(2) 149 | batch.Append(e) 150 | 151 | code, err = batch.Marshal(&errorWriter{}, common.EntryV1) 152 | require.Equal(t, common.EntryWriteErr, code) 153 | require.Error(t, err) 154 | }) 155 | 156 | t.Run("Happy", func(t *testing.T) { 157 | var e Entry = []byte{1, 2, 3, 4} 158 | var buf bytes.Buffer 159 | 160 | code, err := e.Marshal(&noopFlusher{Writer: &buf}, common.EntryV1) 161 | require.NoError(t, err) 162 | require.Equal(t, code, common.NoError) 163 | require.EqualValues(t, []byte{0, 0, 0, 4, 0xb6, 0x3c, 0xfb, 0xcd, 1, 2, 3, 4}, buf.Bytes()) 164 | }) 165 | 166 | t.Run("HappyBatch", func(t *testing.T) { 167 | batch := NewBatch(2) 168 | batch.Append([]byte{1, 2, 3, 4}) 169 | batch.Append([]byte{1, 2, 3, 4}) 170 | require.Equal(t, 2, batch.Len()) 171 | require.True(t, batch.ValidateSize(4)) 172 | require.False(t, batch.ValidateSize(3)) 173 | 174 | var buf bytes.Buffer 175 | 176 | code, err := batch.Marshal(&noopFlusher{Writer: &buf}, common.EntryV1) 177 | require.NoError(t, err) 178 | require.Equal(t, code, common.NoError) 179 | require.EqualValues(t, []byte{ 180 | 0, 0, 0, 4, 0xb6, 0x3c, 0xfb, 0xcd, 1, 2, 3, 4, 181 | 0, 0, 0, 4, 0xb6, 0x3c, 0xfb, 0xcd, 1, 2, 3, 4, 182 | }, buf.Bytes()) 183 | }) 184 | } 185 | 186 | func TestEntry(t *testing.T) { 187 | var e Entry = make([]byte, 123) 188 | for i := range e { 189 | e[i] = byte(i) 190 | } 191 | 192 | var buf bytes.Buffer 193 | 194 | code, err := 
e.Marshal(&noopFlusher{Writer: &buf}, common.EntryV1) 195 | require.NoError(t, err) 196 | require.Equal(t, common.NoError, code) 197 | 198 | var tmp Entry 199 | code, n, err := tmp.Unmarshal(&buf, common.EntryV1) 200 | require.NoError(t, err) 201 | require.Equal(t, common.NoError, code) 202 | require.Equal(t, 131, n) 203 | 204 | require.EqualValues(t, e, tmp) 205 | } 206 | 207 | func TestBatch(t *testing.T) { 208 | b := NewBatch(2) 209 | 210 | b.Append([]byte{1, 2, 3}) 211 | require.Equal(t, 1, b.Len()) 212 | 213 | b.Reset() 214 | require.Equal(t, 0, b.Len()) 215 | } 216 | -------------------------------------------------------------------------------- /queue_bench_test.go: -------------------------------------------------------------------------------- 1 | package pqueue 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "runtime" 7 | "sync" 8 | "sync/atomic" 9 | "testing" 10 | 11 | "github.com/linxGnu/pqueue/common" 12 | "github.com/linxGnu/pqueue/entry" 13 | 14 | "github.com/grandecola/bigqueue" 15 | ) 16 | 17 | const ( 18 | totalEntries = 10000 19 | totalEntriesForRW = 10000 20 | numReader = 1 21 | flushOps = 10 22 | ) 23 | 24 | func BenchmarkPQueueWriting_16(b *testing.B) { 25 | b.ReportAllocs() 26 | b.SetBytes(16 * totalEntries) 27 | b.ResetTimer() 28 | for i := 0; i < b.N; i++ { 29 | benchmarkPQueue(b, totalEntries, 16, false) 30 | } 31 | } 32 | 33 | func BenchmarkBigQueueWriting_16(b *testing.B) { 34 | b.ReportAllocs() 35 | b.SetBytes(16 * totalEntries) 36 | b.ResetTimer() 37 | for i := 0; i < b.N; i++ { 38 | benchmarkBigQueue(b, totalEntries, 16, false) 39 | } 40 | } 41 | 42 | func BenchmarkPQueueWriting_64(b *testing.B) { 43 | b.ReportAllocs() 44 | b.SetBytes(64 * totalEntries) 45 | b.ResetTimer() 46 | for i := 0; i < b.N; i++ { 47 | benchmarkPQueue(b, totalEntries, 64, false) 48 | } 49 | } 50 | 51 | func BenchmarkBigQueueWriting_64(b *testing.B) { 52 | b.ReportAllocs() 53 | b.SetBytes(64 * totalEntries) 54 | b.ResetTimer() 55 | for i := 0; i < b.N; i++ { 
56 | benchmarkBigQueue(b, totalEntries, 64, false) 57 | } 58 | } 59 | 60 | func BenchmarkPQueueWriting_256(b *testing.B) { 61 | b.ReportAllocs() 62 | b.SetBytes(256 * totalEntries) 63 | b.ResetTimer() 64 | for i := 0; i < b.N; i++ { 65 | benchmarkPQueue(b, totalEntries, 256, false) 66 | } 67 | } 68 | 69 | func BenchmarkBigQueueWriting_256(b *testing.B) { 70 | b.ReportAllocs() 71 | b.SetBytes(256 * totalEntries) 72 | b.ResetTimer() 73 | for i := 0; i < b.N; i++ { 74 | benchmarkBigQueue(b, totalEntries, 256, false) 75 | } 76 | } 77 | 78 | func BenchmarkPQueueWriting_2048(b *testing.B) { 79 | b.ReportAllocs() 80 | b.SetBytes(2048 * totalEntries) 81 | b.ResetTimer() 82 | for i := 0; i < b.N; i++ { 83 | benchmarkPQueue(b, totalEntries, 2048, false) 84 | } 85 | } 86 | 87 | func BenchmarkBigQueueWriting_2048(b *testing.B) { 88 | b.ReportAllocs() 89 | b.SetBytes(2048 * totalEntries) 90 | b.ResetTimer() 91 | for i := 0; i < b.N; i++ { 92 | benchmarkBigQueue(b, totalEntries, 2048, false) 93 | } 94 | } 95 | 96 | func BenchmarkPQueueWriting_16K(b *testing.B) { 97 | b.ReportAllocs() 98 | b.SetBytes((16 << 10) * totalEntries) 99 | b.ResetTimer() 100 | for i := 0; i < b.N; i++ { 101 | benchmarkPQueue(b, totalEntries, 16<<10, false) 102 | } 103 | } 104 | 105 | func BenchmarkBigQueueWriting_16K(b *testing.B) { 106 | b.ReportAllocs() 107 | b.SetBytes((16 << 10) * totalEntries) 108 | b.ResetTimer() 109 | for i := 0; i < b.N; i++ { 110 | benchmarkBigQueue(b, totalEntries, 16<<10, false) 111 | } 112 | } 113 | 114 | func BenchmarkPQueueWriting_64K(b *testing.B) { 115 | b.ReportAllocs() 116 | b.SetBytes((64 << 10) * totalEntries) 117 | b.ResetTimer() 118 | for i := 0; i < b.N; i++ { 119 | benchmarkPQueue(b, totalEntries, 64<<10, false) 120 | } 121 | } 122 | 123 | func BenchmarkBigQueueWriting_64K(b *testing.B) { 124 | b.ReportAllocs() 125 | b.SetBytes((64 << 10) * totalEntries) 126 | b.ResetTimer() 127 | for i := 0; i < b.N; i++ { 128 | benchmarkBigQueue(b, totalEntries, 64<<10, false) 
129 | } 130 | } 131 | 132 | func BenchmarkPQueueRW_16(b *testing.B) { 133 | b.ReportAllocs() 134 | b.SetBytes(16 * totalEntriesForRW) 135 | b.ResetTimer() 136 | for i := 0; i < b.N; i++ { 137 | benchmarkPQueue(b, totalEntriesForRW, 16, true) 138 | } 139 | } 140 | 141 | func BenchmarkBigQueueRW_16(b *testing.B) { 142 | b.ReportAllocs() 143 | b.SetBytes(16 * totalEntriesForRW) 144 | b.ResetTimer() 145 | for i := 0; i < b.N; i++ { 146 | benchmarkBigQueue(b, totalEntriesForRW, 16, true) 147 | } 148 | } 149 | 150 | func BenchmarkPQueueRW_64(b *testing.B) { 151 | b.ReportAllocs() 152 | b.SetBytes(64 * totalEntriesForRW) 153 | b.ResetTimer() 154 | for i := 0; i < b.N; i++ { 155 | benchmarkPQueue(b, totalEntriesForRW, 64, true) 156 | } 157 | } 158 | 159 | func BenchmarkBigQueueRW_64(b *testing.B) { 160 | b.ReportAllocs() 161 | b.SetBytes(64 * totalEntriesForRW) 162 | b.ResetTimer() 163 | for i := 0; i < b.N; i++ { 164 | benchmarkBigQueue(b, totalEntriesForRW, 64, true) 165 | } 166 | } 167 | 168 | func BenchmarkPQueueRW_256(b *testing.B) { 169 | b.ReportAllocs() 170 | b.SetBytes(256 * totalEntriesForRW) 171 | b.ResetTimer() 172 | for i := 0; i < b.N; i++ { 173 | benchmarkPQueue(b, totalEntriesForRW, 256, true) 174 | } 175 | } 176 | 177 | func BenchmarkBigQueueRW_256(b *testing.B) { 178 | b.ReportAllocs() 179 | b.SetBytes(256 * totalEntriesForRW) 180 | b.ResetTimer() 181 | for i := 0; i < b.N; i++ { 182 | benchmarkBigQueue(b, totalEntries, 256, true) 183 | } 184 | } 185 | 186 | func BenchmarkPQueueRW_2048(b *testing.B) { 187 | b.ReportAllocs() 188 | b.SetBytes(2048 * totalEntriesForRW) 189 | b.ResetTimer() 190 | for i := 0; i < b.N; i++ { 191 | benchmarkPQueue(b, totalEntriesForRW, 2048, true) 192 | } 193 | } 194 | 195 | func BenchmarkBigQueueRW_2048(b *testing.B) { 196 | b.ReportAllocs() 197 | b.SetBytes(2048 * totalEntriesForRW) 198 | b.ResetTimer() 199 | for i := 0; i < b.N; i++ { 200 | benchmarkBigQueue(b, totalEntries, 2048, true) 201 | } 202 | } 203 | 204 | func 
BenchmarkPQueueRW_16K(b *testing.B) { 205 | b.ReportAllocs() 206 | b.SetBytes((16 << 10) * totalEntriesForRW) 207 | b.ResetTimer() 208 | for i := 0; i < b.N; i++ { 209 | benchmarkPQueue(b, totalEntriesForRW, 16<<10, true) 210 | } 211 | } 212 | 213 | func BenchmarkBigQueueRW_16K(b *testing.B) { 214 | b.ReportAllocs() 215 | b.SetBytes((16 << 10) * totalEntriesForRW) 216 | b.ResetTimer() 217 | for i := 0; i < b.N; i++ { 218 | benchmarkBigQueue(b, totalEntries, 16<<10, true) 219 | } 220 | } 221 | 222 | func BenchmarkPQueueRW_64K(b *testing.B) { 223 | b.ReportAllocs() 224 | b.SetBytes((64 << 10) * totalEntriesForRW) 225 | b.ResetTimer() 226 | for i := 0; i < b.N; i++ { 227 | benchmarkPQueue(b, totalEntriesForRW, 64<<10, true) 228 | } 229 | } 230 | 231 | func BenchmarkBigQueueRW_64K(b *testing.B) { 232 | b.ReportAllocs() 233 | b.SetBytes((64 << 10) * totalEntriesForRW) 234 | b.ResetTimer() 235 | for i := 0; i < b.N; i++ { 236 | benchmarkBigQueue(b, totalEntries, 64<<10, true) 237 | } 238 | } 239 | 240 | func prepareDataDir(dir string) string { 241 | dataDir := filepath.Join(tmpDir, dir) 242 | _ = os.RemoveAll(dataDir) 243 | _ = os.MkdirAll(dataDir, 0o777) 244 | return dataDir 245 | } 246 | 247 | func benchmarkPQueue(b *testing.B, size int, entrySize int, alsoRead bool) { 248 | b.StopTimer() 249 | 250 | var path string 251 | if alsoRead { 252 | path = "bench_rw_pqueue" 253 | } else { 254 | path = "bench_pqueue" 255 | } 256 | 257 | dataDir := prepareDataDir(path) 258 | defer func() { 259 | _ = os.RemoveAll(dataDir) 260 | }() 261 | 262 | q, _ := New(dataDir, 2000) 263 | defer func() { 264 | _ = q.Close() 265 | }() 266 | 267 | b.StartTimer() 268 | 269 | var wg sync.WaitGroup 270 | 271 | if alsoRead { 272 | var total uint32 273 | for i := 0; i < numReader; i++ { 274 | wg.Add(1) 275 | go func() { 276 | defer wg.Done() 277 | 278 | var e entry.Entry 279 | for { 280 | if atomic.LoadUint32(&total) >= uint32(size) { 281 | return 282 | } 283 | 284 | if ok := q.Dequeue(&e); ok { 285 
| atomic.AddUint32(&total, 1) 286 | } 287 | 288 | runtime.Gosched() 289 | } 290 | }() 291 | } 292 | } 293 | 294 | batch := entry.NewBatch(flushOps) 295 | for i := 0; i < size; i++ { 296 | buf := make([]byte, entrySize) 297 | common.Endianese.PutUint32(buf, uint32(i)) 298 | 299 | batch.Append(buf) 300 | if batch.Len() == flushOps { 301 | _ = q.EnqueueBatch(batch) 302 | batch.Reset() 303 | } 304 | } 305 | 306 | if batch.Len() > 0 { 307 | _ = q.EnqueueBatch(batch) 308 | } 309 | 310 | wg.Wait() 311 | } 312 | 313 | func benchmarkBigQueue(b *testing.B, size int, entrySize int, alsoRead bool) { 314 | b.StopTimer() 315 | 316 | var path string 317 | if alsoRead { 318 | path = "bench_rw_bqueue" 319 | } else { 320 | path = "bench_bqueue" 321 | } 322 | 323 | dataDir := prepareDataDir(path) 324 | defer func() { 325 | _ = os.RemoveAll(dataDir) 326 | }() 327 | 328 | q, _ := bigqueue.NewMmapQueue(dataDir, 329 | bigqueue.SetPeriodicFlushOps(flushOps), 330 | bigqueue.SetMaxInMemArenas(256<<20), 331 | bigqueue.SetArenaSize(512<<20)) 332 | defer func() { 333 | _ = q.Close() 334 | }() 335 | 336 | b.StartTimer() 337 | 338 | var wg sync.WaitGroup 339 | 340 | var total uint32 341 | if alsoRead { 342 | for i := 0; i < numReader; i++ { 343 | wg.Add(1) 344 | go func() { 345 | defer wg.Done() 346 | for { 347 | if atomic.LoadUint32(&total) >= uint32(size) { 348 | return 349 | } 350 | 351 | if _, err := q.Dequeue(); err == nil { 352 | atomic.AddUint32(&total, 1) 353 | } 354 | 355 | runtime.Gosched() 356 | } 357 | }() 358 | } 359 | } 360 | 361 | for i := 0; i < size; i++ { 362 | buf := make([]byte, entrySize) 363 | common.Endianese.PutUint32(buf, uint32(i)) 364 | _ = q.Enqueue(buf) 365 | } 366 | 367 | wg.Wait() 368 | } 369 | -------------------------------------------------------------------------------- /segment/v1/segment_test.go: -------------------------------------------------------------------------------- 1 | package segv1 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | 
"github.com/linxGnu/pqueue/common" 8 | "github.com/linxGnu/pqueue/entry" 9 | 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func TestSegment(t *testing.T) { 14 | t.Run("NewSegmentFailure", func(t *testing.T) { 15 | { 16 | _, err := NewSegment(nil, 123, 4) 17 | require.Error(t, err) 18 | } 19 | 20 | { 21 | _, err := NewSegment(&mockWriterErr{onWrite: true}, common.EntryV1, 4) 22 | require.Error(t, err) 23 | } 24 | }) 25 | 26 | t.Run("NewSegmentOK", func(t *testing.T) { 27 | _, err := NewSegment(&mockWriter{Buffer: bytes.NewBuffer(make([]byte, 0, 16))}, common.EntryV1, 4) 28 | require.NoError(t, err) 29 | }) 30 | } 31 | 32 | func TestNewSegmentReadWrite(t *testing.T) { 33 | t.Run("Closing", func(t *testing.T) { 34 | var s *Segment 35 | require.NoError(t, s.Close()) 36 | }) 37 | 38 | t.Run("Happy", func(t *testing.T) { 39 | buffer := bytes.NewBuffer(make([]byte, 0, 16)) 40 | 41 | s, err := NewSegment(&mockWriter{Buffer: buffer}, common.EntryV1, 2) 42 | require.NoError(t, err) 43 | 44 | // reading 45 | n, err := s.Reading(newMockReadSeeker(buffer)) 46 | require.NoError(t, err) 47 | require.Equal(t, 4, n) 48 | 49 | code, err := s.WriteEntry([]byte{}) 50 | require.NoError(t, err) 51 | require.Equal(t, common.NoError, code) 52 | 53 | code, err = s.WriteEntry([]byte("alpha")) 54 | require.NoError(t, err) 55 | require.Equal(t, common.NoError, code) 56 | 57 | var e entry.Entry 58 | code, n, err = s.ReadEntry(&e) 59 | require.NoError(t, err) 60 | require.Equal(t, common.NoError, code) 61 | require.Equal(t, "alpha", string(e)) 62 | require.Equal(t, 13, n) 63 | 64 | code, n, err = s.ReadEntry(&e) 65 | require.NoError(t, err) 66 | require.Equal(t, common.SegmentNoMoreReadWeak, code) 67 | require.Equal(t, 0, n) 68 | 69 | code, err = s.WriteEntry([]byte("beta")) 70 | require.NoError(t, err) 71 | require.Equal(t, common.NoError, code) 72 | 73 | code, n, err = s.ReadEntry(&e) 74 | require.NoError(t, err) 75 | require.Equal(t, common.NoError, code) 76 | require.Equal(t, 
"beta", string(e)) 77 | require.Equal(t, 12, n) 78 | 79 | code, err = s.WriteEntry([]byte("gamma")) 80 | require.NoError(t, err) 81 | require.Equal(t, common.SegmentNoMoreWrite, code) 82 | 83 | code, n, err = s.ReadEntry(&e) 84 | require.NoError(t, err) 85 | require.Equal(t, common.SegmentNoMoreReadStrong, code) 86 | require.Equal(t, 0, n) 87 | 88 | require.NoError(t, s.SeekToRead(0)) 89 | }) 90 | 91 | t.Run("HappyBatch", func(t *testing.T) { 92 | buffer := bytes.NewBuffer(make([]byte, 0, 16)) 93 | 94 | s, err := NewSegment(&mockWriter{Buffer: buffer}, common.EntryV1, 2) 95 | require.NoError(t, err) 96 | 97 | // reading 98 | n, err := s.Reading(newMockReadSeeker(buffer)) 99 | require.NoError(t, err) 100 | require.Equal(t, 4, n) 101 | 102 | b := entry.NewBatch(3) 103 | b.Append([]byte("alpha")) 104 | b.Append([]byte("beta")) 105 | b.Append([]byte("gama")) 106 | 107 | code, err := s.WriteBatch(b) 108 | require.NoError(t, err) 109 | require.Equal(t, common.NoError, code) 110 | 111 | var e entry.Entry 112 | code, n, err = s.ReadEntry(&e) 113 | require.NoError(t, err) 114 | require.Equal(t, common.NoError, code) 115 | require.Equal(t, "alpha", string(e)) 116 | require.Equal(t, 13, n) 117 | 118 | code, n, err = s.ReadEntry(&e) 119 | require.NoError(t, err) 120 | require.Equal(t, common.NoError, code) 121 | require.Equal(t, "beta", string(e)) 122 | require.Equal(t, 12, n) 123 | 124 | code, n, err = s.ReadEntry(&e) 125 | require.NoError(t, err) 126 | require.Equal(t, common.NoError, code) 127 | require.Equal(t, "gama", string(e)) 128 | require.Equal(t, 12, n) 129 | 130 | code, n, err = s.ReadEntry(&e) 131 | require.NoError(t, err) 132 | require.Equal(t, common.SegmentNoMoreReadStrong, code) 133 | require.Equal(t, 0, n) 134 | }) 135 | 136 | t.Run("WriteError", func(t *testing.T) { 137 | s, err := NewSegment(&mockWriter{Buffer: bytes.NewBuffer(make([]byte, 0, 16))}, common.EntryV1, 2) 138 | require.NoError(t, err) 139 | 140 | // hiject another writer 141 | s.w = 
newSegmentWriter(&mockWriterErr{onWrite: true}, common.EntryV1) 142 | 143 | code, err := s.WriteEntry([]byte{}) 144 | require.NoError(t, err) 145 | require.Equal(t, common.NoError, code) 146 | 147 | code, err = s.WriteEntry([]byte("alpha")) 148 | require.Error(t, err) 149 | require.Equal(t, common.SegmentCorrupted, code) 150 | 151 | b := entry.NewBatch(2) 152 | b.Append([]byte{1, 2, 3}) 153 | 154 | code, err = s.WriteBatch(b) 155 | require.Error(t, err) 156 | require.Equal(t, common.SegmentCorrupted, code) 157 | }) 158 | 159 | t.Run("ReadOnly", func(t *testing.T) { 160 | // entry format header missing 161 | { 162 | buffer := bytes.NewBuffer([]byte{0, 0, 0}) 163 | _, _, err := NewReadOnlySegment(newMockReadSeeker(buffer)) 164 | require.Error(t, err) 165 | } 166 | 167 | // unsupported format 168 | { 169 | buffer := bytes.NewBuffer([]byte{0, 0, 0, 5}) 170 | _, n, err := NewReadOnlySegment(newMockReadSeeker(buffer)) 171 | require.Error(t, err) 172 | require.Equal(t, 4, n) 173 | } 174 | 175 | // corrupt 176 | { 177 | buffer := bytes.NewBuffer([]byte{0, 0, 0, 0, 1}) 178 | 179 | s, n, err := NewReadOnlySegment(newMockReadSeeker(buffer)) 180 | require.NoError(t, err) 181 | require.Equal(t, 4, n) 182 | 183 | var e entry.Entry 184 | 185 | code, n, err := s.ReadEntry(&e) 186 | require.Error(t, err) 187 | require.Equal(t, common.SegmentCorrupted, code) 188 | require.Equal(t, 1, n) 189 | } 190 | 191 | // ok 192 | { 193 | buffer := bytes.NewBuffer([]byte{0, 0, 0, 0}) 194 | 195 | s, n, err := NewReadOnlySegment(newMockReadSeeker(buffer)) 196 | require.NoError(t, err) 197 | require.Equal(t, 4, n) 198 | 199 | var e entry.Entry 200 | 201 | code, n, err := s.ReadEntry(&e) 202 | require.NoError(t, err) 203 | require.Equal(t, common.SegmentNoMoreReadStrong, code) 204 | require.Equal(t, 0, n) 205 | } 206 | 207 | { 208 | buffer := bytes.NewBuffer([]byte{0, 0, 0, 0}) 209 | 210 | s, n, err := NewReadOnlySegment(newMockReadSeeker(buffer)) 211 | require.NoError(t, err) 212 | require.Equal(t, 
4, n) 213 | 214 | var e entry.Entry 215 | 216 | code, n, err := s.readEntry(&e) 217 | require.NoError(t, err) 218 | require.Equal(t, common.SegmentNoMoreReadStrong, code) 219 | require.Equal(t, 0, n) 220 | 221 | // hijack the state 222 | s.readOnly = false 223 | code, n, err = s.readEntry(&e) 224 | require.NoError(t, err) 225 | require.Equal(t, common.SegmentNoMoreReadWeak, code) 226 | require.Equal(t, 0, n) 227 | 228 | _, _ = buffer.Write(segmentEnding) 229 | code, n, err = s.readEntry(&e) 230 | require.NoError(t, err) 231 | require.Equal(t, common.SegmentNoMoreReadStrong, code) 232 | require.Equal(t, 0, n) 233 | } 234 | }) 235 | } 236 | 237 | // func TestSegmentRace(t *testing.T) { 238 | // size := 20000 239 | // 240 | // // prepare temp file 241 | // tmpFile := filepath.Join(tmpDir, "segment.tmp") 242 | // 243 | // // create/trunc it 244 | // f, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) 245 | // require.NoError(t, err) 246 | // 247 | // // remove when done 248 | // defer func() { _ = os.Remove(tmpFile) }() 249 | // 250 | // // open temp file for reading 251 | // fr, err := os.Open(tmpFile) 252 | // require.NoError(t, err) 253 | // 254 | // // create new segment 255 | // s, err := NewSegment(f, common.EntryV1, uint32(size)) 256 | // require.NoError(t, err) 257 | // n, err := s.Reading(fr) 258 | // require.NoError(t, err) 259 | // require.Equal(t, 4, n) 260 | // defer func() { 261 | // _ = s.Close() 262 | // }() 263 | // 264 | // // start reader 265 | // var wg sync.WaitGroup 266 | // 267 | // collectValue := make([]int, size) 268 | // for i := 0; i < 8; i++ { 269 | // wg.Add(1) 270 | // go func() { 271 | // defer wg.Done() 272 | // 273 | // var e entry.Entry 274 | // for { 275 | // code, _, err := s.ReadEntry(&e) 276 | // if code == common.SegmentNoMoreReadStrong { 277 | // return 278 | // } 279 | // if code == common.SegmentNoMoreReadWeak { 280 | // time.Sleep(500 * time.Microsecond) 281 | // } else { 282 | // require.Equal(t, 
common.NoError, code) 283 | // require.NoError(t, err) 284 | // 285 | // value := common.Endianese.Uint32(e) 286 | // require.Less(t, value, uint32(size)) 287 | // collectValue[value]++ 288 | // } 289 | // } 290 | // }() 291 | // } 292 | // 293 | // ch := make(chan uint32, 1) 294 | // for i := 0; i < 8; i++ { 295 | // wg.Add(1) 296 | // go func() { 297 | // defer wg.Done() 298 | // 299 | // var buf [4]byte 300 | // for data := range ch { 301 | // time.Sleep(time.Millisecond) 302 | // 303 | // common.Endianese.PutUint32(buf[:], data) 304 | // code, err := s.WriteEntry(buf[:]) 305 | // require.NoError(t, err) 306 | // require.Equal(t, common.NoError, code) 307 | // } 308 | // }() 309 | // } 310 | // 311 | // for i := 0; i < size; i++ { 312 | // ch <- uint32(i) 313 | // } 314 | // close(ch) 315 | // 316 | // wg.Wait() 317 | // 318 | // for i := range collectValue { 319 | // require.Equal(t, 1, collectValue[i]) 320 | // } 321 | // } 322 | -------------------------------------------------------------------------------- /queue.go: -------------------------------------------------------------------------------- 1 | package pqueue 2 | 3 | import ( 4 | "container/list" 5 | "io" 6 | "os" 7 | "sync" 8 | 9 | "github.com/linxGnu/pqueue/common" 10 | "github.com/linxGnu/pqueue/entry" 11 | segmentPkg "github.com/linxGnu/pqueue/segment" 12 | segv1 "github.com/linxGnu/pqueue/segment/v1" 13 | 14 | "github.com/hashicorp/go-multierror" 15 | ) 16 | 17 | const ( 18 | segPrefix = "seg_" 19 | segOffsetFileSuffix = ".offset" 20 | ) 21 | 22 | type segment struct { 23 | seg segmentPkg.Segment 24 | path string 25 | readable bool 26 | } 27 | 28 | type queue struct { 29 | rLock sync.Mutex 30 | wLock sync.RWMutex 31 | segHeadWriter segmentHeadWriter 32 | segments *list.List 33 | offsetTracker struct { 34 | f *os.File 35 | offset int64 36 | } 37 | peek entry.Entry 38 | settings QueueSettings 39 | } 40 | 41 | func (q *queue) Close() (err error) { 42 | for { 43 | node := q.segments.Front() 44 | if 
node == nil { 45 | break 46 | } 47 | 48 | seg := q.segments.Remove(node).(*segment) 49 | if seg.seg != nil { 50 | err = multierror.Append(err, seg.seg.Close()).ErrorOrNil() 51 | } 52 | } 53 | err = multierror.Append(err, q.closeOffsetTracker()).ErrorOrNil() 54 | return 55 | } 56 | 57 | func (q *queue) Peek(dst *entry.Entry) (hasEntry bool) { 58 | q.rLock.Lock() 59 | 60 | hasEntry = q.peek != nil || q.dequeue(&q.peek) 61 | if hasEntry { 62 | dst.CloneFrom(q.peek) 63 | } 64 | 65 | q.rLock.Unlock() 66 | return 67 | } 68 | 69 | func (q *queue) Dequeue(dst *entry.Entry) (hasEntry bool) { 70 | q.rLock.Lock() 71 | if hasEntry = q.dequeue(dst); hasEntry { 72 | q.commitOffset() 73 | } 74 | q.rLock.Unlock() 75 | return 76 | } 77 | 78 | func (q *queue) dequeue(dst *entry.Entry) bool { 79 | if q.peek != nil { 80 | *dst = q.peek 81 | q.peek = nil 82 | return true 83 | } 84 | 85 | for { 86 | front := q.front() 87 | if front == nil { 88 | return false 89 | } 90 | 91 | head := front.Value.(*segment) 92 | if !head.readable { // should open the file? 
93 | q.offsetTracker.f = nil 94 | q.offsetTracker.offset = 0 95 | 96 | format, file, err := q.openSegmentForRead(head.path) 97 | if err == nil { 98 | var n int 99 | if n, err = q.startReadingSegment(format, head, file); err == nil { 100 | q.offsetTracker.offset = 4 + int64(n) 101 | 102 | var ( 103 | offset int64 104 | offsetFile *os.File 105 | ) 106 | offset, offsetFile, err = loadOffsetTracker(offsetFilePath(head.path)) 107 | if err == nil { 108 | q.offsetTracker.f = offsetFile 109 | if offset > 0 && head.seg.SeekToRead(offset) == nil { 110 | q.offsetTracker.offset = offset 111 | } 112 | } 113 | } 114 | } 115 | 116 | if err != nil { 117 | if file != nil { 118 | _ = file.Close() 119 | } 120 | if q.removeSegment(front) { 121 | return false 122 | } 123 | continue 124 | } 125 | 126 | // now readable 127 | head.readable = true 128 | } 129 | 130 | if n, hasElement, shouldCont := q.readEntryFromHead(head, front, dst); shouldCont { 131 | continue 132 | } else { 133 | q.offsetTracker.offset += int64(n) 134 | return hasElement 135 | } 136 | } 137 | } 138 | 139 | func (q *queue) front() (fr *list.Element) { 140 | q.wLock.RLock() 141 | fr = q.segments.Front() 142 | q.wLock.RUnlock() 143 | return 144 | } 145 | 146 | func (q *queue) openSegmentForRead(path string) (format common.SegmentFormat, f *os.File, err error) { 147 | f, err = os.Open(path) 148 | if err == nil { 149 | // read segment header 150 | format, err = q.segHeadWriter.ReadHeader(f) 151 | } 152 | 153 | if err != nil && f != nil { 154 | _ = f.Close() 155 | } 156 | 157 | return 158 | } 159 | 160 | func (q *queue) startReadingSegment(format common.SegmentFormat, s *segment, file *os.File) (n int, err error) { 161 | switch format { 162 | case common.SegmentV1: 163 | if s.seg == nil { 164 | s.seg, n, err = segv1.NewReadOnlySegment(file) 165 | } else { 166 | n, err = s.seg.Reading(file) 167 | } 168 | 169 | default: 170 | err = common.ErrSegmentUnsupportedFormat 171 | } 172 | return 173 | } 174 | 175 | func (q *queue) 
readEntryFromHead(head *segment, front *list.Element, dst *entry.Entry) (n int, hasElement, shouldContinue bool) { 176 | // now read 177 | code, n, _ := head.seg.ReadEntry(dst) 178 | switch code { 179 | case common.NoError: 180 | hasElement = true 181 | return 182 | 183 | case common.SegmentNoMoreReadWeak: 184 | return 185 | 186 | default: 187 | // TODO: write log here 188 | // if code != common.SegmentNoMoreReadStrong { 189 | // } 190 | 191 | if q.removeSegment(front) { 192 | return // no need to continue 193 | } 194 | 195 | shouldContinue = true 196 | return 197 | } 198 | } 199 | 200 | func (q *queue) removeSegment(e *list.Element) bool { 201 | q.wLock.RLock() 202 | 203 | // do not remove back/tail of segment list 204 | if e == q.segments.Back() { 205 | q.wLock.RUnlock() 206 | return true 207 | } 208 | 209 | // remove from list 210 | val := q.segments.Remove(e) 211 | 212 | q.wLock.RUnlock() 213 | 214 | seg := val.(*segment) 215 | 216 | // close segment 217 | if seg.seg != nil { 218 | _ = seg.seg.Close() 219 | } 220 | 221 | // remove underlying file 222 | if len(seg.path) > 0 { 223 | _ = os.Remove(seg.path) 224 | q.closeAndRemoveOffsetTracker(offsetFilePath(seg.path)) 225 | } 226 | 227 | return false 228 | } 229 | 230 | func (q *queue) commitOffset() { 231 | if q.offsetTracker.f != nil { 232 | var buf [8]byte 233 | common.Endianese.PutUint64(buf[:], uint64(q.offsetTracker.offset)) 234 | _, _ = q.offsetTracker.f.Write(buf[:]) 235 | } 236 | } 237 | 238 | func (q *queue) closeOffsetTracker() (err error) { 239 | if q.offsetTracker.f != nil { 240 | err = q.offsetTracker.f.Close() 241 | } 242 | return 243 | } 244 | 245 | func (q *queue) closeAndRemoveOffsetTracker(path string) { 246 | _ = q.closeOffsetTracker() 247 | _ = os.Remove(path) 248 | } 249 | 250 | func (q *queue) Enqueue(e entry.Entry) error { 251 | q.wLock.Lock() 252 | err := q.enqueue(e) 253 | q.wLock.Unlock() 254 | return err 255 | } 256 | 257 | func (q *queue) enqueue(e entry.Entry) error { 258 | for 
attempt := 0; attempt < 2; attempt++ { 259 | back := q.segments.Back() 260 | if back == nil { 261 | return common.ErrQueueCorrupted 262 | } 263 | 264 | tail := back.Value.(*segment) 265 | 266 | code, err := tail.seg.WriteEntry(e) 267 | switch code { 268 | case common.NoError, common.EntryTooBig: 269 | return err 270 | 271 | default: // full? corrupted? 272 | // try to write new one 273 | seg, err := q.newSegment() 274 | if err != nil { 275 | return err 276 | } 277 | q.segments.PushBack(seg) 278 | } 279 | } 280 | 281 | return common.ErrQueueCorrupted 282 | } 283 | 284 | func (q *queue) EnqueueBatch(b entry.Batch) error { 285 | q.wLock.Lock() 286 | err := q.enqueueBatch(b) 287 | q.wLock.Unlock() 288 | return err 289 | } 290 | 291 | func (q *queue) enqueueBatch(b entry.Batch) error { 292 | for attempt := 0; attempt < 2; attempt++ { 293 | back := q.segments.Back() 294 | if back == nil { 295 | return common.ErrQueueCorrupted 296 | } 297 | 298 | tail := back.Value.(*segment) 299 | 300 | code, err := tail.seg.WriteBatch(b) 301 | switch code { 302 | case common.NoError, common.EntryTooBig: 303 | return err 304 | 305 | default: // full? corrupted? 
306 | // try to write new one 307 | seg, err := q.newSegment() 308 | if err != nil { 309 | return err 310 | } 311 | q.segments.PushBack(seg) 312 | } 313 | } 314 | 315 | return common.ErrQueueCorrupted 316 | } 317 | 318 | func (q *queue) newSegment() (*segment, error) { 319 | f, err := createFile(q.settings.DataDir, segPrefix) 320 | if err != nil { 321 | return nil, err 322 | } 323 | path := f.Name() 324 | 325 | // write header 326 | if err = q.segHeadWriter.WriteHeader(f, q.settings.SegmentFormat); err != nil { 327 | _ = os.Remove(path) 328 | return nil, err 329 | } 330 | 331 | // no problem -> add to segments list 332 | switch q.settings.SegmentFormat { 333 | case common.SegmentV1: 334 | seg, err := segv1.NewSegment(f, q.settings.EntryFormat, q.settings.MaxEntriesPerSegment) 335 | if err != nil { 336 | _ = f.Close() 337 | _ = os.Remove(path) 338 | return nil, err 339 | } 340 | 341 | return &segment{ 342 | path: path, 343 | seg: seg, 344 | }, nil 345 | 346 | default: 347 | _ = f.Close() 348 | _ = os.Remove(path) 349 | return nil, common.ErrSegmentUnsupportedFormat 350 | } 351 | } 352 | 353 | func loadOffsetTracker(path string) (offset int64, f *os.File, err error) { 354 | for attempt := 0; attempt < 2; attempt++ { 355 | f, err = os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644) 356 | if err != nil { 357 | return 358 | } 359 | 360 | var info os.FileInfo 361 | info, err = f.Stat() 362 | if err == nil { 363 | if info.Size() < 8 { 364 | return 365 | } 366 | 367 | // seek and read stored-offset 368 | if _, err = f.Seek(-8, 2); err == nil { 369 | var buf [8]byte 370 | if _, err = io.ReadFull(f, buf[:]); err == nil { 371 | offset = int64(common.Endianese.Uint64(buf[:])) 372 | _, err = f.Seek(0, 2) // to the end 373 | } 374 | } 375 | } 376 | 377 | if err != nil { 378 | _ = f.Close() 379 | _ = os.Remove(path) 380 | } 381 | } 382 | return 383 | } 384 | 385 | func offsetFilePath(segmentFilePath string) string { 386 | return segmentFilePath + segOffsetFileSuffix 387 | } 388 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# pqueue - a fast durable queue for Go

[![](https://github.com/linxGnu/pqueue/workflows/Build/badge.svg)]()
[![Go Report Card](https://goreportcard.com/badge/github.com/linxGnu/pqueue)](https://goreportcard.com/report/github.com/linxGnu/pqueue)
[![Coverage Status](https://coveralls.io/repos/github/linxGnu/pqueue/badge.svg?branch=main)](https://coveralls.io/github/linxGnu/pqueue?branch=main)

`pqueue` is thread-safe and serves environments where durability is required (e.g., outages lasting longer than memory queues can sustain)

`pqueue` consumes only a small amount of memory. Most of the time, you are bound only by disk size.

## Installation

```
go get -u github.com/linxGnu/pqueue
```

## Usage

```go
package main

import (
	"fmt"
	"log"

	"github.com/linxGnu/pqueue"
	"github.com/linxGnu/pqueue/entry"
)

func main() {
	// 1000 entries per segment
	q, err := pqueue.New("/tmp", 1000) // anywhere you want, instead of /tmp
	if err != nil {
		log.Fatal(err)
	}
	defer q.Close() // it's important to close the queue before exit

	// enqueue
	if err = q.Enqueue([]byte{1, 2, 3, 4}); err != nil {
		log.Fatal(err)
	}
	if err = q.Enqueue([]byte{5, 6, 7, 8}); err != nil {
		log.Fatal(err)
	}

	// peek
	var v entry.Entry
	if hasItem := q.Peek(&v); hasItem {
		fmt.Println(v) // print: [1 2 3 4]
	}

	// dequeue
	if hasItem := q.Dequeue(&v); hasItem {
		fmt.Println(v) // print: [1 2 3 4]
	}
	if hasItem := q.Dequeue(&v); hasItem {
		fmt.Println(v) // print: [5 6 7 8]
	}
}
```

## Limitation
- Entry size must not be larger than 1GB

## Benchmark

67 | ### Comparing 68 | 69 | - [github.com/grandecola/bigqueue](https://github.com/grandecola/bigqueue) - embedded, fast and persistent queue written in pure Go using memory mapped (`mmap`) files 70 | - bigqueue.SetPeriodicFlushOps(5): for data safety 71 | - bigqueue.SetMaxInMemArenas(256MB) 72 | - bigqueue.SetArenaSize(512MB) 73 | 74 | ### Result 75 | 76 | ### HDD - Hitachi HTS725050A7 77 | 78 | ``` 79 | Disk model: HGST HTS725050A7 80 | Units: sectors of 1 * 512 = 512 bytes 81 | Sector size (logical/physical): 512 bytes / 4096 bytes 82 | I/O size (minimum/optimal): 4096 bytes / 4096 bytes 83 | ``` 84 | 85 | ``` 86 | goos: linux 87 | goarch: amd64 88 | pkg: github.com/linxGnu/pqueue 89 | cpu: AMD Ryzen 9 3950X 16-Core Processor 90 | PASS 91 | benchmark iter time/iter bytes alloc allocs 92 | --------- ---- --------- ----------- ------ 93 | BenchmarkPQueueWriting_16-32 124 9.51 ms/op 102634 B/op 10146 allocs/op 94 | BenchmarkBigQueueWriting_16-32 1 1115.03 ms/op 5096 B/op 29 allocs/op 95 | BenchmarkPQueueWriting_64-32 133 10.01 ms/op 102433 B/op 10146 allocs/op 96 | BenchmarkBigQueueWriting_64-32 2 958.87 ms/op 11308 B/op 30 allocs/op 97 | BenchmarkPQueueWriting_256-32 120 10.51 ms/op 102616 B/op 10146 allocs/op 98 | BenchmarkBigQueueWriting_256-32 1 1170.48 ms/op 5336 B/op 29 allocs/op 99 | BenchmarkPQueueWriting_2048-32 60 20.25 ms/op 104512 B/op 10146 allocs/op 100 | BenchmarkBigQueueWriting_2048-32 1 2046.51 ms/op 7128 B/op 29 allocs/op 101 | BenchmarkPQueueWriting_16K-32 10 100.61 ms/op 120151 B/op 10146 allocs/op 102 | BenchmarkBigQueueWriting_16K-32 1 9634.72 ms/op 29776 B/op 32 allocs/op 103 | BenchmarkPQueueWriting_64K-32 4 349.45 ms/op 171772 B/op 10147 allocs/op 104 | BenchmarkBigQueueWriting_64K-32 1 29679.39 ms/op 83848 B/op 87 allocs/op 105 | BenchmarkPQueueRW_16-32 24 49.06 ms/op 273011 B/op 20238 allocs/op 106 | BenchmarkBigQueueRW_16-32 1 18573.37 ms/op 176096 B/op 10061 allocs/op 107 | BenchmarkPQueueRW_64-32 22 52.32 ms/op 272786 B/op 20236 
allocs/op 108 | BenchmarkBigQueueRW_64-32 1 20714.97 ms/op 648248 B/op 10059 allocs/op 109 | BenchmarkPQueueRW_256-32 20 54.85 ms/op 273814 B/op 20245 allocs/op 110 | BenchmarkBigQueueRW_256-32 1 18967.45 ms/op 2573648 B/op 10033 allocs/op 111 | BenchmarkPQueueRW_2048-32 13 85.23 ms/op 279630 B/op 20240 allocs/op 112 | BenchmarkBigQueueRW_2048-32 1 23003.57 ms/op 20511568 B/op 10201 allocs/op 113 | BenchmarkPQueueRW_16K-32 4 286.53 ms/op 319328 B/op 20247 allocs/op 114 | BenchmarkBigQueueRW_16K-32 1 43056.72 ms/op 163882064 B/op 10161 allocs/op 115 | BenchmarkPQueueRW_64K-32 2 674.56 ms/op 473848 B/op 20236 allocs/op 116 | BenchmarkBigQueueRW_64K-32 1 71556.97 ms/op 655469672 B/op 10357 allocs/op 117 | ``` 118 | 119 | ### SSD - Samsung SSD 850 Pro 120 | 121 | ``` 122 | Disk model: Samsung SSD 850 Pro 123 | Units: sectors of 1 * 512 = 512 bytes 124 | Sector size (logical/physical): 512 bytes / 512 bytes 125 | I/O size (minimum/optimal): 512 bytes / 512 bytes 126 | ``` 127 | 128 | ``` 129 | goos: linux 130 | goarch: amd64 131 | pkg: github.com/linxGnu/pqueue 132 | cpu: AMD Ryzen 9 3950X 16-Core Processor 133 | PASS 134 | benchmark iter time/iter bytes alloc allocs 135 | --------- ---- --------- ----------- ------ 136 | BenchmarkPQueueWriting_16-32 122 9.91 ms/op 102507 B/op 10146 allocs/op 137 | BenchmarkBigQueueWriting_16-32 9 117.01 ms/op 4186 B/op 27 allocs/op 138 | BenchmarkPQueueWriting_64-32 120 9.62 ms/op 102424 B/op 10146 allocs/op 139 | BenchmarkBigQueueWriting_64-32 8 138.96 ms/op 4634 B/op 28 allocs/op 140 | BenchmarkPQueueWriting_256-32 106 10.79 ms/op 102647 B/op 10146 allocs/op 141 | BenchmarkBigQueueWriting_256-32 5 242.03 ms/op 2052 B/op 27 allocs/op 142 | BenchmarkPQueueWriting_2048-32 57 21.44 ms/op 104817 B/op 10146 allocs/op 143 | BenchmarkBigQueueWriting_2048-32 3 354.33 ms/op 10029 B/op 30 allocs/op 144 | BenchmarkPQueueWriting_16K-32 10 104.84 ms/op 120153 B/op 10146 allocs/op 145 | BenchmarkBigQueueWriting_16K-32 1 1963.32 ms/op 30640 B/op 41 
allocs/op 146 | BenchmarkPQueueWriting_64K-32 3 349.73 ms/op 170405 B/op 10146 allocs/op 147 | BenchmarkBigQueueWriting_64K-32 1 6077.02 ms/op 86728 B/op 117 allocs/op 148 | BenchmarkPQueueRW_16-32 26 51.32 ms/op 271447 B/op 20240 allocs/op 149 | BenchmarkBigQueueRW_16-32 1 5947.32 ms/op 173408 B/op 10033 allocs/op 150 | BenchmarkPQueueRW_64-32 22 51.97 ms/op 272084 B/op 20240 allocs/op 151 | BenchmarkBigQueueRW_64-32 1 5586.13 ms/op 653456 B/op 10033 allocs/op 152 | BenchmarkPQueueRW_256-32 20 55.29 ms/op 273002 B/op 20241 allocs/op 153 | BenchmarkBigQueueRW_256-32 1 6269.66 ms/op 2568792 B/op 10066 allocs/op 154 | BenchmarkPQueueRW_2048-32 13 81.82 ms/op 278406 B/op 20240 allocs/op 155 | BenchmarkBigQueueRW_2048-32 1 6099.57 ms/op 20520656 B/op 10289 allocs/op 156 | BenchmarkPQueueRW_16K-32 4 287.24 ms/op 327796 B/op 20252 allocs/op 157 | BenchmarkBigQueueRW_16K-32 1 9192.42 ms/op 163882128 B/op 10155 allocs/op 158 | BenchmarkPQueueRW_64K-32 2 707.96 ms/op 473884 B/op 20277 allocs/op 159 | BenchmarkBigQueueRW_64K-32 1 14538.79 ms/op 655459096 B/op 10252 allocs/op 160 | ``` 161 | 162 | #### NVMe - Corsair Force MP600 163 | 164 | ``` 165 | Disk model: Corsair Force Series Gen.4 PCIe MP600 500GB NVMe M.2 SSD 166 | Units: sectors of 1 * 512 = 512 bytes 167 | Sector size (logical/physical): 512 bytes / 512 bytes 168 | I/O size (minimum/optimal): 512 bytes / 512 bytes 169 | ``` 170 | 171 | ``` 172 | goos: linux 173 | goarch: amd64 174 | pkg: github.com/linxGnu/pqueue 175 | cpu: AMD Ryzen 9 3950X 16-Core Processor 176 | PASS 177 | benchmark iter time/iter bytes alloc allocs 178 | --------- ---- --------- ----------- ------ 179 | BenchmarkPQueueWriting_16-32 225 5.23 ms/op 101927 B/op 10142 allocs/op 180 | BenchmarkBigQueueWriting_16-32 1089 1.03 ms/op 988 B/op 27 allocs/op 181 | BenchmarkPQueueWriting_64-32 232 5.15 ms/op 102055 B/op 10142 allocs/op 182 | BenchmarkBigQueueWriting_64-32 828 1.37 ms/op 984 B/op 27 allocs/op 183 | BenchmarkPQueueWriting_256-32 190 5.98 
ms/op 102279 B/op 10142 allocs/op 184 | BenchmarkBigQueueWriting_256-32 482 2.50 ms/op 1176 B/op 27 allocs/op 185 | BenchmarkPQueueWriting_2048-32 97 11.93 ms/op 104170 B/op 10142 allocs/op 186 | BenchmarkBigQueueWriting_2048-32 80 13.67 ms/op 3508 B/op 27 allocs/op 187 | BenchmarkPQueueWriting_16K-32 18 64.92 ms/op 118953 B/op 10142 allocs/op 188 | BenchmarkBigQueueWriting_16K-32 10 101.22 ms/op 20366 B/op 30 allocs/op 189 | BenchmarkPQueueWriting_64K-32 5 209.72 ms/op 167192 B/op 10142 allocs/op 190 | BenchmarkBigQueueWriting_64K-32 3 364.77 ms/op 70128 B/op 56 allocs/op 191 | BenchmarkPQueueRW_16-32 36 31.39 ms/op 271413 B/op 20237 allocs/op 192 | BenchmarkBigQueueRW_16-32 100 11.76 ms/op 163011 B/op 10030 allocs/op 193 | BenchmarkPQueueRW_64-32 36 31.89 ms/op 270448 B/op 20241 allocs/op 194 | BenchmarkBigQueueRW_64-32 99 12.34 ms/op 647321 B/op 10033 allocs/op 195 | BenchmarkPQueueRW_256-32 33 33.75 ms/op 272010 B/op 20243 allocs/op 196 | BenchmarkBigQueueRW_256-32 81 14.75 ms/op 2571970 B/op 10035 allocs/op 197 | BenchmarkPQueueRW_2048-32 20 52.16 ms/op 278024 B/op 20237 allocs/op 198 | BenchmarkBigQueueRW_2048-32 33 35.82 ms/op 20497430 B/op 10054 allocs/op 199 | BenchmarkPQueueRW_16K-32 5 200.83 ms/op 323212 B/op 20244 allocs/op 200 | BenchmarkBigQueueRW_16K-32 6 175.49 ms/op 163876804 B/op 10105 allocs/op 201 | BenchmarkPQueueRW_64K-32 3 471.45 ms/op 469130 B/op 20253 allocs/op 202 | BenchmarkBigQueueRW_64K-32 2 570.38 ms/op 655472440 B/op 10391 allocs/op 203 | ``` 204 | -------------------------------------------------------------------------------- /queue_test.go: -------------------------------------------------------------------------------- 1 | package pqueue 2 | 3 | import ( 4 | "bytes" 5 | "container/list" 6 | "fmt" 7 | "io" 8 | "os" 9 | "path/filepath" 10 | "sync" 11 | "sync/atomic" 12 | "testing" 13 | "time" 14 | 15 | "github.com/linxGnu/pqueue/common" 16 | "github.com/linxGnu/pqueue/entry" 17 | 18 | "github.com/stretchr/testify/require" 19 | ) 20 
// tmpDir is the base directory for all test fixtures.
var tmpDir = os.TempDir()

// mockWriterErr is a test double that satisfies the writer/sync/closer and
// segment-header interfaces, failing selectively based on its flags.
type mockWriterErr struct {
	buf             *bytes.Buffer
	onWrite         bool // fail Write when set
	onSync          bool // fail Sync when set
	onClose         bool // fail Close when set
	onSegmentHeader bool // fail WriteHeader when set
}

func (m *mockWriterErr) Write(data []byte) (int, error) {
	if m.onWrite {
		return 0, fmt.Errorf("fake error")
	}
	return m.buf.Write(data)
}

func (m *mockWriterErr) Sync() error {
	if m.onSync {
		return fmt.Errorf("fake error")
	}
	return nil
}

func (m *mockWriterErr) Close() error {
	if m.onClose {
		return fmt.Errorf("fake error")
	}
	return nil
}

func (m *mockWriterErr) WriteHeader(io.WriteCloser, common.SegmentFormat) error {
	if m.onSegmentHeader {
		return fmt.Errorf("fake error")
	}
	return nil
}

func (m *mockWriterErr) ReadHeader(r io.ReadCloser) (format common.SegmentFormat, err error) {
	return
}

// TestNewSegment exercises queue.newSegment: failure paths (bad data dir,
// unsupported entry/segment formats, failing header writer) and the happy
// path (fresh segment is created non-readable).
func TestNewSegment(t *testing.T) {
	t.Run("Error", func(t *testing.T) {
		var q queue
		q.segHeadWriter = &segmentHeader{}

		// data dir does not exist / is not writable
		q.settings = QueueSettings{
			DataDir: "/abc",
		}
		_, err := q.newSegment()
		require.Error(t, err)

		// unsupported entry format
		q.settings = QueueSettings{
			DataDir:     tmpDir,
			EntryFormat: 123,
		}
		_, err = q.newSegment()
		require.Error(t, err)

		// unsupported segment format
		q.settings = QueueSettings{
			DataDir:       tmpDir,
			SegmentFormat: 123,
		}
		_, err = q.newSegment()
		require.Error(t, err)

		// header writer fails
		q.settings = QueueSettings{
			DataDir:       tmpDir,
			SegmentFormat: common.SegmentV1,
			EntryFormat:   common.EntryV1,
		}
		q.segHeadWriter = &mockWriterErr{onSegmentHeader: true}
		_, err = q.newSegment()
		require.Error(t, err)
	})

	t.Run("OK", func(t *testing.T) {
		var q queue
		q.segHeadWriter = &segmentHeader{}

		q.settings = QueueSettings{
			DataDir:       tmpDir,
			SegmentFormat: common.SegmentV1,
			EntryFormat:   common.EntryV1,
		}
		s, err := q.newSegment()
		require.NoError(t, err)
		require.False(t, s.readable)
		_ = os.Remove(s.path)
	})
}

// TestQueueRace hammers one queue with 8 dequeuers, 8 peekers, 4 single
// writers and 4 batch writers, verifying every value 0..size-1 is dequeued
// exactly once. Pre-created junk files in the data dir check that invalid
// leftover segments are tolerated. Run with -race.
func TestQueueRace(t *testing.T) {
	size := 40000

	dataDir := filepath.Join(tmpDir, "pqueue_race_test")
	_ = os.RemoveAll(dataDir)

	err := os.MkdirAll(dataDir, 0o777)
	require.NoError(t, err)
	defer func() {
		_ = os.RemoveAll(dataDir)
	}()

	// prepare some files: empty, corrupt header, valid-looking header
	{
		f1, e := os.CreateTemp(dataDir, segPrefix)
		require.NoError(t, e)
		_ = f1.Close()

		f2, e := os.CreateTemp(dataDir, segPrefix)
		require.NoError(t, e)
		_, e = f2.Write([]byte{0, 1, 2, 3})
		require.NoError(t, e)
		_ = f2.Close()

		f3, e := os.CreateTemp(dataDir, segPrefix)
		require.NoError(t, e)
		_, e = f3.Write([]byte{0, 0, 0, 0})
		require.NoError(t, e)
		_ = f3.Close()
	}

	q, err := New(dataDir, 0)
	require.NoError(t, err)
	defer func() {
		_ = q.Close()
	}()

	// start readers
	var wg sync.WaitGroup

	// collectValue[v] counts how many times value v was dequeued
	collectValue := make([]int, size)
	var total uint32
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			var e entry.Entry
			for {
				if atomic.LoadUint32(&total) >= uint32(size) {
					return
				}

				if ok := q.Dequeue(&e); ok {
					value := common.Endianese.Uint32(e)
					require.Less(t, value, uint32(size))
					collectValue[value]++
					atomic.AddUint32(&total, 1)
				} else {
					time.Sleep(500 * time.Microsecond)
				}
			}
		}()
	}

	// peekers only validate that any observed value is in range
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			var e entry.Entry
			for {
				if atomic.LoadUint32(&total) >= uint32(size) {
					return
				}

				if ok := q.Peek(&e); ok {
					value := common.Endianese.Uint32(e)
					require.True(t, value < uint32(size))
				}
			}
		}()
	}

	// start writers
	ch := make(chan uint32, 1)
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			buf := make([]byte, 4<<10)
			for data := range ch {
				time.Sleep(time.Millisecond)

				common.Endianese.PutUint32(buf, data)
				err := q.Enqueue(buf)
				require.NoError(t, err)
			}
		}()
	}

	// batch writers (batch of one entry each)
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			buf := make([]byte, 4<<10)
			b := entry.NewBatch(1)
			for data := range ch {
				time.Sleep(time.Millisecond)

				common.Endianese.PutUint32(buf, data)
				b.Reset()
				b.Append(buf)

				err := q.EnqueueBatch(b)
				require.NoError(t, err)
			}
		}()
	}

	for i := 0; i < size; i++ {
		ch <- uint32(i)
	}
	close(ch)

	wg.Wait()

	// every value must have been dequeued exactly once
	for i := range collectValue {
		require.Equal(t, 1, collectValue[i])
	}
}

// TestEnqueue checks that an empty segment list yields an error.
func TestEnqueue(t *testing.T) {
	t.Run("NoSegment", func(t *testing.T) {
		q := &queue{segments: list.New()}
		require.Error(t, q.Enqueue([]byte{}))
	})
}

// TestDequeue checks the empty-list dequeue path and that the tail
// element is never removed by removeSegment.
func TestDequeue(t *testing.T) {
	t.Run("NoSegment", func(t *testing.T) {
		q := &queue{segments: list.New()}

		var e entry.Entry
		require.False(t, q.Dequeue(&e))

		// sole element is the tail: removeSegment must refuse and return true
		require.True(t, q.removeSegment(q.segments.PushBack(1)))
	})
}

// TestQueueWriteLoad writes `size` entries, then drains them across
// several close/reopen cycles, verifying FIFO order and offset-tracker
// persistence at each reopen boundary.
func TestQueueWriteLoad(t *testing.T) {
	size := 20000

	dataDir := filepath.Join(tmpDir, "pqueue_write_load")
	_ = os.RemoveAll(dataDir)

	err := os.MkdirAll(dataDir, 0o777)
	require.NoError(t, err)
	defer func() {
		_ = os.RemoveAll(dataDir)
	}()

	// fill the queue
	{
		q, err := New(dataDir, 0)
		require.NoError(t, err)

		buf := make([]byte, 2<<10)
		for data := 0; data < size; data++ {
			common.Endianese.PutUint32(buf, uint32(data))
			err := q.Enqueue(buf)
			require.NoError(t, err)
		}
		_ = q.Close()
	}

	// drain all but the last 100 entries
	{
		q, err := New(dataDir, 0)
		require.NoError(t, err)

		var e entry.Entry
		for expect := 0; expect < size-100; expect++ {
			require.True(t, q.Dequeue(&e))
			require.EqualValues(t, expect, common.Endianese.Uint32(e))
		}

		_ = q.Close()
	}

	// reopen: resume exactly where the previous session stopped
	{
		q, err := New(dataDir, 0)
		require.NoError(t, err)

		var e entry.Entry
		for expect := size - 100; expect < size-20; expect++ {
			require.True(t, q.Dequeue(&e))
			require.EqualValues(t, expect, common.Endianese.Uint32(e))
		}

		_ = q.Close()
	}

	// final reopen: drain the remainder, then the queue must be empty
	{
		q, err := New(dataDir, 0)
		require.NoError(t, err)

		var e entry.Entry
		for expect := size - 20; expect < size; expect++ {
			require.True(t, q.Dequeue(&e))
			require.EqualValues(t, expect, common.Endianese.Uint32(e))
		}
		require.False(t, q.Dequeue(&e))

		_ = q.Close()
	}
}

// TestQueueExample walks through peek/dequeue interleavings with a
// 3-entries-per-segment queue and asserts segment rotation/removal counts.
func TestQueueExample(t *testing.T) {
	dataDir := filepath.Join(tmpDir, "pqueue_example")
	_ = os.RemoveAll(dataDir)
	err := os.MkdirAll(dataDir, 0o777)
	require.NoError(t, err)
	defer func() {
		_ = os.RemoveAll(dataDir)
	}()

	q, err := New(dataDir, 3)
	require.NoError(t, err)

	require.NoError(t, q.Enqueue([]byte{1, 2, 3}))
	require.NoError(t, q.Enqueue([]byte{4, 5, 6}))
	require.NoError(t, q.Enqueue([]byte{7, 8, 9, 10}))
	require.NoError(t, q.Enqueue([]byte{11}))

	// peek then dequeue
	var peek entry.Entry
	require.True(t, q.Peek(&peek))
	require.EqualValues(t, []byte{1, 2, 3}, peek)
	require.True(t, q.Dequeue(&peek))
	require.EqualValues(t, []byte{1, 2, 3}, peek)
	require.True(t, q.(*queue).peek == nil)

	// dequeue then peek
	require.True(t, q.Dequeue(&peek))
	require.EqualValues(t, []byte{4, 5, 6}, peek)
	require.True(t, q.Peek(&peek))
	require.EqualValues(t, []byte{7, 8, 9, 10}, peek)
	require.Equal(t, 2, q.(*queue).segments.Len())

	// dequeue then peek again
	require.True(t, q.Dequeue(&peek))
	require.EqualValues(t, []byte{7, 8, 9, 10}, peek)
	require.Equal(t, 2, q.(*queue).segments.Len()) // not remove yet

	require.True(t, q.Peek(&peek))
	require.EqualValues(t, []byte{11}, peek)
	require.Equal(t, 1, q.(*queue).segments.Len()) // removed eof segment
}

// TestLoadOffsetFile: opening a directory as an offset tracker must fail.
func TestLoadOffsetFile(t *testing.T) {
	_, _, err := loadOffsetTracker("/")
	require.Error(t, err)
}

// TestQueueCorruptedWritingFile overwrites the head segment on disk and
// verifies the queue refuses to yield entries from the corrupted file.
func TestQueueCorruptedWritingFile(t *testing.T) {
	dataDir := filepath.Join(tmpDir, "pqueue_hijack")
	_ = os.RemoveAll(dataDir)
	err := os.MkdirAll(dataDir, 0o777)
	require.NoError(t, err)
	defer func() {
		_ = os.RemoveAll(dataDir)
	}()

	q, err := New(dataDir, 3)
	require.NoError(t, err)

	require.NoError(t, q.Enqueue([]byte{1, 2, 3}))

	// clobber the segment file from outside the queue
	front := q.(*queue).segments.Front().Value.(*segment)
	f, err := os.OpenFile(front.path, os.O_RDWR, 0o644)
	require.NoError(t, err)
	_, err = f.Write([]byte{1, 2, 3, 4, 1, 1, 1, 1})
	require.NoError(t, err)
	require.NoError(t, f.Close())

	var e entry.Entry
	require.False(t, q.Dequeue(&e))
}

// TestQueueReopen verifies a dequeued entry is not replayed after the
// queue is closed and reopened (offset tracker round-trip).
func TestQueueReopen(t *testing.T) {
	dataDir := filepath.Join(tmpDir, "pqueue_reopen")
	_ = os.RemoveAll(dataDir)
	err := os.MkdirAll(dataDir, 0o777)
	require.NoError(t, err)
	defer func() {
		_ = os.RemoveAll(dataDir)
	}()

	q, err := New(dataDir, 3)
	require.NoError(t, err)

	require.NoError(t, q.Enqueue([]byte{1, 2, 3}))

	var e entry.Entry
	require.True(t, q.Dequeue(&e))
	require.EqualValues(t, e, []byte{1, 2, 3})

	err = q.Close()
	require.NoError(t, err)

	q, err = New(dataDir, 3)
	require.NoError(t, err)

	// only one value was enqueued and it was already dequeued, so there shouldn't be anything else
	require.False(t, q.Dequeue(&e))
	q.Close()
}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------