├── docs ├── content │ ├── faq │ │ └── _index.md │ ├── contact │ │ ├── _index.md │ │ └── index.md │ ├── design │ │ ├── _index.md │ │ └── index.md │ ├── resources │ │ ├── _index.md │ │ └── index.md │ ├── get-started │ │ └── _index.md │ ├── projects-using-badger │ │ ├── _index.md │ │ └── index.md │ └── _index.md ├── .gitignore ├── themes │ ├── hugo-docs │ │ ├── layouts │ │ │ ├── _default │ │ │ │ ├── list.html │ │ │ │ ├── section.html │ │ │ │ └── article.html │ │ │ ├── .gitignore │ │ │ ├── shortcodes │ │ │ │ ├── version.html │ │ │ │ ├── load-img.html │ │ │ │ └── notice.html │ │ │ ├── partials │ │ │ │ ├── meta.html │ │ │ │ ├── suggest-edit.html │ │ │ │ ├── request-edit.html │ │ │ │ ├── topbar.html │ │ │ │ ├── sidebar.html │ │ │ │ ├── footer.html │ │ │ │ └── header.html │ │ │ ├── index.html │ │ │ └── 404.html │ │ ├── static │ │ │ ├── json │ │ │ │ └── search.json │ │ │ ├── images │ │ │ │ ├── badger.png │ │ │ │ ├── gopher-404.jpg │ │ │ │ ├── dgraph-black.png │ │ │ │ ├── diggy-shadow.png │ │ │ │ ├── favicons │ │ │ │ │ ├── favicon.ico │ │ │ │ │ ├── favicon-16x16.png │ │ │ │ │ ├── favicon-32x32.png │ │ │ │ │ ├── favicon-96x96.png │ │ │ │ │ ├── apple-touch-icon.png │ │ │ │ │ ├── favicon-194x194.png │ │ │ │ │ ├── android-chrome-36x36.png │ │ │ │ │ ├── android-chrome-48x48.png │ │ │ │ │ ├── android-chrome-72x72.png │ │ │ │ │ ├── android-chrome-96x96.png │ │ │ │ │ ├── android-chrome-144x144.png │ │ │ │ │ ├── android-chrome-192x192.png │ │ │ │ │ ├── apple-touch-icon-114x114.png │ │ │ │ │ ├── apple-touch-icon-120x120.png │ │ │ │ │ ├── apple-touch-icon-144x144.png │ │ │ │ │ ├── apple-touch-icon-152x152.png │ │ │ │ │ ├── apple-touch-icon-180x180.png │ │ │ │ │ ├── apple-touch-icon-57x57.png │ │ │ │ │ ├── apple-touch-icon-60x60.png │ │ │ │ │ ├── apple-touch-icon-72x72.png │ │ │ │ │ ├── apple-touch-icon-76x76.png │ │ │ │ │ ├── manifest.json │ │ │ │ │ └── safari-pinned-tab.svg │ │ │ │ └── Screenshot from 2020-07-07 19-14-26.png │ │ │ └── fonts │ │ │ │ ├── FontAwesome.otf │ │ │ │ ├── Inconsolata.eot │ │ │ │ ├── Inconsolata.ttf │ │ │ │ ├── Inconsolata.woff │ │ │ │ ├── Work_Sans_200.eot │ │ │ │ ├── Work_Sans_200.ttf │ │ │ │ ├── Work_Sans_200.woff │ │ │ │ ├── Work_Sans_300.eot │ │ │ │ ├── Work_Sans_300.ttf │ │ │ │ ├── Work_Sans_300.woff │ │ │ │ ├── Work_Sans_500.eot │ │ │ │ ├── Work_Sans_500.ttf │ │ │ │ ├── Work_Sans_500.woff │ │ │ │ ├── Work_Sans_200.woff2 │ │ │ │ ├── Work_Sans_300.woff2 │ │ │ │ ├── Work_Sans_500.woff2 │ │ │ │ ├── fontawesome-webfont.eot │ │ │ │ ├── fontawesome-webfont.ttf │ │ │ │ ├── fontawesome-webfont.woff │ │ │ │ ├── fontawesome-webfont.woff2 │ │ │ │ ├── Novecentosanswide-Normal-webfont.eot │ │ │ │ ├── Novecentosanswide-Normal-webfont.ttf │ │ │ │ ├── Novecentosanswide-Normal-webfont.woff │ │ │ │ ├── Novecentosanswide-Normal-webfont.woff2 │ │ │ │ ├── Novecentosanswide-UltraLight-webfont.eot │ │ │ │ ├── Novecentosanswide-UltraLight-webfont.ttf │ │ │ │ ├── Novecentosanswide-UltraLight-webfont.woff │ │ │ │ └── Novecentosanswide-UltraLight-webfont.woff2 │ │ ├── images │ │ │ ├── tn.png │ │ │ └── screenshot.png │ │ ├── archetypes │ │ │ └── default.md │ │ ├── theme.toml │ │ └── LICENSE.md │ └── .DS_Store ├── static │ └── images │ │ └── diggy-shadow.png ├── archetypes │ └── default.md ├── README.md ├── config.toml └── scripts │ ├── local.sh │ └── build.sh ├── badger ├── .gitignore ├── cmd │ ├── bench.go │ ├── root.go │ ├── flatten.go │ ├── rotate.go │ ├── backup.go │ ├── restore.go │ ├── stream.go │ └── rotate_test.go └── main.go ├── .gitignore ├── integration └── testgc │ └── .gitignore ├── images ├── 
diggy-shadow.png └── benchmarks-rocksdb.png ├── CODE_OF_CONDUCT.md ├── .github ├── ISSUE_TEMPLATE ├── CODEOWNERS ├── workflows │ └── main.yml └── stale.yml ├── pb ├── gen.sh └── badgerpb2.proto ├── .deepsource.toml ├── fb ├── gen.sh ├── flatbuffer.fbs ├── install_flatbuffers.sh ├── BlockOffset.go └── TableIndex.go ├── .golangci.yml ├── changes.sh ├── y ├── file_nodsync.go ├── file_dsync.go ├── event_log.go ├── zstd_nocgo.go ├── zstd_cgo.go ├── checksum.go ├── encrypt.go ├── iterator.go ├── error.go ├── metrics.go ├── bloom_test.go └── bloom.go ├── go.mod ├── test.sh ├── doc.go ├── appveyor.yml ├── discard_test.go ├── logger_test.go ├── trie ├── trie_test.go └── trie.go ├── options └── options.go ├── VERSIONING.md ├── .travis.yml ├── logger.go ├── publisher_test.go ├── CONTRIBUTING.md ├── managed_db.go ├── util.go ├── histogram_test.go ├── table └── README.md ├── dir_unix.go ├── dir_windows.go ├── publisher.go ├── skl ├── arena.go └── README.md ├── discard.go ├── batch_test.go ├── merge_test.go ├── key_registry_test.go └── dir_plan9.go /docs/content/faq/_index.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /badger/.gitignore: -------------------------------------------------------------------------------- 1 | /badger 2 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | /public 2 | 3 | -------------------------------------------------------------------------------- /docs/content/contact/_index.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/content/design/_index.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/content/resources/_index.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | p/ 2 | badger-test*/ 3 | -------------------------------------------------------------------------------- /docs/content/get-started/_index.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /integration/testgc/.gitignore: -------------------------------------------------------------------------------- 1 | /testgc 2 | -------------------------------------------------------------------------------- /docs/content/projects-using-badger/_index.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/_default/list.html: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/json/search.json: -------------------------------------------------------------------------------- 1 | [] 2 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/.gitignore: 
-------------------------------------------------------------------------------- 1 | /hugo-docs 2 | 3 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/shortcodes/version.html: -------------------------------------------------------------------------------- 1 | {{ getenv "CURRENT_VERSION" }} -------------------------------------------------------------------------------- /docs/themes/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/.DS_Store -------------------------------------------------------------------------------- /images/diggy-shadow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/images/diggy-shadow.png -------------------------------------------------------------------------------- /images/benchmarks-rocksdb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/images/benchmarks-rocksdb.png -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | Our Code of Conduct can be found here: 4 | 5 | https://dgraph.io/conduct 6 | -------------------------------------------------------------------------------- /docs/static/images/diggy-shadow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/static/images/diggy-shadow.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/images/tn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/images/tn.png -------------------------------------------------------------------------------- /docs/archetypes/default.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "{{ replace .Name "-" " " | title }}" 3 | date: {{ .Date }} 4 | draft: true 5 | --- 6 | 7 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/images/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/images/screenshot.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/badger.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/badger.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/FontAwesome.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/FontAwesome.otf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Inconsolata.eot: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Inconsolata.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Inconsolata.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Inconsolata.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/gopher-404.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/gopher-404.jpg -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Inconsolata.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Inconsolata.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_200.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Work_Sans_200.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_200.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Work_Sans_200.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_200.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Work_Sans_200.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_300.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Work_Sans_300.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_300.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Work_Sans_300.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_300.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Work_Sans_300.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_500.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Work_Sans_500.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_500.ttf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Work_Sans_500.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_500.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Work_Sans_500.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/dgraph-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/dgraph-black.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/diggy-shadow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/diggy-shadow.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_200.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Work_Sans_200.woff2 -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_300.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Work_Sans_300.woff2 -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_500.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Work_Sans_500.woff2 -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/favicon.ico -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE: -------------------------------------------------------------------------------- 1 | **GitHub Issues are deprecated. 
Use [Discuss Issues](https://discuss.dgraph.io/c/issues/badger/37) for reporting issues about this repository.** 2 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/shortcodes/load-img.html: -------------------------------------------------------------------------------- 1 | {{ $url := .Get 0}} 2 | {{ $alt := .Get 1}} 3 | 4 | {{ $alt }} 5 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/favicon-16x16.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/favicon-32x32.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/favicon-96x96.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/favicon-96x96.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/favicon-194x194.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/favicon-194x194.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/archetypes/default.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Some Title" 3 | weight: 5 4 | prev: /prev/path 5 | next: /next/path 6 | toc: true 7 | --- 8 | 9 | Lorem Ipsum 10 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/meta.html: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/android-chrome-36x36.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/android-chrome-36x36.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/android-chrome-48x48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/android-chrome-48x48.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/android-chrome-72x72.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/android-chrome-72x72.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/android-chrome-96x96.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/android-chrome-96x96.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.woff2 -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/android-chrome-144x144.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/android-chrome-144x144.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/android-chrome-192x192.png 
-------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-114x114.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-114x114.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-120x120.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-120x120.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-144x144.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-144x144.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-152x152.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-152x152.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-180x180.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-180x180.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-57x57.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-57x57.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-60x60.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-60x60.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-72x72.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-72x72.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-76x76.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-76x76.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.eot: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/Screenshot from 2020-07-07 19-14-26.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/images/Screenshot from 2020-07-07 19-14-26.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/badger/master/docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.woff2 -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/_default/section.html: -------------------------------------------------------------------------------- 1 | {{ partial "header.html" . }} 2 | 3 | {{ range .Data.Pages.ByWeight }} 4 | {{ .Render "article" }} 5 | {{ end }} 6 | 7 | {{ partial "footer.html" . }} 8 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # CODEOWNERS info: https://help.github.com/en/articles/about-code-owners 2 | # Owners are automatically requested for review for PRs that changes code 3 | # that they own. 4 | * @manishrjain @ashish-goswami @jarifibrahim 5 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/shortcodes/notice.html: -------------------------------------------------------------------------------- 1 | {{ $type := .Get 0}} 2 | 3 |
4 | {{ humanize $type }} {{ .Inner | markdownify }} 5 |
6 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/_default/article.html: -------------------------------------------------------------------------------- 1 | 2 |
3 | {{ partial "request-edit.html" . }} 4 | {{ partial "suggest-edit.html" . }} 5 | 6 |

{{ .Title }} 7 | 8 | {{ .Content }} 9 |
10 | -------------------------------------------------------------------------------- /pb/gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Run this script from its directory, so that badgerpb2.proto is where it's expected to 4 | # be. 5 | 6 | # You might need to go get -v github.com/gogo/protobuf/... 7 | 8 | protoc --gofast_out=plugins=grpc:. --gofast_opt=paths=source_relative -I=. badgerpb2.proto 9 | -------------------------------------------------------------------------------- /.deepsource.toml: -------------------------------------------------------------------------------- 1 | version = 1 2 | 3 | test_patterns = [ 4 | 'integration/testgc/**', 5 | '**/*_test.go' 6 | ] 7 | 8 | exclude_patterns = [ 9 | 10 | ] 11 | 12 | [[analyzers]] 13 | name = 'go' 14 | enabled = true 15 | 16 | 17 | [analyzers.meta] 18 | import_path = 'github.com/dgraph-io/badger' 19 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/suggest-edit.html: -------------------------------------------------------------------------------- 1 | {{ $currentBranch := getenv "CURRENT_BRANCH" }} 2 | 3 | 5 | Edit Page 6 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/index.html: -------------------------------------------------------------------------------- 1 | {{ partial "header.html" . }} 2 | 3 |
4 | {{ partial "request-edit.html" . }} 5 | {{ partial "suggest-edit.html" . }} 6 | 7 |

{{ .Title }} 8 | 9 | {{ .Content }} 10 |
11 | 12 | 13 | {{ partial "footer.html" . }} 14 | -------------------------------------------------------------------------------- /fb/gen.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | ## Install flatc if not present 6 | ## ref. https://google.github.io/flatbuffers/flatbuffers_guide_building.html 7 | command -v flatc > /dev/null || { ./install_flatbuffers.sh ; } 8 | 9 | flatc --go flatbuffer.fbs 10 | # Move files to the correct directory. 11 | mv fb/* ./ 12 | rmdir fb 13 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/request-edit.html: -------------------------------------------------------------------------------- 1 | {{ $currentBranch := getenv "CURRENT_BRANCH" }} 2 | 3 | 7 | Report Issue 8 | 9 | -------------------------------------------------------------------------------- /docs/content/contact/index.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Contact" 3 | aliases = ["/contact"] 4 | +++ 5 | 6 | - Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, feature requests and discussions. 7 | - Please use [Github issue tracker](https://github.com/dgraph-io/badger/issues) for filing bugs or feature requests. 8 | - Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs). 9 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | tests: false 3 | 4 | linters-settings: 5 | lll: 6 | line-length: 100 7 | 8 | linters: 9 | disable-all: true 10 | enable: 11 | - errcheck 12 | - ineffassign 13 | - gas 14 | - gofmt 15 | - golint 16 | - gosimple 17 | - govet 18 | - lll 19 | - varcheck 20 | - unused 21 | 22 | issues: 23 | exclude-rules: 24 | - linters: 25 | - gosec 26 | text: "G404: " 27 | -------------------------------------------------------------------------------- /changes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | GHORG=${GHORG:-dgraph-io} 5 | GHREPO=${GHREPO:-badger} 6 | cat < /home/mrjn/go/src/github.com/dgraph-io/ristretto 6 | 7 | require ( 8 | github.com/DataDog/zstd v1.4.1 9 | github.com/cespare/xxhash v1.1.0 10 | github.com/dgraph-io/ristretto v0.0.4-0.20201012160933-079c5f0d0daf 11 | github.com/dustin/go-humanize v1.0.0 12 | github.com/golang/protobuf v1.3.1 13 | github.com/golang/snappy v0.0.1 14 | github.com/google/flatbuffers v1.12.0 15 | github.com/kr/pretty v0.1.0 // indirect 16 | github.com/pkg/errors v0.9.1 17 | github.com/spaolacci/murmur3 v1.1.0 // indirect 18 | github.com/spf13/cobra v0.0.5 19 | github.com/stretchr/testify v1.4.0 20 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859 21 | golang.org/x/sys v0.0.0-20200918174421-af09f7315aff 22 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect 23 | ) 24 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | # This is a basic workflow to help you get started with Actions 2 | 3 | name: Issue Closer 4 | 5 | # Controls when the action will run. 
Triggers the workflow on push or pull request 6 | # events but only for the master branch 7 | on: 8 | issues: 9 | types: [ opened, reopened ] 10 | 11 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel 12 | jobs: 13 | # This workflow contains a single job called "build" 14 | build: 15 | # The type of runner that the job will run on 16 | runs-on: ubuntu-latest 17 | 18 | # Steps represent a sequence of tasks that will be executed as part of the job 19 | steps: 20 | - name: Close Issue 21 | uses: peter-evans/close-issue@v1.0.1 22 | with: 23 | comment: | 24 | **Use [Discuss Issues](https://discuss.dgraph.io/c/issues/badger/37) for reporting issues about this repository.** 25 | -------------------------------------------------------------------------------- /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Number of days of inactivity before an issue becomes stale 2 | daysUntilStale: 30 3 | # Number of days of inactivity before a stale issue is closed 4 | daysUntilClose: 7 5 | # Issues with these labels will never be considered stale 6 | exemptLabels: 7 | - skip/stale 8 | - status/accepted 9 | # Label to use when marking an issue as stale 10 | staleLabel: status/stale 11 | # Comment to post when marking an issue as stale. Set to `false` to disable 12 | markComment: > 13 | This issue has been automatically marked as stale because it has not had 14 | recent activity. It will be closed if no further activity occurs. Thank you 15 | for your contributions. 16 | # Comment to post when closing a stale issue. Set to `false` to disable 17 | closeComment: > 18 | This issue was marked as stale and no activity has occurred since then, 19 | therefore it will now be closed. Please, reopen if the issue is still 20 | relevant. 21 | -------------------------------------------------------------------------------- /docs/config.toml: -------------------------------------------------------------------------------- 1 | languageCode = "en-us" 2 | theme = "hugo-docs" 3 | canonifyURLs = true 4 | 5 | [markup.goldmark.renderer] 6 | unsafe = true 7 | 8 | [markup.highlight] 9 | noClasses = false 10 | [[menu.main]] 11 | name = "Home" 12 | url = "/" 13 | identifier = "home" 14 | weight = -1 15 | 16 | [[menu.main]] 17 | name = "Getting Started" 18 | url = "/get-started/" 19 | identifier = "get-started" 20 | weight = 1 21 | [[menu.main]] 22 | name = "Resources" 23 | url = "/resources/" 24 | identifier = "resources" 25 | weight = 2 26 | 27 | [[menu.main]] 28 | name = "Design" 29 | url = "/design/" 30 | identifier = "design" 31 | weight = 3 32 | 33 | [[menu.main]] 34 | name = "Projects using Badger" 35 | url = "/projects-using-badger/" 36 | identifier = "project-using-badger" 37 | weight = 4 38 | 39 | [[menu.main]] 40 | name = "Frequently Asked Questions" 41 | url = "/faq/" 42 | identifier = "faq" 43 | weight = 5 44 | -------------------------------------------------------------------------------- /docs/content/resources/index.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Resources" 3 | aliases = ["/resouces"] 4 | +++ 5 | 6 | 7 | ## Blog Posts 8 | 1. [Introducing Badger: A fast key-value store written natively in 9 | Go](https://open.dgraph.io/post/badger/) 10 | 2. [Make Badger crash resilient with ALICE](https://blog.dgraph.io/post/alice/) 11 | 3. [Badger vs LMDB vs BoltDB: Benchmarking key-value databases in Go](https://blog.dgraph.io/post/badger-lmdb-boltdb/) 12 | 4. 
[Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/) 13 | 14 | ## Contact 15 | - Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, feature requests and discussions. 16 | - Please use [Github issue tracker](https://github.com/dgraph-io/badger/issues) for filing bugs or feature requests. 17 | - Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs). 18 | 19 | ## Contributing 20 | 21 | If you're interested in contributing to Badger see [CONTRIBUTING.md](https://github.com/dgraph-io/badger/blob/master/CONTRIBUTING.md). -------------------------------------------------------------------------------- /y/event_log.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package y 18 | 19 | import "golang.org/x/net/trace" 20 | 21 | var ( 22 | NoEventLog trace.EventLog = nilEventLog{} 23 | ) 24 | 25 | type nilEventLog struct{} 26 | 27 | func (nel nilEventLog) Printf(format string, a ...interface{}) {} 28 | 29 | func (nel nilEventLog) Errorf(format string, a ...interface{}) {} 30 | 31 | func (nel nilEventLog) Finish() {} 32 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Dgraph", 3 | "icons": [ 4 | { 5 | "src": "\/images\/favicons\/android-chrome-36x36.png", 6 | "sizes": "36x36", 7 | "type": "image\/png", 8 | "density": 0.75 9 | }, 10 | { 11 | "src": "\/images\/favicons\/android-chrome-48x48.png", 12 | "sizes": "48x48", 13 | "type": "image\/png", 14 | "density": 1 15 | }, 16 | { 17 | "src": "\/images\/favicons\/android-chrome-72x72.png", 18 | "sizes": "72x72", 19 | "type": "image\/png", 20 | "density": 1.5 21 | }, 22 | { 23 | "src": "\/images\/favicons\/android-chrome-96x96.png", 24 | "sizes": "96x96", 25 | "type": "image\/png", 26 | "density": 2 27 | }, 28 | { 29 | "src": "\/images\/favicons\/android-chrome-144x144.png", 30 | "sizes": "144x144", 31 | "type": "image\/png", 32 | "density": 3 33 | }, 34 | { 35 | "src": "\/images\/favicons\/android-chrome-192x192.png", 36 | "sizes": "192x192", 37 | "type": "image\/png", 38 | "density": 4 39 | } 40 | ] 41 | } 42 | -------------------------------------------------------------------------------- /fb/flatbuffer.fbs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | namespace fb; 18 | 19 | table TableIndex { 20 | offsets:[BlockOffset]; 21 | bloom_filter:[ubyte]; 22 | estimated_size:uint32; 23 | max_version:uint64; 24 | uncompressed_size:uint32; 25 | key_count:uint32; 26 | } 27 | 28 | table BlockOffset { 29 | key:[ubyte]; 30 | offset:uint; 31 | len:uint; 32 | } 33 | 34 | root_type TableIndex; 35 | root_type BlockOffset; 36 | -------------------------------------------------------------------------------- /test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | go version 6 | 7 | packages=$(go list ./... | grep github.com/dgraph-io/badger/v2/) 8 | 9 | if [[ ! -z "$TEAMCITY_VERSION" ]]; then 10 | export GOFLAGS="-json" 11 | fi 12 | 13 | # Ensure that we can compile the binary. 14 | pushd badger 15 | go build -v . 16 | popd 17 | 18 | # Run the memory intensive tests first. 19 | go test -v -run='TestBigKeyValuePairs$' --manual=true 20 | go test -v -run='TestPushValueLogLimit' --manual=true 21 | 22 | # Run the special Truncate test. 23 | rm -rf p 24 | go test -v -run='TestTruncateVlogNoClose$' --manual=true 25 | truncate --size=4096 p/000000.vlog 26 | go test -v -run='TestTruncateVlogNoClose2$' --manual=true 27 | go test -v -run='TestTruncateVlogNoClose3$' --manual=true 28 | rm -rf p 29 | 30 | # Run the normal tests. 31 | echo "==> Starting tests.. " 32 | # go test -timeout=25m -v -race github.com/dgraph-io/badger/v2/... 33 | for pkg in $packages; do 34 | echo "===> Testing $pkg" 35 | go test -timeout=25m -v -race $pkg 36 | done 37 | 38 | echo "===> Testing root level" 39 | go test -timeout=25m -v . -race 40 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/topbar.html: -------------------------------------------------------------------------------- 1 | 32 | -------------------------------------------------------------------------------- /badger/cmd/bench.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "github.com/spf13/cobra" 21 | ) 22 | 23 | var benchCmd = &cobra.Command{ 24 | Use: "benchmark", 25 | Short: "Benchmark Badger database.", 26 | Long: `This command will benchmark Badger for different usecases. Currently only read benchmark 27 | is supported. 
Useful for testing and performance analysis.`, 28 | } 29 | 30 | func init() { 31 | RootCmd.AddCommand(benchCmd) 32 | } 33 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Grav 4 | Copyright (c) 2016 MATHIEU CORNIC 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy of 7 | this software and associated documentation files (the "Software"), to deal in 8 | the Software without restriction, including without limitation the rights to 9 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 10 | the Software, and to permit persons to whom the Software is furnished to do so, 11 | subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 18 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 19 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 20 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 21 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /badger/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "fmt" 21 | "net/http" 22 | _ "net/http/pprof" 23 | "runtime" 24 | 25 | "github.com/dgraph-io/badger/v2/badger/cmd" 26 | ) 27 | 28 | func main() { 29 | go func() { 30 | for i := 8080; i < 9080; i++ { 31 | fmt.Printf("Listening for /debug HTTP requests at port: %d\n", i) 32 | if err := http.ListenAndServe(fmt.Sprintf("0.0.0.0:%d", i), nil); err != nil { 33 | fmt.Println("Port busy. Trying another one...") 34 | continue 35 | 36 | } 37 | } 38 | }() 39 | runtime.SetBlockProfileRate(100) 40 | runtime.GOMAXPROCS(128) 41 | cmd.Execute() 42 | } 43 | -------------------------------------------------------------------------------- /fb/install_flatbuffers.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | install_mac() { 6 | command -v brew > /dev/null || \ 7 | { echo "[ERROR]: 'brew' command not not found. Exiting" 1>&2; exit 1; } 8 | brew install flatbuffers 9 | } 10 | 11 | install_linux() { 12 | for CMD in curl cmake g++ make; do 13 | command -v $CMD > /dev/null || \ 14 | { echo "[ERROR]: '$CMD' command not not found. 
Exiting" 1>&2; exit 1; } 15 | done 16 | 17 | ## Create Temp Build Directory 18 | BUILD_DIR=$(mktemp -d) 19 | pushd $BUILD_DIR 20 | 21 | ## Fetch Latest Tarball 22 | LATEST_VERSION=$(curl -s https://api.github.com/repos/google/flatbuffers/releases/latest | grep -oP '(?<=tag_name": ")[^"]+') 23 | curl -sLO https://github.com/google/flatbuffers/archive/$LATEST_VERSION.tar.gz 24 | tar xf $LATEST_VERSION.tar.gz 25 | 26 | ## Build Binaries 27 | cd flatbuffers-${LATEST_VERSION#v} 28 | cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release 29 | make 30 | ./flattests 31 | cp flatc /usr/local/bin/flatc 32 | 33 | ## Cleanup Temp Build Directory 34 | popd 35 | rm -rf $BUILD_DIR 36 | } 37 | 38 | SYSTEM=$(uname -s) 39 | 40 | case ${SYSTEM,,} in 41 | linux) 42 | sudo bash -c "$(declare -f install_linux); install_linux" 43 | ;; 44 | darwin) 45 | install_mac 46 | ;; 47 | esac 48 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package badger implements an embeddable, simple and fast key-value database, 3 | written in pure Go. It is designed to be highly performant for both reads and 4 | writes simultaneously. Badger uses Multi-Version Concurrency Control (MVCC), and 5 | supports transactions. It runs transactions concurrently, with serializable 6 | snapshot isolation guarantees. 7 | 8 | Badger uses an LSM tree along with a value log to separate keys from values, 9 | hence reducing both write amplification and the size of the LSM tree. This 10 | allows LSM tree to be served entirely from RAM, while the values are served 11 | from SSD. 12 | 13 | 14 | Usage 15 | 16 | Badger has the following main types: DB, Txn, Item and Iterator. DB contains 17 | keys that are associated with values. It must be opened with the appropriate 18 | options before it can be accessed. 19 | 20 | All operations happen inside a Txn. Txn represents a transaction, which can 21 | be read-only or read-write. Read-only transactions can read values for a 22 | given key (which are returned inside an Item), or iterate over a set of 23 | key-value pairs using an Iterator (which are returned as Item type values as 24 | well). Read-write transactions can also update and delete keys from the DB. 25 | 26 | See the examples for more usage details. 27 | */ 28 | package badger 29 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/sidebar.html: -------------------------------------------------------------------------------- 1 | 42 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/footer.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 24 | 25 | 26 | 29 | 32 | 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /y/zstd_nocgo.go: -------------------------------------------------------------------------------- 1 | // +build !cgo 2 | 3 | /* 4 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 
8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package y 20 | 21 | // CgoEnabled is used to check if CGO is enabled while building badger. 22 | const CgoEnabled = false 23 | 24 | // ZSTDDecompress decompresses a block using ZSTD algorithm. 25 | func ZSTDDecompress(dst, src []byte) ([]byte, error) { 26 | return nil, ErrZstdCgo 27 | } 28 | 29 | // ZSTDCompress compresses a block using ZSTD algorithm. 30 | func ZSTDCompress(dst, src []byte, compressionLevel int) ([]byte, error) { 31 | return nil, ErrZstdCgo 32 | } 33 | 34 | // ZSTDCompressBound returns the worst case size needed for a destination buffer. 35 | func ZSTDCompressBound(srcSize int) int { 36 | panic("ZSTD only supported in Cgo.") 37 | } 38 | -------------------------------------------------------------------------------- /y/zstd_cgo.go: -------------------------------------------------------------------------------- 1 | // +build cgo 2 | 3 | /* 4 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package y 20 | 21 | import ( 22 | "github.com/DataDog/zstd" 23 | ) 24 | 25 | // CgoEnabled is used to check if CGO is enabled while building badger. 26 | const CgoEnabled = true 27 | 28 | // ZSTDDecompress decompresses a block using ZSTD algorithm. 29 | func ZSTDDecompress(dst, src []byte) ([]byte, error) { 30 | return zstd.Decompress(dst, src) 31 | } 32 | 33 | // ZSTDCompress compresses a block using ZSTD algorithm. 34 | func ZSTDCompress(dst, src []byte, compressionLevel int) ([]byte, error) { 35 | return zstd.CompressLevel(dst, src, compressionLevel) 36 | } 37 | 38 | // ZSTDCompressBound returns the worst case size needed for a destination buffer. 39 | func ZSTDCompressBound(srcSize int) int { 40 | return zstd.CompressBound(srcSize) 41 | } 42 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | # version format 2 | version: "{build}" 3 | 4 | # Operating system (build VM template) 5 | os: Windows Server 2012 R2 6 | 7 | # Platform. 
8 | platform: x64 9 | 10 | clone_folder: c:\gopath\src\github.com\dgraph-io\badger 11 | 12 | # Environment variables 13 | environment: 14 | GOVERSION: 1.12 15 | GOPATH: c:\gopath 16 | GO111MODULE: on 17 | 18 | # scripts that run after cloning repository 19 | install: 20 | - set PATH=%GOPATH%\bin;c:\go\bin;c:\msys64\mingw64\bin;%PATH% 21 | - go version 22 | - go env 23 | - python --version 24 | - gcc --version 25 | 26 | # To run your custom scripts instead of automatic MSBuild 27 | build_script: 28 | # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648 29 | - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)' 30 | - cd c:\gopath\src\github.com\dgraph-io\badger 31 | - git branch 32 | - go get -t ./... 33 | 34 | # To run your custom scripts instead of automatic tests 35 | test_script: 36 | # Unit tests 37 | - ps: Add-AppveyorTest "Unit Tests" -Outcome Running 38 | - go test -v github.com/dgraph-io/badger/... 39 | - go test -v -vlog_mmap=false github.com/dgraph-io/badger/... 40 | - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed 41 | 42 | notifications: 43 | - provider: Email 44 | to: 45 | - pawan@dgraph.io 46 | on_build_failure: true 47 | on_build_status_changed: true 48 | # to disable deployment 49 | deploy: off 50 | 51 | -------------------------------------------------------------------------------- /discard_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "io/ioutil" 21 | "testing" 22 | 23 | "github.com/stretchr/testify/require" 24 | ) 25 | 26 | func TestDiscardStats(t *testing.T) { 27 | dir, err := ioutil.TempDir("", "badger-test") 28 | require.NoError(t, err) 29 | defer removeDir(dir) 30 | 31 | opt := DefaultOptions(dir) 32 | ds, err := initDiscardStats(opt) 33 | require.NoError(t, err) 34 | for i := uint32(0); i < 20; i++ { 35 | require.Equal(t, int64(i*100), ds.Update(i, int64(i*100))) 36 | } 37 | ds.iterate(func(id, val uint64) { 38 | require.Equal(t, id*100, val) 39 | }) 40 | for i := uint32(0); i < 10; i++ { 41 | require.Equal(t, 0, int(ds.Update(i, -1))) 42 | } 43 | ds.iterate(func(id, val uint64) { 44 | if id < 10 { 45 | require.Zero(t, val) 46 | return 47 | } 48 | require.Equal(t, int(id*100), int(val)) 49 | }) 50 | } 51 | -------------------------------------------------------------------------------- /docs/scripts/local.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | GREEN='\033[32;1m' 6 | RESET='\033[0m' 7 | 8 | VERSIONS_ARRAY=( 9 | 'preview' 10 | ) 11 | 12 | joinVersions() { 13 | versions=$(printf ",%s" "${VERSIONS_ARRAY[@]}") 14 | echo "${versions:1}" 15 | } 16 | 17 | VERSION_STRING=$(joinVersions) 18 | 19 | run() { 20 | export CURRENT_BRANCH="master" 21 | export CURRENT_VERSION=${VERSIONS_ARRAY[0]} 22 | export VERSIONS=${VERSION_STRING} 23 | export DGRAPH_ENDPOINT=${DGRAPH_ENDPOINT:-"https://play.dgraph.io/query?latency=true"} 24 | 25 | 26 | export HUGO_TITLE="Badger Doc - Preview" \ 27 | export VERSIONS=${VERSION_STRING} \ 28 | export CURRENT_BRANCH="master" \ 29 | export CURRENT_VERSION=${CURRENT_VERSION} 30 | 31 | pushd "$(dirname "$0")/.." > /dev/null 32 | pushd themes > /dev/null 33 | 34 | if [ ! -d "hugo-docs" ]; then 35 | echo -e "$(date) $GREEN Hugo-docs repository not found. Cloning the repo. $RESET" 36 | git clone https://github.com/dgraph-io/hugo-docs.git 37 | else 38 | echo -e "$(date) $GREEN Hugo-docs repository found. Pulling the latest version from master. $RESET" 39 | pushd hugo-docs > /dev/null 40 | git pull 41 | popd > /dev/null 42 | fi 43 | popd > /dev/null 44 | 45 | if [[ $1 == "-p" || $1 == "--preview" ]]; then 46 | echo -e "$(date) $GREEN Generating documentation static pages in the public folder. $RESET" 47 | hugo --destination=public --baseURL="$2" 1> /dev/null 48 | echo -e "$(date) $GREEN Done building. $RESET" 49 | else 50 | hugo server -w --baseURL=http://localhost:1313 51 | fi 52 | popd > /dev/null 53 | } 54 | 55 | run "$1" "$2" -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/404.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | {{ partial "meta.html" . }} 6 | {{ .Title }} 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 29 | 30 | 31 | 32 | 33 | 34 | 35 |
36 |
37 |
38 |
39 | Error
40 |
41 |
42 | Woops. Looks like this page doesn't exist.
43 |
44 | Go to homepage
45 |
46 |
47 |
48 |
50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /y/checksum.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package y 18 | 19 | import ( 20 | "hash/crc32" 21 | 22 | "github.com/dgraph-io/badger/v2/pb" 23 | 24 | "github.com/cespare/xxhash" 25 | "github.com/pkg/errors" 26 | ) 27 | 28 | // ErrChecksumMismatch is returned at checksum mismatch. 29 | var ErrChecksumMismatch = errors.New("checksum mismatch") 30 | 31 | // CalculateChecksum calculates checksum for data using ct checksum type. 32 | func CalculateChecksum(data []byte, ct pb.Checksum_Algorithm) uint64 { 33 | switch ct { 34 | case pb.Checksum_CRC32C: 35 | return uint64(crc32.Checksum(data, CastagnoliCrcTable)) 36 | case pb.Checksum_XXHash64: 37 | return xxhash.Sum64(data) 38 | default: 39 | panic("checksum type not supported") 40 | } 41 | } 42 | 43 | // VerifyChecksum validates the checksum for the data against the given expected checksum. 44 | func VerifyChecksum(data []byte, expected *pb.Checksum) error { 45 | actual := CalculateChecksum(data, expected.Algo) 46 | if actual != expected.Sum { 47 | return Wrapf(ErrChecksumMismatch, "actual: %d, expected: %d", actual, expected.Sum) 48 | } 49 | return nil 50 | } 51 | -------------------------------------------------------------------------------- /y/encrypt.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package y 18 | 19 | import ( 20 | "bytes" 21 | "crypto/aes" 22 | "crypto/cipher" 23 | "crypto/rand" 24 | "io" 25 | ) 26 | 27 | // XORBlock encrypts the given data with AES and XOR's with IV. 28 | // Can be used for both encryption and decryption. IV is of 29 | // AES block size. 
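// Added usage sketch, not part of the original y/encrypt.go: AES-CTR only XORs
// the input with a keystream derived from (key, iv), so running XORBlock twice
// with the same key and IV returns the original bytes. The key and message
// values below are made up for illustration; any 16/24/32-byte AES key works.
func exampleXORBlockRoundTrip() error {
	key := make([]byte, 32) // stand-in AES-256 key
	msg := []byte("hello badger")

	iv, err := GenerateIV() // 16-byte IV (aes.BlockSize)
	if err != nil {
		return err
	}
	ct := make([]byte, len(msg))
	if err := XORBlock(ct, msg, key, iv); err != nil { // "encrypt"
		return err
	}
	pt := make([]byte, len(ct))
	return XORBlock(pt, ct, key, iv) // "decrypt": pt now equals msg
}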
30 | func XORBlock(dst, src, key, iv []byte) error { 31 | block, err := aes.NewCipher(key) 32 | if err != nil { 33 | return err 34 | } 35 | stream := cipher.NewCTR(block, iv) 36 | stream.XORKeyStream(dst, src) 37 | return nil 38 | } 39 | 40 | func XORBlockAllocate(src, key, iv []byte) ([]byte, error) { 41 | block, err := aes.NewCipher(key) 42 | if err != nil { 43 | return nil, err 44 | } 45 | stream := cipher.NewCTR(block, iv) 46 | dst := make([]byte, len(src)) 47 | stream.XORKeyStream(dst, src) 48 | return dst, nil 49 | } 50 | 51 | func XORBlockStream(w io.Writer, src, key, iv []byte) error { 52 | block, err := aes.NewCipher(key) 53 | if err != nil { 54 | return err 55 | } 56 | stream := cipher.NewCTR(block, iv) 57 | sw := cipher.StreamWriter{S: stream, W: w} 58 | _, err = io.Copy(sw, bytes.NewReader(src)) 59 | return Wrapf(err, "XORBlockStream") 60 | } 61 | 62 | // GenerateIV generates IV. 63 | func GenerateIV() ([]byte, error) { 64 | iv := make([]byte, aes.BlockSize) 65 | _, err := rand.Read(iv) 66 | return iv, err 67 | } 68 | -------------------------------------------------------------------------------- /badger/cmd/root.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "errors" 21 | "fmt" 22 | "os" 23 | "strings" 24 | 25 | "github.com/spf13/cobra" 26 | ) 27 | 28 | var sstDir, vlogDir string 29 | 30 | // RootCmd represents the base command when called without any subcommands 31 | var RootCmd = &cobra.Command{ 32 | Use: "badger", 33 | Short: "Tools to manage Badger database.", 34 | PersistentPreRunE: validateRootCmdArgs, 35 | } 36 | 37 | // Execute adds all child commands to the root command and sets flags appropriately. 38 | // This is called by main.main(). It only needs to happen once to the rootCmd. 39 | func Execute() { 40 | if err := RootCmd.Execute(); err != nil { 41 | fmt.Println(err) 42 | os.Exit(1) 43 | } 44 | } 45 | 46 | func init() { 47 | RootCmd.PersistentFlags().StringVar(&sstDir, "dir", "", 48 | "Directory where the LSM tree files are located. (required)") 49 | 50 | RootCmd.PersistentFlags().StringVar(&vlogDir, "vlog-dir", "", 51 | "Directory where the value log files are located, if different from --dir") 52 | } 53 | 54 | func validateRootCmdArgs(cmd *cobra.Command, args []string) error { 55 | if strings.HasPrefix(cmd.Use, "help ") { // No need to validate if it is help 56 | return nil 57 | } 58 | if sstDir == "" { 59 | return errors.New("--dir not specified") 60 | } 61 | if vlogDir == "" { 62 | vlogDir = sstDir 63 | } 64 | return nil 65 | } 66 | -------------------------------------------------------------------------------- /badger/cmd/flatten.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2018 Dgraph Labs, Inc. 
and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "fmt" 21 | "math" 22 | 23 | "github.com/dgraph-io/badger/v2" 24 | "github.com/spf13/cobra" 25 | ) 26 | 27 | var flattenCmd = &cobra.Command{ 28 | Use: "flatten", 29 | Short: "Flatten the LSM tree.", 30 | Long: ` 31 | This command would compact all the LSM tables into one level. 32 | `, 33 | RunE: flatten, 34 | } 35 | 36 | var numWorkers int 37 | 38 | func init() { 39 | RootCmd.AddCommand(flattenCmd) 40 | flattenCmd.Flags().IntVarP(&numWorkers, "num-workers", "w", 1, 41 | "Number of concurrent compactors to run. More compactors would use more"+ 42 | " server resources to potentially achieve faster compactions.") 43 | flattenCmd.Flags().IntVarP(&numVersions, "num_versions", "", 1, 44 | "Option to configure the maximum number of versions per key. "+ 45 | "Values <= 0 will be considered to have the max number of versions.") 46 | } 47 | 48 | func flatten(cmd *cobra.Command, args []string) error { 49 | if numVersions <= 0 { 50 | // Keep all versions. 51 | numVersions = math.MaxInt32 52 | } 53 | opt := badger.DefaultOptions(sstDir). 54 | WithValueDir(vlogDir). 55 | WithNumVersionsToKeep(numVersions). 56 | WithNumCompactors(0) 57 | fmt.Printf("Opening badger with options = %+v\n", opt) 58 | db, err := badger.Open(opt) 59 | if err != nil { 60 | return err 61 | } 62 | defer db.Close() 63 | 64 | return db.Flatten(numWorkers) 65 | } 66 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/safari-pinned-tab.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 7 | 8 | Created by potrace 1.11, written by Peter Selinger 2001-2013 9 | 10 | 12 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /logger_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "fmt" 21 | "testing" 22 | 23 | "github.com/stretchr/testify/require" 24 | ) 25 | 26 | type mockLogger struct { 27 | output string 28 | } 29 | 30 | func (l *mockLogger) Errorf(f string, v ...interface{}) { 31 | l.output = fmt.Sprintf("ERROR: "+f, v...) 
32 | } 33 | 34 | func (l *mockLogger) Infof(f string, v ...interface{}) { 35 | l.output = fmt.Sprintf("INFO: "+f, v...) 36 | } 37 | 38 | func (l *mockLogger) Warningf(f string, v ...interface{}) { 39 | l.output = fmt.Sprintf("WARNING: "+f, v...) 40 | } 41 | 42 | func (l *mockLogger) Debugf(f string, v ...interface{}) { 43 | l.output = fmt.Sprintf("DEBUG: "+f, v...) 44 | } 45 | 46 | // Test that the DB-specific log is used instead of the global log. 47 | func TestDbLog(t *testing.T) { 48 | l := &mockLogger{} 49 | opt := Options{Logger: l} 50 | 51 | opt.Errorf("test") 52 | require.Equal(t, "ERROR: test", l.output) 53 | opt.Infof("test") 54 | require.Equal(t, "INFO: test", l.output) 55 | opt.Warningf("test") 56 | require.Equal(t, "WARNING: test", l.output) 57 | } 58 | 59 | // Test that the global logger is used when no logger is specified in Options. 60 | func TestNoDbLog(t *testing.T) { 61 | l := &mockLogger{} 62 | opt := Options{} 63 | opt.Logger = l 64 | 65 | opt.Errorf("test") 66 | require.Equal(t, "ERROR: test", l.output) 67 | opt.Infof("test") 68 | require.Equal(t, "INFO: test", l.output) 69 | opt.Warningf("test") 70 | require.Equal(t, "WARNING: test", l.output) 71 | } 72 | -------------------------------------------------------------------------------- /trie/trie_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package trie 18 | 19 | import ( 20 | "testing" 21 | 22 | "github.com/stretchr/testify/require" 23 | ) 24 | 25 | func TestGet(t *testing.T) { 26 | trie := NewTrie() 27 | trie.Add([]byte("hello"), 1) 28 | trie.Add([]byte("hello"), 3) 29 | trie.Add([]byte("hello"), 4) 30 | trie.Add([]byte("hel"), 20) 31 | trie.Add([]byte("he"), 20) 32 | trie.Add([]byte("badger"), 30) 33 | 34 | trie.Add(nil, 10) 35 | require.Equal(t, map[uint64]struct{}{10: {}}, trie.Get([]byte("A"))) 36 | 37 | ids := trie.Get([]byte("hel")) 38 | require.Equal(t, 2, len(ids)) 39 | require.Equal(t, map[uint64]struct{}{10: {}, 20: {}}, ids) 40 | 41 | ids = trie.Get([]byte("badger")) 42 | require.Equal(t, 2, len(ids)) 43 | require.Equal(t, map[uint64]struct{}{10: {}, 30: {}}, ids) 44 | 45 | ids = trie.Get([]byte("hello")) 46 | require.Equal(t, 5, len(ids)) 47 | require.Equal(t, map[uint64]struct{}{10: {}, 1: {}, 3: {}, 4: {}, 20: {}}, ids) 48 | 49 | trie.Add([]byte{}, 11) 50 | require.Equal(t, map[uint64]struct{}{10: {}, 11: {}}, trie.Get([]byte("A"))) 51 | 52 | } 53 | 54 | func TestTrieDelete(t *testing.T) { 55 | trie := NewTrie() 56 | trie.Add([]byte("hello"), 1) 57 | trie.Add([]byte("hello"), 3) 58 | trie.Add([]byte("hello"), 4) 59 | trie.Add(nil, 5) 60 | 61 | trie.Delete([]byte("hello"), 4) 62 | 63 | require.Equal(t, map[uint64]struct{}{5: {}, 1: {}, 3: {}}, trie.Get([]byte("hello"))) 64 | 65 | trie.Delete(nil, 5) 66 | require.Equal(t, map[uint64]struct{}{1: {}, 3: {}}, trie.Get([]byte("hello"))) 67 | } 68 | -------------------------------------------------------------------------------- /options/options.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package options 18 | 19 | // FileLoadingMode specifies how data in LSM table files and value log files should 20 | // be loaded. 21 | type FileLoadingMode int 22 | 23 | const ( 24 | // FileIO indicates that files must be loaded using standard I/O 25 | FileIO FileLoadingMode = iota 26 | // LoadToRAM indicates that file must be loaded into RAM 27 | LoadToRAM 28 | // MemoryMap indicates that that the file must be memory-mapped 29 | MemoryMap 30 | ) 31 | 32 | // ChecksumVerificationMode tells when should DB verify checksum for SSTable blocks. 33 | type ChecksumVerificationMode int 34 | 35 | const ( 36 | // NoVerification indicates DB should not verify checksum for SSTable blocks. 37 | NoVerification ChecksumVerificationMode = iota 38 | // OnTableRead indicates checksum should be verified while opening SSTtable. 39 | OnTableRead 40 | // OnBlockRead indicates checksum should be verified on every SSTable block read. 41 | OnBlockRead 42 | // OnTableAndBlockRead indicates checksum should be verified 43 | // on SSTable opening and on every block read. 44 | OnTableAndBlockRead 45 | ) 46 | 47 | // CompressionType specifies how a block should be compressed. 
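// Added note, not part of the original options.go: these enum values are what
// the badger.Options builders accept. For example, badger/cmd/stream.go in this
// repository wires a CompressionType through the output options roughly as:
//
//	outOpt := inOpt.WithDir(outDir).WithValueDir(outDir).
//		WithCompression(options.CompressionType(compressionType))
//
// A plain options.Snappy or options.ZSTD can be passed the same way.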
48 | type CompressionType uint32 49 | 50 | const ( 51 | // None mode indicates that a block is not compressed. 52 | None CompressionType = 0 53 | // Snappy mode indicates that a block is compressed using Snappy algorithm. 54 | Snappy CompressionType = 1 55 | // ZSTD mode indicates that a block is compressed using ZSTD algorithm. 56 | ZSTD CompressionType = 2 57 | ) 58 | -------------------------------------------------------------------------------- /pb/badgerpb2.proto: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | // Use protos/gen.sh to generate .pb.go files. 18 | syntax = "proto3"; 19 | 20 | package badgerpb2; 21 | 22 | option go_package = "github.com/dgraph-io/badger/v2/pb"; 23 | 24 | message KV { 25 | bytes key = 1; 26 | bytes value = 2; 27 | bytes user_meta = 3; 28 | uint64 version = 4; 29 | uint64 expires_at = 5; 30 | bytes meta = 6; 31 | 32 | // Stream id is used to identify which stream the KV came from. 33 | uint32 stream_id = 10; 34 | // Stream done is used to indicate end of stream. 35 | bool stream_done = 11; 36 | } 37 | 38 | message KVList { 39 | repeated KV kv = 1; 40 | } 41 | 42 | message ManifestChangeSet { 43 | // A set of changes that are applied atomically. 44 | repeated ManifestChange changes = 1; 45 | } 46 | 47 | enum EncryptionAlgo { 48 | aes = 0; 49 | } 50 | 51 | message ManifestChange { 52 | uint64 Id = 1; // Table ID. 53 | enum Operation { 54 | CREATE = 0; 55 | DELETE = 1; 56 | } 57 | Operation Op = 2; 58 | uint32 Level = 3; // Only used for CREATE. 59 | uint64 key_id = 4; 60 | EncryptionAlgo encryption_algo = 5; 61 | uint32 compression = 6; // Only used for CREATE Op. 62 | } 63 | 64 | message Checksum { 65 | enum Algorithm { 66 | CRC32C = 0; 67 | XXHash64 = 1; 68 | } 69 | Algorithm algo = 1; // For storing type of Checksum algorithm used 70 | uint64 sum = 2; 71 | } 72 | 73 | message DataKey { 74 | uint64 key_id = 1; 75 | bytes data = 2; 76 | bytes iv = 3; 77 | int64 created_at = 4; 78 | } 79 | -------------------------------------------------------------------------------- /badger/cmd/rotate.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "io/ioutil" 21 | "os" 22 | "time" 23 | 24 | "github.com/dgraph-io/badger/v2" 25 | 26 | "github.com/spf13/cobra" 27 | ) 28 | 29 | var oldKeyPath string 30 | var newKeyPath string 31 | var rotateCmd = &cobra.Command{ 32 | Use: "rotate", 33 | Short: "Rotate encryption key.", 34 | Long: "Rotate will rotate the old key with new encryption key.", 35 | RunE: doRotate, 36 | } 37 | 38 | func init() { 39 | RootCmd.AddCommand(rotateCmd) 40 | rotateCmd.Flags().StringVarP(&oldKeyPath, "old-key-path", "o", 41 | "", "Path of the old key") 42 | rotateCmd.Flags().StringVarP(&newKeyPath, "new-key-path", "n", 43 | "", "Path of the new key") 44 | } 45 | 46 | func doRotate(cmd *cobra.Command, args []string) error { 47 | oldKey, err := getKey(oldKeyPath) 48 | if err != nil { 49 | return err 50 | } 51 | opt := badger.KeyRegistryOptions{ 52 | Dir: sstDir, 53 | ReadOnly: true, 54 | EncryptionKey: oldKey, 55 | EncryptionKeyRotationDuration: 10 * 24 * time.Hour, 56 | } 57 | kr, err := badger.OpenKeyRegistry(opt) 58 | if err != nil { 59 | return err 60 | } 61 | newKey, err := getKey(newKeyPath) 62 | if err != nil { 63 | return err 64 | } 65 | opt.EncryptionKey = newKey 66 | err = badger.WriteKeyRegistry(kr, opt) 67 | if err != nil { 68 | return err 69 | } 70 | return nil 71 | } 72 | 73 | func getKey(path string) ([]byte, error) { 74 | if path == "" { 75 | // Empty bytes for plain text to encryption(vice versa). 76 | return []byte{}, nil 77 | } 78 | fp, err := os.Open(path) 79 | if err != nil { 80 | return nil, err 81 | } 82 | return ioutil.ReadAll(fp) 83 | } 84 | -------------------------------------------------------------------------------- /badger/cmd/backup.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "bufio" 21 | "math" 22 | "os" 23 | 24 | "github.com/dgraph-io/badger/v2" 25 | "github.com/spf13/cobra" 26 | ) 27 | 28 | var backupFile string 29 | 30 | // backupCmd represents the backup command 31 | var backupCmd = &cobra.Command{ 32 | Use: "backup", 33 | Short: "Backup Badger database.", 34 | Long: `Backup Badger database to a file in a version-agnostic manner. 35 | 36 | Iterates over each key-value pair, encodes it along with its metadata and 37 | version in protocol buffers and writes them to a file. This file can later be 38 | used by the restore command to create an identical copy of the 39 | database.`, 40 | RunE: doBackup, 41 | } 42 | 43 | func init() { 44 | RootCmd.AddCommand(backupCmd) 45 | backupCmd.Flags().StringVarP(&backupFile, "backup-file", "f", 46 | "badger.bak", "File to backup to") 47 | backupCmd.Flags().IntVarP(&numVersions, "num-versions", "n", 48 | 0, "Number of versions to keep. 
A value <= 0 means keep all versions.") 49 | } 50 | 51 | func doBackup(cmd *cobra.Command, args []string) error { 52 | opt := badger.DefaultOptions(sstDir). 53 | WithValueDir(vlogDir). 54 | WithNumVersionsToKeep(math.MaxInt32) 55 | 56 | if numVersions > 0 { 57 | opt.NumVersionsToKeep = numVersions 58 | } 59 | 60 | // Open DB 61 | db, err := badger.Open(opt) 62 | if err != nil { 63 | return err 64 | } 65 | defer db.Close() 66 | 67 | // Create File 68 | f, err := os.Create(backupFile) 69 | if err != nil { 70 | return err 71 | } 72 | 73 | bw := bufio.NewWriterSize(f, 64<<20) 74 | if _, err = db.Backup(bw, 0); err != nil { 75 | return err 76 | } 77 | 78 | if err = bw.Flush(); err != nil { 79 | return err 80 | } 81 | 82 | if err = f.Sync(); err != nil { 83 | return err 84 | } 85 | 86 | return f.Close() 87 | } 88 | -------------------------------------------------------------------------------- /VERSIONING.md: -------------------------------------------------------------------------------- 1 | # Serialization Versioning: Semantic Versioning for databases 2 | 3 | Semantic Versioning, commonly known as SemVer, is a great idea that has been very widely adopted as 4 | a way to decide how to name software versions. The whole concept is very well summarized on 5 | semver.org with the following lines: 6 | 7 | > Given a version number MAJOR.MINOR.PATCH, increment the: 8 | > 9 | > 1. MAJOR version when you make incompatible API changes, 10 | > 2. MINOR version when you add functionality in a backwards-compatible manner, and 11 | > 3. PATCH version when you make backwards-compatible bug fixes. 12 | > 13 | > Additional labels for pre-release and build metadata are available as extensions to the 14 | > MAJOR.MINOR.PATCH format. 15 | 16 | Unfortunately, API changes are not the most important changes for libraries that serialize data for 17 | later consumption. For these libraries, such as BadgerDB, changes to the API are much easier to 18 | handle than change to the data format used to store data on disk. 19 | 20 | ## Serialization Version specification 21 | 22 | Serialization Versioning, like Semantic Versioning, uses 3 numbers and also calls them 23 | MAJOR.MINOR.PATCH, but the semantics of the numbers are slightly modified: 24 | 25 | Given a version number MAJOR.MINOR.PATCH, increment the: 26 | 27 | - MAJOR version when you make changes that require a transformation of the dataset before it can be 28 | used again. 29 | - MINOR version when old datasets are still readable but the API might have changed in 30 | backwards-compatible or incompatible ways. 31 | - PATCH version when you make backwards-compatible bug fixes. 32 | 33 | Additional labels for pre-release and build metadata are available as extensions to the 34 | MAJOR.MINOR.PATCH format. 35 | 36 | Following this naming strategy, migration from v1.x to v2.x requires a migration strategy for your 37 | existing dataset, and as such has to be carefully planned. Migrations in between different minor 38 | versions (e.g. v1.5.x and v1.6.x) might break your build, as the API *might* have changed, but once 39 | your code compiles there's no need for any data migration. Lastly, changes in between two different 40 | patch versions should never break your build or dataset. 41 | 42 | For more background on our decision to adopt Serialization Versioning, read the blog post 43 | [Semantic Versioning, Go Modules, and Databases][blog] and the original proposal on 44 | [this comment on Dgraph's Discuss forum][discuss]. 
45 | 46 | [blog]: https://blog.dgraph.io/post/serialization-versioning/ 47 | [discuss]: https://discuss.dgraph.io/t/go-modules-on-badger-and-dgraph/4662/7 -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - "1.12" 5 | - "1.13" 6 | - tip 7 | os: 8 | - osx 9 | env: 10 | jobs: 11 | - GOARCH=386 12 | - GOARCH=amd64 13 | global: 14 | - secure: CRkV2+/jlO0gXzzS50XGxfMS117FNwiVjxNY/LeWq06RKD+dDCPxTJl3JCNe3l0cYEPAglV2uMMYukDiTqJ7e+HI4nh4N4mv6lwx39N8dAvJe1x5ITS2T4qk4kTjuQb1Q1vw/ZOxoQqmvNKj2uRmBdJ/HHmysbRJ1OzCWML3OXdUwJf0AYlJzTjpMfkOKr7sTtE4rwyyQtd4tKH1fGdurgI9ZuFd9qvYxK2qcJhsQ6CNqMXt+7FkVkN1rIPmofjjBTNryzUr4COFXuWH95aDAif19DeBW4lbNgo1+FpDsrgmqtuhl6NAuptI8q/imow2KXBYJ8JPXsxW8DVFj0IIp0RCd3GjaEnwBEbxAyiIHLfW7AudyTS/dJOvZffPqXnuJ8xj3OPIdNe4xY0hWl8Ju2HhKfLOAHq7VadHZWd3IHLil70EiL4/JLD1rNbMImUZisFaA8pyrcIvYYebjOnk4TscwKFLedClRSX1XsMjWWd0oykQtrdkHM2IxknnBpaLu7mFnfE07f6dkG0nlpyu4SCLey7hr5FdcEmljA0nIxTSYDg6035fQkBEAbe7hlESOekkVNT9IZPwG+lmt3vU4ofi6NqNbJecOuSB+h36IiZ9s4YQtxYNnLgW14zjuFGGyT5smc3IjBT7qngDjKIgyrSVoRkY/8udy9qbUgvBeW8= 15 | 16 | 17 | jobs: 18 | allow_failures: 19 | - go: tip 20 | exclude: 21 | # Exclude builds for 386 architecture on go 1.12 and tip 22 | # Since we don't want it to run for 32 bit 23 | - go: "1.12" 24 | env: GOARCH=386 25 | - go: tip 26 | env: GOARCH=386 27 | include: 28 | # Define one extra linux build, which we use to run cross 29 | # compiled 32 bit tests 30 | - os: linux 31 | arch: arm64 32 | go: "1.14" 33 | env: go_32=yes 34 | 35 | notifications: 36 | email: false 37 | slack: 38 | secure: X7uBLWYbuUhf8QFE16CoS5z7WvFR8EN9j6cEectMW6mKZ3vwXGwVXRIPsgUq/606DsQdCCx34MR8MRWYGlu6TBolbSe9y0EP0i46yipPz22YtuT7umcVUbGEyx8MZKgG0v1u/zA0O4aCsOBpGAA3gxz8h3JlEHDt+hv6U8xRsSllVLzLSNb5lwxDtcfEDxVVqP47GMEgjLPM28Pyt5qwjk7o5a4YSVzkfdxBXxd3gWzFUWzJ5E3cTacli50dK4GVfiLcQY2aQYoYO7AAvDnvP+TPfjDkBlUEE4MUz5CDIN51Xb+WW33sX7g+r3Bj7V5IRcF973RiYkpEh+3eoiPnyWyxhDZBYilty3b+Hysp6d4Ov/3I3ll7Bcny5+cYjakjkMH3l9w3gs6Y82GlpSLSJshKWS8vPRsxFe0Pstj6QSJXTd9EBaFr+l1ScXjJv/Sya9j8N9FfTuOTESWuaL1auX4Y7zEEVHlA8SCNOO8K0eTfxGZnC/YcIHsR8rePEAcFxfOYQppkyLF/XvAtnb/LMUuu0g4y2qNdme6Oelvyar1tFEMRtbl4mRCdu/krXBFtkrsfUaVY6WTPdvXAGotsFJ0wuA53zGVhlcd3+xAlSlR3c1QX95HIMeivJKb5L4nTjP+xnrmQNtnVk+tG4LSH2ltuwcZSSczModtcBmRefrk= 39 | 40 | script: >- 41 | if [ $TRAVIS_OS_NAME = "linux" ] && [ $go_32 ]; then 42 | uname -a 43 | GOOS=linux GOARCH=arm go test -v ./... 44 | # Another round of tests after turning off mmap. 45 | GOOS=linux GOARCH=arm go test -v -vlog_mmap=false github.com/dgraph-io/badger 46 | else 47 | go test -v ./... 48 | # Another round of tests after turning off mmap. 49 | go test -v -vlog_mmap=false github.com/dgraph-io/badger 50 | # Cross-compile for Plan 9 51 | GOOS=plan9 go build ./... 52 | fi 53 | -------------------------------------------------------------------------------- /badger/cmd/restore.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "errors" 21 | "math" 22 | "os" 23 | "path" 24 | 25 | "github.com/dgraph-io/badger/v2" 26 | "github.com/spf13/cobra" 27 | ) 28 | 29 | var restoreFile string 30 | var maxPendingWrites int 31 | 32 | // restoreCmd represents the restore command 33 | var restoreCmd = &cobra.Command{ 34 | Use: "restore", 35 | Short: "Restore Badger database.", 36 | Long: `Restore Badger database from a file. 37 | 38 | It reads a file generated using the backup command (or by calling the 39 | DB.Backup() API method) and writes each key-value pair found in the file to 40 | the Badger database. 41 | 42 | Restore creates a new database, and currently does not work on an already 43 | existing database.`, 44 | RunE: doRestore, 45 | } 46 | 47 | func init() { 48 | RootCmd.AddCommand(restoreCmd) 49 | restoreCmd.Flags().StringVarP(&restoreFile, "backup-file", "f", 50 | "badger.bak", "File to restore from") 51 | // Default value for maxPendingWrites is 256, to minimise memory usage 52 | // and overall finish time. 53 | restoreCmd.Flags().IntVarP(&maxPendingWrites, "max-pending-writes", "w", 54 | 256, "Max number of pending writes at any time while restore") 55 | } 56 | 57 | func doRestore(cmd *cobra.Command, args []string) error { 58 | // Check if the DB already exists 59 | manifestFile := path.Join(sstDir, badger.ManifestFilename) 60 | if _, err := os.Stat(manifestFile); err == nil { // No error. File already exists. 61 | return errors.New("Cannot restore to an already existing database") 62 | } else if os.IsNotExist(err) { 63 | // pass 64 | } else { // Return an error if anything other than the error above 65 | return err 66 | } 67 | 68 | // Open DB 69 | db, err := badger.Open(badger.DefaultOptions(sstDir). 70 | WithValueDir(vlogDir). 71 | WithNumVersionsToKeep(math.MaxInt32)) 72 | if err != nil { 73 | return err 74 | } 75 | defer db.Close() 76 | 77 | // Open File 78 | f, err := os.Open(restoreFile) 79 | if err != nil { 80 | return err 81 | } 82 | defer f.Close() 83 | 84 | // Run restore 85 | return db.Load(f, maxPendingWrites) 86 | } 87 | -------------------------------------------------------------------------------- /y/iterator.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package y 18 | 19 | import ( 20 | "bytes" 21 | "encoding/binary" 22 | ) 23 | 24 | // ValueStruct represents the value info that can be associated with a key, but also the internal 25 | // Meta field. 26 | type ValueStruct struct { 27 | Meta byte 28 | UserMeta byte 29 | ExpiresAt uint64 30 | Value []byte 31 | 32 | Version uint64 // This field is not serialized. Only for internal usage. 33 | } 34 | 35 | func sizeVarint(x uint64) (n int) { 36 | for { 37 | n++ 38 | x >>= 7 39 | if x == 0 { 40 | break 41 | } 42 | } 43 | return n 44 | } 45 | 46 | // EncodedSize is the size of the ValueStruct when encoded 47 | func (v *ValueStruct) EncodedSize() uint32 { 48 | sz := len(v.Value) + 2 // meta, usermeta. 49 | enc := sizeVarint(v.ExpiresAt) 50 | return uint32(sz + enc) 51 | } 52 | 53 | // Decode uses the length of the slice to infer the length of the Value field. 54 | func (v *ValueStruct) Decode(b []byte) { 55 | v.Meta = b[0] 56 | v.UserMeta = b[1] 57 | var sz int 58 | v.ExpiresAt, sz = binary.Uvarint(b[2:]) 59 | v.Value = b[2+sz:] 60 | } 61 | 62 | // Encode expects a slice of length at least v.EncodedSize(). 63 | func (v *ValueStruct) Encode(b []byte) uint32 { 64 | b[0] = v.Meta 65 | b[1] = v.UserMeta 66 | sz := binary.PutUvarint(b[2:], v.ExpiresAt) 67 | n := copy(b[2+sz:], v.Value) 68 | return uint32(2 + sz + n) 69 | } 70 | 71 | // EncodeTo should be kept in sync with the Encode function above. The reason 72 | // this function exists is to avoid creating byte arrays per key-value pair in 73 | // table/builder.go. 74 | func (v *ValueStruct) EncodeTo(buf *bytes.Buffer) { 75 | buf.WriteByte(v.Meta) 76 | buf.WriteByte(v.UserMeta) 77 | var enc [binary.MaxVarintLen64]byte 78 | sz := binary.PutUvarint(enc[:], v.ExpiresAt) 79 | 80 | buf.Write(enc[:sz]) 81 | buf.Write(v.Value) 82 | } 83 | 84 | // Iterator is an interface for a basic iterator. 85 | type Iterator interface { 86 | Next() 87 | Rewind() 88 | Seek(key []byte) 89 | Key() []byte 90 | Value() ValueStruct 91 | Valid() bool 92 | 93 | // All iterators should be closed so that file garbage collection works. 94 | Close() error 95 | } 96 | -------------------------------------------------------------------------------- /y/error.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package y 18 | 19 | // This file contains some functions for error handling. Note that we are moving 20 | // towards using x.Trace, i.e., rpc tracing using net/tracer. But for now, these 21 | // functions are useful for simple checks logged on one machine. 22 | // Some common use cases are: 23 | // (1) You receive an error from external lib, and would like to check/log fatal. 24 | // For this, use x.Check, x.Checkf. These will check for err != nil, which is 25 | // more common in Go. If you want to check for boolean being true, use 26 | // x.Assert, x.Assertf. 
27 | // (2) You receive an error from external lib, and would like to pass on with some 28 | // stack trace information. In this case, use x.Wrap or x.Wrapf. 29 | // (3) You want to generate a new error with stack trace info. Use x.Errorf. 30 | 31 | import ( 32 | "fmt" 33 | "log" 34 | 35 | "github.com/pkg/errors" 36 | ) 37 | 38 | var debugMode = false 39 | 40 | // Check logs fatal if err != nil. 41 | func Check(err error) { 42 | if err != nil { 43 | log.Fatalf("%+v", Wrap(err, "")) 44 | } 45 | } 46 | 47 | // Check2 acts as convenience wrapper around Check, using the 2nd argument as error. 48 | func Check2(_ interface{}, err error) { 49 | Check(err) 50 | } 51 | 52 | // AssertTrue asserts that b is true. Otherwise, it would log fatal. 53 | func AssertTrue(b bool) { 54 | if !b { 55 | log.Fatalf("%+v", errors.Errorf("Assert failed")) 56 | } 57 | } 58 | 59 | // AssertTruef is AssertTrue with extra info. 60 | func AssertTruef(b bool, format string, args ...interface{}) { 61 | if !b { 62 | log.Fatalf("%+v", errors.Errorf(format, args...)) 63 | } 64 | } 65 | 66 | // Wrap wraps errors from external lib. 67 | func Wrap(err error, msg string) error { 68 | if !debugMode { 69 | if err == nil { 70 | return nil 71 | } 72 | return fmt.Errorf("%s err: %+v", msg, err) 73 | } 74 | return errors.Wrap(err, msg) 75 | } 76 | 77 | // Wrapf is Wrap with extra info. 78 | func Wrapf(err error, format string, args ...interface{}) error { 79 | if !debugMode { 80 | if err == nil { 81 | return nil 82 | } 83 | return fmt.Errorf(format+" error: %+v", append(args, err)...) 84 | } 85 | return errors.Wrapf(err, format, args...) 86 | } 87 | -------------------------------------------------------------------------------- /y/metrics.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package y 18 | 19 | import "expvar" 20 | 21 | var ( 22 | // LSMSize has size of the LSM in bytes 23 | LSMSize *expvar.Map 24 | // VlogSize has size of the value log in bytes 25 | VlogSize *expvar.Map 26 | // PendingWrites tracks the number of pending writes. 
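// Added note, not part of the original y/metrics.go: each of these is a plain
// expvar variable, so it can be read back at runtime, e.g.
//
//	if v := expvar.Get("badger_v2_puts_total"); v != nil {
//		fmt.Println("puts so far:", v.String())
//	}
//
// Importing expvar also serves all of them on /debug/vars when the process
// exposes http.DefaultServeMux. The metric names are the ones registered in
// init() below.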
27 | PendingWrites *expvar.Map 28 | 29 | // These are cumulative 30 | 31 | // NumReads has cumulative number of reads 32 | NumReads *expvar.Int 33 | // NumWrites has cumulative number of writes 34 | NumWrites *expvar.Int 35 | // NumBytesRead has cumulative number of bytes read 36 | NumBytesRead *expvar.Int 37 | // NumBytesWritten has cumulative number of bytes written 38 | NumBytesWritten *expvar.Int 39 | // NumLSMGets is number of LMS gets 40 | NumLSMGets *expvar.Map 41 | // NumLSMBloomHits is number of LMS bloom hits 42 | NumLSMBloomHits *expvar.Map 43 | // NumGets is number of gets 44 | NumGets *expvar.Int 45 | // NumPuts is number of puts 46 | NumPuts *expvar.Int 47 | // NumBlockedPuts is number of blocked puts 48 | NumBlockedPuts *expvar.Int 49 | // NumMemtableGets is number of memtable gets 50 | NumMemtableGets *expvar.Int 51 | // NumCompactionTables is the number of tables being compacted 52 | NumCompactionTables *expvar.Int 53 | ) 54 | 55 | // These variables are global and have cumulative values for all kv stores. 56 | func init() { 57 | NumReads = expvar.NewInt("badger_v2_disk_reads_total") 58 | NumWrites = expvar.NewInt("badger_v2_disk_writes_total") 59 | NumBytesRead = expvar.NewInt("badger_v2_read_bytes") 60 | NumBytesWritten = expvar.NewInt("badger_v2_written_bytes") 61 | NumLSMGets = expvar.NewMap("badger_v2_lsm_level_gets_total") 62 | NumLSMBloomHits = expvar.NewMap("badger_v2_lsm_bloom_hits_total") 63 | NumGets = expvar.NewInt("badger_v2_gets_total") 64 | NumPuts = expvar.NewInt("badger_v2_puts_total") 65 | NumBlockedPuts = expvar.NewInt("badger_v2_blocked_puts_total") 66 | NumMemtableGets = expvar.NewInt("badger_v2_memtable_gets_total") 67 | LSMSize = expvar.NewMap("badger_v2_lsm_size_bytes") 68 | VlogSize = expvar.NewMap("badger_v2_vlog_size_bytes") 69 | PendingWrites = expvar.NewMap("badger_v2_pending_writes_total") 70 | NumCompactionTables = expvar.NewInt("badger_v2_compactions_current") 71 | } 72 | -------------------------------------------------------------------------------- /trie/trie.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package trie 18 | 19 | type node struct { 20 | children map[byte]*node 21 | ids []uint64 22 | } 23 | 24 | func newNode() *node { 25 | return &node{ 26 | children: make(map[byte]*node), 27 | ids: []uint64{}, 28 | } 29 | } 30 | 31 | // Trie datastructure. 32 | type Trie struct { 33 | root *node 34 | } 35 | 36 | // NewTrie returns Trie. 37 | func NewTrie() *Trie { 38 | return &Trie{ 39 | root: newNode(), 40 | } 41 | } 42 | 43 | // Add adds the id in the trie for the given prefix path. 
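// Added usage sketch, not part of the original trie.go; it mirrors what
// trie/trie_test.go above exercises. Prefixes map to subscriber ids, and Get
// returns every id registered on any prefix of the key.
func exampleTrieUsage() map[uint64]struct{} {
	t := NewTrie()
	t.Add([]byte("hel"), 1)
	t.Add([]byte("hello"), 2)
	t.Delete([]byte("hello"), 2)  // drop subscriber 2 again
	return t.Get([]byte("hello")) // only id 1 remains
}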
44 | func (t *Trie) Add(prefix []byte, id uint64) { 45 | node := t.root 46 | for _, val := range prefix { 47 | child, ok := node.children[val] 48 | if !ok { 49 | child = newNode() 50 | node.children[val] = child 51 | } 52 | node = child 53 | } 54 | // We only need to add the id to the last node of the given prefix. 55 | node.ids = append(node.ids, id) 56 | } 57 | 58 | // Get returns prefix matched ids for the given key. 59 | func (t *Trie) Get(key []byte) map[uint64]struct{} { 60 | out := make(map[uint64]struct{}) 61 | node := t.root 62 | // If root has ids that means we have subscribers for "nil/[]byte{}" 63 | // prefix. Add them to the list. 64 | if len(node.ids) > 0 { 65 | for _, i := range node.ids { 66 | out[i] = struct{}{} 67 | } 68 | } 69 | for _, val := range key { 70 | child, ok := node.children[val] 71 | if !ok { 72 | break 73 | } 74 | // We need ids of the all the node in the matching key path. 75 | for _, id := range child.ids { 76 | out[id] = struct{}{} 77 | } 78 | node = child 79 | } 80 | return out 81 | } 82 | 83 | // Delete will delete the id if the id exist in the given index path. 84 | func (t *Trie) Delete(index []byte, id uint64) { 85 | node := t.root 86 | for _, val := range index { 87 | child, ok := node.children[val] 88 | if !ok { 89 | return 90 | } 91 | node = child 92 | } 93 | // We're just removing the id not the hanging path. 94 | out := node.ids[:0] 95 | for _, val := range node.ids { 96 | if val != id { 97 | out = append(out, val) 98 | } 99 | } 100 | for i := len(out); i < len(node.ids); i++ { 101 | node.ids[i] = 0 // garbage collecting 102 | } 103 | node.ids = out 104 | } 105 | -------------------------------------------------------------------------------- /fb/BlockOffset.go: -------------------------------------------------------------------------------- 1 | // Code generated by the FlatBuffers compiler. DO NOT EDIT. 
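// Added usage sketch, not part of the generated file: the builder helpers
// defined below are normally driven like this (sizes and values made up):
//
//	b := flatbuffers.NewBuilder(64)
//	key := b.CreateByteVector([]byte("key-1"))
//	BlockOffsetStart(b)
//	BlockOffsetAddKey(b, key)
//	BlockOffsetAddOffset(b, 0)
//	BlockOffsetAddLen(b, 4096)
//	b.Finish(BlockOffsetEnd(b))
//	bo := GetRootAsBlockOffset(b.FinishedBytes(), 0)
//	_ = bo.Len() // 4096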
2 | 3 | package fb 4 | 5 | import ( 6 | flatbuffers "github.com/google/flatbuffers/go" 7 | ) 8 | 9 | type BlockOffset struct { 10 | _tab flatbuffers.Table 11 | } 12 | 13 | func GetRootAsBlockOffset(buf []byte, offset flatbuffers.UOffsetT) *BlockOffset { 14 | n := flatbuffers.GetUOffsetT(buf[offset:]) 15 | x := &BlockOffset{} 16 | x.Init(buf, n+offset) 17 | return x 18 | } 19 | 20 | func (rcv *BlockOffset) Init(buf []byte, i flatbuffers.UOffsetT) { 21 | rcv._tab.Bytes = buf 22 | rcv._tab.Pos = i 23 | } 24 | 25 | func (rcv *BlockOffset) Table() flatbuffers.Table { 26 | return rcv._tab 27 | } 28 | 29 | func (rcv *BlockOffset) Key(j int) byte { 30 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 31 | if o != 0 { 32 | a := rcv._tab.Vector(o) 33 | return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) 34 | } 35 | return 0 36 | } 37 | 38 | func (rcv *BlockOffset) KeyLength() int { 39 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 40 | if o != 0 { 41 | return rcv._tab.VectorLen(o) 42 | } 43 | return 0 44 | } 45 | 46 | func (rcv *BlockOffset) KeyBytes() []byte { 47 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 48 | if o != 0 { 49 | return rcv._tab.ByteVector(o + rcv._tab.Pos) 50 | } 51 | return nil 52 | } 53 | 54 | func (rcv *BlockOffset) MutateKey(j int, n byte) bool { 55 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 56 | if o != 0 { 57 | a := rcv._tab.Vector(o) 58 | return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n) 59 | } 60 | return false 61 | } 62 | 63 | func (rcv *BlockOffset) Offset() uint32 { 64 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 65 | if o != 0 { 66 | return rcv._tab.GetUint32(o + rcv._tab.Pos) 67 | } 68 | return 0 69 | } 70 | 71 | func (rcv *BlockOffset) MutateOffset(n uint32) bool { 72 | return rcv._tab.MutateUint32Slot(6, n) 73 | } 74 | 75 | func (rcv *BlockOffset) Len() uint32 { 76 | o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) 77 | if o != 0 { 78 | return rcv._tab.GetUint32(o + rcv._tab.Pos) 79 | } 80 | return 0 81 | } 82 | 83 | func (rcv *BlockOffset) MutateLen(n uint32) bool { 84 | return rcv._tab.MutateUint32Slot(8, n) 85 | } 86 | 87 | func BlockOffsetStart(builder *flatbuffers.Builder) { 88 | builder.StartObject(3) 89 | } 90 | func BlockOffsetAddKey(builder *flatbuffers.Builder, key flatbuffers.UOffsetT) { 91 | builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(key), 0) 92 | } 93 | func BlockOffsetStartKeyVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { 94 | return builder.StartVector(1, numElems, 1) 95 | } 96 | func BlockOffsetAddOffset(builder *flatbuffers.Builder, offset uint32) { 97 | builder.PrependUint32Slot(1, offset, 0) 98 | } 99 | func BlockOffsetAddLen(builder *flatbuffers.Builder, len uint32) { 100 | builder.PrependUint32Slot(2, len, 0) 101 | } 102 | func BlockOffsetEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { 103 | return builder.EndObject() 104 | } 105 | -------------------------------------------------------------------------------- /logger.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2018 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "log" 21 | "os" 22 | ) 23 | 24 | // Logger is implemented by any logging system that is used for standard logs. 25 | type Logger interface { 26 | Errorf(string, ...interface{}) 27 | Warningf(string, ...interface{}) 28 | Infof(string, ...interface{}) 29 | Debugf(string, ...interface{}) 30 | } 31 | 32 | // Errorf logs an ERROR log message to the logger specified in opts or to the 33 | // global logger if no logger is specified in opts. 34 | func (opt *Options) Errorf(format string, v ...interface{}) { 35 | if opt.Logger == nil { 36 | return 37 | } 38 | opt.Logger.Errorf(format, v...) 39 | } 40 | 41 | // Infof logs an INFO message to the logger specified in opts. 42 | func (opt *Options) Infof(format string, v ...interface{}) { 43 | if opt.Logger == nil { 44 | return 45 | } 46 | opt.Logger.Infof(format, v...) 47 | } 48 | 49 | // Warningf logs a WARNING message to the logger specified in opts. 50 | func (opt *Options) Warningf(format string, v ...interface{}) { 51 | if opt.Logger == nil { 52 | return 53 | } 54 | opt.Logger.Warningf(format, v...) 55 | } 56 | 57 | // Debugf logs a DEBUG message to the logger specified in opts. 58 | func (opt *Options) Debugf(format string, v ...interface{}) { 59 | if opt.Logger == nil { 60 | return 61 | } 62 | opt.Logger.Debugf(format, v...) 63 | } 64 | 65 | type loggingLevel int 66 | 67 | const ( 68 | DEBUG loggingLevel = iota 69 | INFO 70 | WARNING 71 | ERROR 72 | ) 73 | 74 | type defaultLog struct { 75 | *log.Logger 76 | level loggingLevel 77 | } 78 | 79 | func defaultLogger(level loggingLevel) *defaultLog { 80 | return &defaultLog{Logger: log.New(os.Stderr, "badger ", log.LstdFlags), level: level} 81 | } 82 | 83 | func (l *defaultLog) Errorf(f string, v ...interface{}) { 84 | if l.level <= ERROR { 85 | l.Printf("ERROR: "+f, v...) 86 | } 87 | } 88 | 89 | func (l *defaultLog) Warningf(f string, v ...interface{}) { 90 | if l.level <= WARNING { 91 | l.Printf("WARNING: "+f, v...) 92 | } 93 | } 94 | 95 | func (l *defaultLog) Infof(f string, v ...interface{}) { 96 | if l.level <= INFO { 97 | l.Printf("INFO: "+f, v...) 98 | } 99 | } 100 | 101 | func (l *defaultLog) Debugf(f string, v ...interface{}) { 102 | if l.level <= DEBUG { 103 | l.Printf("DEBUG: "+f, v...) 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /publisher_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package badger 17 | 18 | import ( 19 | "context" 20 | "fmt" 21 | "sync" 22 | "testing" 23 | 24 | "github.com/stretchr/testify/require" 25 | 26 | "github.com/dgraph-io/badger/v2/pb" 27 | ) 28 | 29 | func TestPublisherOrdering(t *testing.T) { 30 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 31 | order := []string{} 32 | var wg sync.WaitGroup 33 | wg.Add(1) 34 | var subWg sync.WaitGroup 35 | subWg.Add(1) 36 | go func() { 37 | subWg.Done() 38 | updates := 0 39 | err := db.Subscribe(context.Background(), func(kvs *pb.KVList) error { 40 | updates += len(kvs.GetKv()) 41 | for _, kv := range kvs.GetKv() { 42 | order = append(order, string(kv.Value)) 43 | } 44 | if updates == 5 { 45 | wg.Done() 46 | } 47 | return nil 48 | }, []byte("ke")) 49 | if err != nil { 50 | require.Equal(t, err.Error(), context.Canceled.Error()) 51 | } 52 | }() 53 | subWg.Wait() 54 | for i := 0; i < 5; i++ { 55 | db.Update(func(txn *Txn) error { 56 | e := NewEntry([]byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("value%d", i))) 57 | return txn.SetEntry(e) 58 | }) 59 | } 60 | wg.Wait() 61 | for i := 0; i < 5; i++ { 62 | require.Equal(t, fmt.Sprintf("value%d", i), order[i]) 63 | } 64 | }) 65 | } 66 | 67 | func TestMultiplePrefix(t *testing.T) { 68 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 69 | var wg sync.WaitGroup 70 | wg.Add(1) 71 | var subWg sync.WaitGroup 72 | subWg.Add(1) 73 | go func() { 74 | subWg.Done() 75 | updates := 0 76 | err := db.Subscribe(context.Background(), func(kvs *pb.KVList) error { 77 | updates += len(kvs.GetKv()) 78 | for _, kv := range kvs.GetKv() { 79 | if string(kv.Key) == "key" { 80 | require.Equal(t, string(kv.Value), "value") 81 | } else { 82 | require.Equal(t, string(kv.Value), "badger") 83 | } 84 | } 85 | if updates == 2 { 86 | wg.Done() 87 | } 88 | return nil 89 | }, []byte("ke"), []byte("hel")) 90 | if err != nil { 91 | require.Equal(t, err.Error(), context.Canceled.Error()) 92 | } 93 | }() 94 | subWg.Wait() 95 | db.Update(func(txn *Txn) error { 96 | return txn.SetEntry(NewEntry([]byte("key"), []byte("value"))) 97 | }) 98 | db.Update(func(txn *Txn) error { 99 | return txn.SetEntry(NewEntry([]byte("hello"), []byte("badger"))) 100 | }) 101 | wg.Wait() 102 | }) 103 | } 104 | -------------------------------------------------------------------------------- /badger/cmd/stream.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "io" 21 | "math" 22 | "os" 23 | 24 | "github.com/dgraph-io/badger/v2" 25 | "github.com/dgraph-io/badger/v2/options" 26 | "github.com/dgraph-io/badger/v2/y" 27 | "github.com/pkg/errors" 28 | "github.com/spf13/cobra" 29 | ) 30 | 31 | var streamCmd = &cobra.Command{ 32 | Use: "stream", 33 | Short: "Stream DB into another DB with different options", 34 | Long: ` 35 | This command streams the contents of this DB into another DB with the given options. 36 | `, 37 | RunE: stream, 38 | } 39 | 40 | var outDir string 41 | var compressionType uint32 42 | 43 | func init() { 44 | // TODO: Add more options. 45 | RootCmd.AddCommand(streamCmd) 46 | streamCmd.Flags().StringVarP(&outDir, "out", "o", "", 47 | "Path to output DB. The directory should be empty.") 48 | streamCmd.Flags().BoolVarP(&readOnly, "read_only", "", true, 49 | "Option to open input DB in read-only mode") 50 | streamCmd.Flags().IntVarP(&numVersions, "num_versions", "", 0, 51 | "Option to configure the maximum number of versions per key. "+ 52 | "Values <= 0 will be considered to have the max number of versions.") 53 | streamCmd.Flags().Uint32VarP(&compressionType, "compression", "", 0, 54 | "Option to configure the compression type in output DB. "+ 55 | "0 to disable, 1 for Snappy, and 2 for ZSTD.") 56 | } 57 | 58 | func stream(cmd *cobra.Command, args []string) error { 59 | // Check that outDir doesn't exist or is empty. 60 | if _, err := os.Stat(outDir); err == nil { 61 | f, err := os.Open(outDir) 62 | if err != nil { 63 | return err 64 | } 65 | defer f.Close() 66 | 67 | _, err = f.Readdirnames(1) 68 | if err != io.EOF { 69 | return errors.Errorf("cannot run stream tool on non-empty output directory %s", outDir) 70 | } 71 | } 72 | 73 | // Options for input DB. 74 | if numVersions <= 0 { 75 | numVersions = math.MaxInt32 76 | } 77 | inOpt := badger.DefaultOptions(sstDir). 78 | WithReadOnly(readOnly). 79 | WithValueThreshold(1 << 10 /* 1KB */). 80 | WithNumVersionsToKeep(numVersions) 81 | 82 | // Options for output DB. 83 | if compressionType < 0 || compressionType > 2 { 84 | return errors.Errorf( 85 | "compression value must be one of 0 (disabled), 1 (Snappy), or 2 (ZSTD)") 86 | } 87 | outOpt := inOpt.WithDir(outDir).WithValueDir(outDir). 88 | WithCompression(options.CompressionType(compressionType)).WithReadOnly(false) 89 | 90 | inDB, err := badger.OpenManaged(inOpt) 91 | if err != nil { 92 | return y.Wrapf(err, "cannot open DB at %s", sstDir) 93 | } 94 | defer inDB.Close() 95 | return inDB.StreamDB(outOpt) 96 | } 97 | -------------------------------------------------------------------------------- /docs/content/design/index.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Design" 3 | aliases = ["/design"] 4 | +++ 5 | 6 | Badger was written with these design goals in mind: 7 | 8 | - Write a key-value database in pure Go. 9 | - Use latest research to build the fastest KV database for data sets spanning terabytes. 10 | - Optimize for SSDs. 11 | 12 | Badger’s design is based on a paper titled _[WiscKey: Separating Keys from 13 | Values in SSD-conscious Storage][wisckey]_. 
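The key idea Badger takes from that paper is to keep the LSM tree small: the tree stores keys together with small, fixed-size pointers, while the values themselves live in an append-only value log. The sketch below only illustrates that layout, assuming a WiscKey-style design; the type and field names are hypothetical, not Badger's actual API.

```go
// Illustrative only: a WiscKey-style value pointer. The LSM tree stores the key
// plus this small pointer; the (possibly large) value is appended to a value log.
type valuePointer struct {
	Fid    uint32 // id of the value log file that holds the entry
	Offset uint32 // byte offset of the entry within that file
	Len    uint32 // length of the encoded entry at that offset
}

// A read is then a two-step lookup: find the pointer in the LSM tree, then read
// Len bytes at Offset from value log file Fid. Keeping values out of the tree
// keeps compactions small, which is where the reduced write amplification
// mentioned in the comparison below comes from.
```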
14 | 15 | [wisckey]: https://www.usenix.org/system/files/conference/fast16/fast16-papers-lu.pdf 16 | 17 | ## Comparisons 18 | | Feature | Badger | RocksDB | BoltDB | 19 | | ------- | ------ | ------- | ------ | 20 | | Design | LSM tree with value log | LSM tree only | B+ tree | 21 | | High Read throughput | Yes | No | Yes | 22 | | High Write throughput | Yes | Yes | No | 23 | | Designed for SSDs | Yes (with latest research 1) | Not specifically 2 | No | 24 | | Embeddable | Yes | Yes | Yes | 25 | | Sorted KV access | Yes | Yes | Yes | 26 | | Pure Go (no Cgo) | Yes | No | Yes | 27 | | Transactions | Yes, ACID, concurrent with SSI3 | Yes (but non-ACID) | Yes, ACID | 28 | | Snapshots | Yes | Yes | Yes | 29 | | TTL support | Yes | Yes | No | 30 | | 3D access (key-value-version) | Yes4 | No | No | 31 | 32 | 1 The [WISCKEY paper][wisckey] (on which Badger is based) saw big 33 | wins with separating values from keys, significantly reducing the write 34 | amplification compared to a typical LSM tree. 35 | 36 | 2 RocksDB is an SSD optimized version of LevelDB, which was designed specifically for rotating disks. 37 | As such RocksDB's design isn't aimed at SSDs. 38 | 39 | 3 SSI: Serializable Snapshot Isolation. For more details, see the blog post [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/) 40 | 41 | 4 Badger provides direct access to value versions via its Iterator API. 42 | Users can also specify how many versions to keep per key via Options. 43 | 44 | ## Benchmarks 45 | We have run comprehensive benchmarks against RocksDB, Bolt and LMDB. The 46 | benchmarking code, and the detailed logs for the benchmarks can be found in the 47 | [badger-bench] repo. More explanation, including graphs can be found the blog posts (linked 48 | above). 49 | 50 | [badger-bench]: https://github.com/dgraph-io/badger-bench 51 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution Guide 2 | 3 | * [Before you get started](#before-you-get-started) 4 | * [Code of Conduct](#code-of-conduct) 5 | * [Your First Contribution](#your-first-contribution) 6 | * [Find a good first topic](#find-a-good-first-topic) 7 | * [Setting up your development environment](#setting-up-your-development-environment) 8 | * [Fork the project](#fork-the-project) 9 | * [Clone the project](#clone-the-project) 10 | * [New branch for a new code](#new-branch-for-a-new-code) 11 | * [Test](#test) 12 | * [Commit and push](#commit-and-push) 13 | * [Create a Pull Request](#create-a-pull-request) 14 | * [Sign the CLA](#sign-the-cla) 15 | * [Get a code review](#get-a-code-review) 16 | 17 | ## Before you get started 18 | 19 | ### Code of Conduct 20 | 21 | Please make sure to read and observe our [Code of Conduct](./CODE_OF_CONDUCT.md). 22 | 23 | ## Your First Contribution 24 | 25 | ### Find a good first topic 26 | 27 | You can start by finding an existing issue with the 28 | [good first issue](https://github.com/dgraph-io/badger/labels/good%20first%20issue) or [help wanted](https://github.com/dgraph-io/badger/labels/help%20wanted) labels. These issues are well suited for new contributors. 29 | 30 | 31 | ## Setting up your development environment 32 | 33 | Badger uses [`Go Modules`](https://github.com/golang/go/wiki/Modules) 34 | to manage dependencies. The version of Go should be **1.12** or above. 
35 | 36 | ### Fork the project 37 | 38 | - Visit https://github.com/dgraph-io/badger 39 | - Click the `Fork` button (top right) to create a fork of the repository 40 | 41 | ### Clone the project 42 | 43 | ```sh 44 | $ git clone https://github.com/$GITHUB_USER/badger 45 | $ cd badger 46 | $ git remote add upstream git@github.com:dgraph-io/badger.git 47 | 48 | # Never push to the upstream master 49 | git remote set-url --push upstream no_push 50 | ``` 51 | 52 | ### New branch for a new code 53 | 54 | Get your local master up to date: 55 | 56 | ```sh 57 | $ git fetch upstream 58 | $ git checkout master 59 | $ git rebase upstream/master 60 | ``` 61 | 62 | Create a new branch from the master: 63 | 64 | ```sh 65 | $ git checkout -b my_new_feature 66 | ``` 67 | 68 | And now you can finally add your changes to project. 69 | 70 | ### Test 71 | 72 | Build and run all tests: 73 | 74 | ```sh 75 | $ ./test.sh 76 | ``` 77 | 78 | ### Commit and push 79 | 80 | Commit your changes: 81 | 82 | ```sh 83 | $ git commit 84 | ``` 85 | 86 | When the changes are ready to review: 87 | 88 | ```sh 89 | $ git push origin my_new_feature 90 | ``` 91 | 92 | ### Create a Pull Request 93 | 94 | Just open `https://github.com/$GITHUB_USER/badger/pull/new/my_new_feature` and 95 | fill the PR description. 96 | 97 | ### Sign the CLA 98 | 99 | Click the **Sign in with Github to agree** button to sign the CLA. [An example](https://cla-assistant.io/dgraph-io/badger?pullRequest=1377). 100 | 101 | ### Get a code review 102 | 103 | If your pull request (PR) is opened, it will be assigned to one or more 104 | reviewers. Those reviewers will do a code review. 105 | 106 | To address review comments, you should commit the changes to the same branch of 107 | the PR on your fork. 108 | -------------------------------------------------------------------------------- /managed_db.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | // OpenManaged returns a new DB, which allows more control over setting 20 | // transaction timestamps, aka managed mode. 21 | // 22 | // This is only useful for databases built on top of Badger (like Dgraph), and 23 | // can be ignored by most users. 24 | func OpenManaged(opts Options) (*DB, error) { 25 | opts.managedTxns = true 26 | return Open(opts) 27 | } 28 | 29 | // NewTransactionAt follows the same logic as DB.NewTransaction(), but uses the 30 | // provided read timestamp. 31 | // 32 | // This is only useful for databases built on top of Badger (like Dgraph), and 33 | // can be ignored by most users. 34 | func (db *DB) NewTransactionAt(readTs uint64, update bool) *Txn { 35 | if !db.opt.managedTxns { 36 | panic("Cannot use NewTransactionAt with managedDB=false. 
Use NewTransaction instead.") 37 | } 38 | txn := db.newTransaction(update, true) 39 | txn.readTs = readTs 40 | return txn 41 | } 42 | 43 | // NewWriteBatchAt is similar to NewWriteBatch but it allows user to set the commit timestamp. 44 | // NewWriteBatchAt is supposed to be used only in the managed mode. 45 | func (db *DB) NewWriteBatchAt(commitTs uint64) *WriteBatch { 46 | if !db.opt.managedTxns { 47 | panic("cannot use NewWriteBatchAt with managedDB=false. Use NewWriteBatch instead") 48 | } 49 | 50 | wb := db.newWriteBatch(true) 51 | wb.commitTs = commitTs 52 | wb.txn.commitTs = commitTs 53 | return wb 54 | } 55 | func (db *DB) NewManagedWriteBatch() *WriteBatch { 56 | if !db.opt.managedTxns { 57 | panic("cannot use NewManagedWriteBatch with managedDB=false. Use NewWriteBatch instead") 58 | } 59 | 60 | wb := db.newWriteBatch(true) 61 | return wb 62 | } 63 | 64 | // CommitAt commits the transaction, following the same logic as Commit(), but 65 | // at the given commit timestamp. This will panic if not used with managed transactions. 66 | // 67 | // This is only useful for databases built on top of Badger (like Dgraph), and 68 | // can be ignored by most users. 69 | func (txn *Txn) CommitAt(commitTs uint64, callback func(error)) error { 70 | if !txn.db.opt.managedTxns { 71 | panic("Cannot use CommitAt with managedDB=false. Use Commit instead.") 72 | } 73 | txn.commitTs = commitTs 74 | if callback == nil { 75 | return txn.Commit() 76 | } 77 | txn.CommitWith(callback) 78 | return nil 79 | } 80 | 81 | // SetDiscardTs sets a timestamp at or below which, any invalid or deleted 82 | // versions can be discarded from the LSM tree, and thence from the value log to 83 | // reclaim disk space. Can only be used with managed transactions. 84 | func (db *DB) SetDiscardTs(ts uint64) { 85 | if !db.opt.managedTxns { 86 | panic("Cannot use SetDiscardTs with managedDB=false.") 87 | } 88 | db.orc.setDiscardTs(ts) 89 | } 90 | -------------------------------------------------------------------------------- /util.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "encoding/hex" 21 | "io/ioutil" 22 | "math/rand" 23 | "sync/atomic" 24 | "time" 25 | 26 | "github.com/dgraph-io/badger/v2/table" 27 | "github.com/dgraph-io/badger/v2/y" 28 | "github.com/pkg/errors" 29 | ) 30 | 31 | func (s *levelsController) validate() error { 32 | for _, l := range s.levels { 33 | if err := l.validate(); err != nil { 34 | return y.Wrap(err, "Levels Controller") 35 | } 36 | } 37 | return nil 38 | } 39 | 40 | // Check does some sanity check on one level of data or in-memory index. 
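// For level 0 no ordering is enforced, so validate returns nil there. For higher
// levels it verifies two invariants over the level's sorted table list:
//   - Inter-table: the biggest key of table j-1 must be strictly smaller than the
//     smallest key of table j (tables must not overlap).
//   - Intra-table: a table's smallest key must not be greater than its biggest key.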
41 | func (s *levelHandler) validate() error { 42 | if s.level == 0 { 43 | return nil 44 | } 45 | 46 | s.RLock() 47 | defer s.RUnlock() 48 | numTables := len(s.tables) 49 | for j := 1; j < numTables; j++ { 50 | if j >= len(s.tables) { 51 | return errors.Errorf("Level %d, j=%d numTables=%d", s.level, j, numTables) 52 | } 53 | 54 | if y.CompareKeys(s.tables[j-1].Biggest(), s.tables[j].Smallest()) >= 0 { 55 | return errors.Errorf( 56 | "Inter: Biggest(j-1) \n%s\n vs Smallest(j): \n%s\n: level=%d j=%d numTables=%d", 57 | hex.Dump(s.tables[j-1].Biggest()), hex.Dump(s.tables[j].Smallest()), 58 | s.level, j, numTables) 59 | } 60 | 61 | if y.CompareKeys(s.tables[j].Smallest(), s.tables[j].Biggest()) > 0 { 62 | return errors.Errorf( 63 | "Intra: \n%s\n vs \n%s\n: level=%d j=%d numTables=%d", 64 | hex.Dump(s.tables[j].Smallest()), hex.Dump(s.tables[j].Biggest()), s.level, j, numTables) 65 | } 66 | } 67 | return nil 68 | } 69 | 70 | // func (s *KV) debugPrintMore() { s.lc.debugPrintMore() } 71 | 72 | // // debugPrintMore shows key ranges of each level. 73 | // func (s *levelsController) debugPrintMore() { 74 | // s.Lock() 75 | // defer s.Unlock() 76 | // for i := 0; i < s.kv.opt.MaxLevels; i++ { 77 | // s.levels[i].debugPrintMore() 78 | // } 79 | // } 80 | 81 | // func (s *levelHandler) debugPrintMore() { 82 | // s.RLock() 83 | // defer s.RUnlock() 84 | // s.elog.Printf("Level %d:", s.level) 85 | // for _, t := range s.tables { 86 | // y.Printf(" [%s, %s]", t.Smallest(), t.Biggest()) 87 | // } 88 | // y.Printf("\n") 89 | // } 90 | 91 | // reserveFileID reserves a unique file id. 92 | func (s *levelsController) reserveFileID() uint64 { 93 | id := atomic.AddUint64(&s.nextFileID, 1) 94 | return id - 1 95 | } 96 | 97 | func getIDMap(dir string) map[uint64]struct{} { 98 | fileInfos, err := ioutil.ReadDir(dir) 99 | y.Check(err) 100 | idMap := make(map[uint64]struct{}) 101 | for _, info := range fileInfos { 102 | if info.IsDir() { 103 | continue 104 | } 105 | fileID, ok := table.ParseFileID(info.Name()) 106 | if !ok { 107 | continue 108 | } 109 | idMap[fileID] = struct{}{} 110 | } 111 | return idMap 112 | } 113 | 114 | func init() { 115 | rand.Seed(time.Now().UnixNano()) 116 | } 117 | -------------------------------------------------------------------------------- /histogram_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "testing" 21 | 22 | "github.com/stretchr/testify/require" 23 | ) 24 | 25 | func TestBuildKeyValueSizeHistogram(t *testing.T) { 26 | t.Run("All same size key-values", func(t *testing.T) { 27 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 28 | entries := int64(40) 29 | err := db.Update(func(txn *Txn) error { 30 | for i := rune(0); i < rune(entries); i++ { 31 | err := txn.SetEntry(NewEntry([]byte(string(i)), []byte("B"))) 32 | if err != nil { 33 | return err 34 | } 35 | } 36 | return nil 37 | }) 38 | require.NoError(t, err) 39 | 40 | histogram := db.buildHistogram(nil) 41 | keyHistogram := histogram.keySizeHistogram 42 | valueHistogram := histogram.valueSizeHistogram 43 | 44 | require.Equal(t, entries, keyHistogram.totalCount) 45 | require.Equal(t, entries, valueHistogram.totalCount) 46 | 47 | // Each entry is of size one. So the sum of sizes should be the same 48 | // as number of entries 49 | require.Equal(t, entries, valueHistogram.sum) 50 | require.Equal(t, entries, keyHistogram.sum) 51 | 52 | // All value sizes are same. The first bin should have all the values. 53 | require.Equal(t, entries, valueHistogram.countPerBin[0]) 54 | require.Equal(t, entries, keyHistogram.countPerBin[0]) 55 | 56 | require.Equal(t, int64(1), keyHistogram.max) 57 | require.Equal(t, int64(1), keyHistogram.min) 58 | require.Equal(t, int64(1), valueHistogram.max) 59 | require.Equal(t, int64(1), valueHistogram.min) 60 | }) 61 | }) 62 | 63 | t.Run("different size key-values", func(t *testing.T) { 64 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 65 | entries := int64(3) 66 | err := db.Update(func(txn *Txn) error { 67 | if err := txn.SetEntry(NewEntry([]byte("A"), []byte("B"))); err != nil { 68 | return err 69 | } 70 | 71 | if err := txn.SetEntry(NewEntry([]byte("AA"), []byte("BB"))); err != nil { 72 | return err 73 | } 74 | 75 | return txn.SetEntry(NewEntry([]byte("AAA"), []byte("BBB"))) 76 | }) 77 | require.NoError(t, err) 78 | 79 | histogram := db.buildHistogram(nil) 80 | keyHistogram := histogram.keySizeHistogram 81 | valueHistogram := histogram.valueSizeHistogram 82 | 83 | require.Equal(t, entries, keyHistogram.totalCount) 84 | require.Equal(t, entries, valueHistogram.totalCount) 85 | 86 | // Each entry is of size one. So the sum of sizes should be the same 87 | // as number of entries 88 | require.Equal(t, int64(6), valueHistogram.sum) 89 | require.Equal(t, int64(6), keyHistogram.sum) 90 | 91 | // Length 1 key is in first bucket, length 2 and 3 are in the second 92 | // bucket 93 | require.Equal(t, int64(1), valueHistogram.countPerBin[0]) 94 | require.Equal(t, int64(2), valueHistogram.countPerBin[1]) 95 | require.Equal(t, int64(1), keyHistogram.countPerBin[0]) 96 | require.Equal(t, int64(2), keyHistogram.countPerBin[1]) 97 | 98 | require.Equal(t, int64(3), keyHistogram.max) 99 | require.Equal(t, int64(1), keyHistogram.min) 100 | require.Equal(t, int64(3), valueHistogram.max) 101 | require.Equal(t, int64(1), valueHistogram.min) 102 | }) 103 | }) 104 | } 105 | -------------------------------------------------------------------------------- /y/bloom_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 The LevelDB-Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | package y 6 | 7 | import ( 8 | "testing" 9 | ) 10 | 11 | func (f Filter) String() string { 12 | s := make([]byte, 8*len(f)) 13 | for i, x := range f { 14 | for j := 0; j < 8; j++ { 15 | if x&(1<> 0) 72 | b[1] = uint8(uint32(i) >> 8) 73 | b[2] = uint8(uint32(i) >> 16) 74 | b[3] = uint8(uint32(i) >> 24) 75 | return b 76 | } 77 | 78 | nMediocreFilters, nGoodFilters := 0, 0 79 | loop: 80 | for length := 1; length <= 10000; length = nextLength(length) { 81 | keys := make([][]byte, 0, length) 82 | for i := 0; i < length; i++ { 83 | keys = append(keys, le32(i)) 84 | } 85 | var hashes []uint32 86 | for _, key := range keys { 87 | hashes = append(hashes, Hash(key)) 88 | } 89 | f := NewFilter(hashes, 10) 90 | 91 | if len(f) > (length*10/8)+40 { 92 | t.Errorf("length=%d: len(f)=%d is too large", length, len(f)) 93 | continue 94 | } 95 | 96 | // All added keys must match. 97 | for _, key := range keys { 98 | if !f.MayContainKey(key) { 99 | t.Errorf("length=%d: did not contain key %q", length, key) 100 | continue loop 101 | } 102 | } 103 | 104 | // Check false positive rate. 105 | nFalsePositive := 0 106 | for i := 0; i < 10000; i++ { 107 | if f.MayContainKey(le32(1e9 + i)) { 108 | nFalsePositive++ 109 | } 110 | } 111 | if nFalsePositive > 0.02*10000 { 112 | t.Errorf("length=%d: %d false positives in 10000", length, nFalsePositive) 113 | continue 114 | } 115 | if nFalsePositive > 0.0125*10000 { 116 | nMediocreFilters++ 117 | } else { 118 | nGoodFilters++ 119 | } 120 | } 121 | 122 | if nMediocreFilters > nGoodFilters/5 { 123 | t.Errorf("%d mediocre filters but only %d good filters", nMediocreFilters, nGoodFilters) 124 | } 125 | } 126 | 127 | func TestHash(t *testing.T) { 128 | // The magic want numbers come from running the C++ leveldb code in hash.cc. 129 | testCases := []struct { 130 | s string 131 | want uint32 132 | }{ 133 | {"", 0xbc9f1d34}, 134 | {"g", 0xd04a8bda}, 135 | {"go", 0x3e0b0745}, 136 | {"gop", 0x0c326610}, 137 | {"goph", 0x8c9d6390}, 138 | {"gophe", 0x9bfd4b0a}, 139 | {"gopher", 0xa78edc7c}, 140 | {"I had a dream it would end this way.", 0xe14a9db9}, 141 | } 142 | for _, tc := range testCases { 143 | if got := Hash([]byte(tc.s)); got != tc.want { 144 | t.Errorf("s=%q: got 0x%08x, want 0x%08x", tc.s, got, tc.want) 145 | } 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /docs/content/projects-using-badger/index.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Projects Using Badger" 3 | aliases = ["/project-using-badger"] 4 | +++ 5 | 6 | Below is a list of known projects that use Badger: 7 | 8 | * [Dgraph](https://github.com/dgraph-io/dgraph) - Distributed graph database. 9 | * [Jaeger](https://github.com/jaegertracing/jaeger) - Distributed tracing platform. 10 | * [go-ipfs](https://github.com/ipfs/go-ipfs) - Go client for the InterPlanetary File System (IPFS), a new hypermedia distribution protocol. 11 | * [Riot](https://github.com/go-ego/riot) - An open-source, distributed search engine. 12 | * [emitter](https://github.com/emitter-io/emitter) - Scalable, low latency, distributed pub/sub broker with message storage, uses MQTT, gossip and badger. 13 | * [OctoSQL](https://github.com/cube2222/octosql) - Query tool that allows you to join, analyse and transform data from multiple databases using SQL. 14 | * [Dkron](https://dkron.io/) - Distributed, fault tolerant job scheduling system. 
15 | * [Sandglass](https://github.com/celrenheit/sandglass) - distributed, horizontally scalable, persistent, time sorted message queue. 16 | * [TalariaDB](https://github.com/grab/talaria) - Grab's Distributed, low latency time-series database. 17 | * [Sloop](https://github.com/salesforce/sloop) - Salesforce's Kubernetes History Visualization Project. 18 | * [Immudb](https://github.com/codenotary/immudb) - Lightweight, high-speed immutable database for systems and applications. 19 | * [Usenet Express](https://usenetexpress.com/) - Serving over 300TB of data with Badger. 20 | * [gorush](https://github.com/appleboy/gorush) - A push notification server written in Go. 21 | * [0-stor](https://github.com/zero-os/0-stor) - Single device object store. 22 | * [Dispatch Protocol](https://github.com/dispatchlabs/disgo) - Blockchain protocol for distributed application data analytics. 23 | * [GarageMQ](https://github.com/valinurovam/garagemq) - AMQP server written in Go. 24 | * [RedixDB](https://alash3al.github.io/redix/) - A real-time persistent key-value store with the same redis protocol. 25 | * [BBVA](https://github.com/BBVA/raft-badger) - Raft backend implementation using BadgerDB for Hashicorp raft. 26 | * [Fantom](https://github.com/Fantom-foundation/go-lachesis) - aBFT Consensus platform for distributed applications. 27 | * [decred](https://github.com/decred/dcrdata) - An open, progressive, and self-funding cryptocurrency with a system of community-based governance integrated into its blockchain. 28 | * [OpenNetSys](https://github.com/opennetsys/c3-go) - Create useful dApps in any software language. 29 | * [HoneyTrap](https://github.com/honeytrap/honeytrap) - An extensible and opensource system for running, monitoring and managing honeypots. 30 | * [Insolar](https://github.com/insolar/insolar) - Enterprise-ready blockchain platform. 31 | * [IoTeX](https://github.com/iotexproject/iotex-core) - The next generation of the decentralized network for IoT powered by scalability- and privacy-centric blockchains. 32 | * [go-sessions](https://github.com/kataras/go-sessions) - The sessions manager for Go net/http and fasthttp. 33 | * [Babble](https://github.com/mosaicnetworks/babble) - BFT Consensus platform for distributed applications. 34 | * [Tormenta](https://github.com/jpincas/tormenta) - Embedded object-persistence layer / simple JSON database for Go projects. 35 | * [BadgerHold](https://github.com/timshannon/badgerhold) - An embeddable NoSQL store for querying Go types built on Badger 36 | * [Goblero](https://github.com/didil/goblero) - Pure Go embedded persistent job queue backed by BadgerDB 37 | * [Surfline](https://www.surfline.com) - Serving global wave and weather forecast data with Badger. 38 | * [Cete](https://github.com/mosuka/cete) - Simple and highly available distributed key-value store built on Badger. Makes it easy bringing up a cluster of Badger with Raft consensus algorithm by hashicorp/raft. 39 | * [Volument](https://volument.com/) - A new take on website analytics backed by Badger. 40 | * [KVdb](https://kvdb.io/) - Hosted key-value store and serverless platform built on top of Badger. 41 | 42 | If you are using Badger in a project please send a pull request to add it to the list. -------------------------------------------------------------------------------- /table/README.md: -------------------------------------------------------------------------------- 1 | Size of table is 123,217,667 bytes for all benchmarks. 
2 | 3 | # BenchmarkRead 4 | ``` 5 | $ go test -bench ^BenchmarkRead$ -run ^$ -count 3 6 | goos: linux 7 | goarch: amd64 8 | pkg: github.com/dgraph-io/badger/table 9 | BenchmarkRead-16 10 154074944 ns/op 10 | BenchmarkRead-16 10 154340411 ns/op 11 | BenchmarkRead-16 10 151914489 ns/op 12 | PASS 13 | ok github.com/dgraph-io/badger/table 22.467s 14 | ``` 15 | 16 | Size of table is 123,217,667 bytes, which is ~118MB. 17 | 18 | The rate is ~762MB/s using LoadToRAM (when table is in RAM). 19 | 20 | To read a 64MB table, this would take ~0.084s, which is negligible. 21 | 22 | # BenchmarkReadAndBuild 23 | ```go 24 | $ go test -bench BenchmarkReadAndBuild -run ^$ -count 3 25 | goos: linux 26 | goarch: amd64 27 | pkg: github.com/dgraph-io/badger/table 28 | BenchmarkReadAndBuild-16 1 1026755231 ns/op 29 | BenchmarkReadAndBuild-16 1 1009543316 ns/op 30 | BenchmarkReadAndBuild-16 1 1039920546 ns/op 31 | PASS 32 | ok github.com/dgraph-io/badger/table 12.081s 33 | ``` 34 | 35 | The rate is ~123MB/s. To build a 64MB table, this would take ~0.56s. Note that this 36 | does NOT include the flushing of the table to disk. All we are doing above is 37 | reading one table (which is in RAM) and write one table in memory. 38 | 39 | The table building takes 0.56-0.084s ~ 0.4823s. 40 | 41 | # BenchmarkReadMerged 42 | Below, we merge 5 tables. The total size remains unchanged at ~122M. 43 | 44 | ```go 45 | $ go test -bench ReadMerged -run ^$ -count 3 46 | goos: linux 47 | goarch: amd64 48 | pkg: github.com/dgraph-io/badger/table 49 | BenchmarkReadMerged-16 2 977588975 ns/op 50 | BenchmarkReadMerged-16 2 982140738 ns/op 51 | BenchmarkReadMerged-16 2 962046017 ns/op 52 | PASS 53 | ok github.com/dgraph-io/badger/table 27.433s 54 | ``` 55 | 56 | The rate is ~120MB/s. To read a 64MB table using merge iterator, this would take ~0.53s. 57 | 58 | # BenchmarkRandomRead 59 | 60 | ```go 61 | go test -bench BenchmarkRandomRead$ -run ^$ -count 3 62 | goos: linux 63 | goarch: amd64 64 | pkg: github.com/dgraph-io/badger/table 65 | BenchmarkRandomRead-16 500000 2645 ns/op 66 | BenchmarkRandomRead-16 500000 2648 ns/op 67 | BenchmarkRandomRead-16 500000 2614 ns/op 68 | PASS 69 | ok github.com/dgraph-io/badger/table 50.850s 70 | ``` 71 | For random read benchmarking, we are randomly reading a key and verifying its value. 72 | 73 | # DB Open benchmark 74 | 1. Create badger DB with 2 billion key-value pairs (about 380GB of data) 75 | ``` 76 | badger fill -m 2000 --dir="/tmp/data" --sorted 77 | ``` 78 | 2. Clear buffers and swap memory 79 | ``` 80 | free -mh && sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && sudo swapoff -a && sudo swapon -a && free -mh 81 | ``` 82 | Also flush disk buffers 83 | ``` 84 | blockdev --flushbufs /dev/nvme0n1p4 85 | ``` 86 | 3. 
Run the benchmark 87 | ``` 88 | go test -run=^$ github.com/dgraph-io/badger -bench ^BenchmarkDBOpen$ -benchdir="/tmp/data" -v 89 | 90 | badger 2019/06/04 17:15:56 INFO: 126 tables out of 1028 opened in 3.017s 91 | badger 2019/06/04 17:15:59 INFO: 257 tables out of 1028 opened in 6.014s 92 | badger 2019/06/04 17:16:02 INFO: 387 tables out of 1028 opened in 9.017s 93 | badger 2019/06/04 17:16:05 INFO: 516 tables out of 1028 opened in 12.025s 94 | badger 2019/06/04 17:16:08 INFO: 645 tables out of 1028 opened in 15.013s 95 | badger 2019/06/04 17:16:11 INFO: 775 tables out of 1028 opened in 18.008s 96 | badger 2019/06/04 17:16:14 INFO: 906 tables out of 1028 opened in 21.003s 97 | badger 2019/06/04 17:16:17 INFO: All 1028 tables opened in 23.851s 98 | badger 2019/06/04 17:16:17 INFO: Replaying file id: 1998 at offset: 332000 99 | badger 2019/06/04 17:16:17 INFO: Replay took: 9.81µs 100 | goos: linux 101 | goarch: amd64 102 | pkg: github.com/dgraph-io/badger 103 | BenchmarkDBOpen-16 1 23930082140 ns/op 104 | PASS 105 | ok github.com/dgraph-io/badger 24.076s 106 | 107 | ``` 108 | It takes about 23.851s to open a DB with 2 billion sorted key-value entries. 109 | -------------------------------------------------------------------------------- /dir_unix.go: -------------------------------------------------------------------------------- 1 | // +build !windows,!plan9 2 | 3 | /* 4 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package badger 20 | 21 | import ( 22 | "fmt" 23 | "io/ioutil" 24 | "os" 25 | "path/filepath" 26 | 27 | "github.com/dgraph-io/badger/v2/y" 28 | "golang.org/x/sys/unix" 29 | ) 30 | 31 | // directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part 32 | // of the locking mechanism, it's just advisory. 33 | type directoryLockGuard struct { 34 | // File handle on the directory, which we've flocked. 35 | f *os.File 36 | // The absolute path to our pid file. 37 | path string 38 | // Was this a shared lock for a read-only database? 39 | readOnly bool 40 | } 41 | 42 | // acquireDirectoryLock gets a lock on the directory (using flock). If 43 | // this is not read-only, it will also write our pid to 44 | // dirPath/pidFileName for convenience. 45 | func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) ( 46 | *directoryLockGuard, error) { 47 | // Convert to absolute path so that Release still works even if we do an unbalanced 48 | // chdir in the meantime. 
49 | absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName)) 50 | if err != nil { 51 | return nil, y.Wrapf(err, "cannot get absolute path for pid lock file") 52 | } 53 | f, err := os.Open(dirPath) 54 | if err != nil { 55 | return nil, y.Wrapf(err, "cannot open directory %q", dirPath) 56 | } 57 | opts := unix.LOCK_EX | unix.LOCK_NB 58 | if readOnly { 59 | opts = unix.LOCK_SH | unix.LOCK_NB 60 | } 61 | 62 | err = unix.Flock(int(f.Fd()), opts) 63 | if err != nil { 64 | f.Close() 65 | return nil, y.Wrapf(err, 66 | "Cannot acquire directory lock on %q. Another process is using this Badger database.", 67 | dirPath) 68 | } 69 | 70 | if !readOnly { 71 | // Yes, we happily overwrite a pre-existing pid file. We're the 72 | // only read-write badger process using this directory. 73 | err = ioutil.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666) 74 | if err != nil { 75 | f.Close() 76 | return nil, y.Wrapf(err, 77 | "Cannot write pid file %q", absPidFilePath) 78 | } 79 | } 80 | return &directoryLockGuard{f, absPidFilePath, readOnly}, nil 81 | } 82 | 83 | // Release deletes the pid file and releases our lock on the directory. 84 | func (guard *directoryLockGuard) release() error { 85 | var err error 86 | if !guard.readOnly { 87 | // It's important that we remove the pid file first. 88 | err = os.Remove(guard.path) 89 | } 90 | 91 | if closeErr := guard.f.Close(); err == nil { 92 | err = closeErr 93 | } 94 | guard.path = "" 95 | guard.f = nil 96 | 97 | return err 98 | } 99 | 100 | // openDir opens a directory for syncing. 101 | func openDir(path string) (*os.File, error) { return os.Open(path) } 102 | 103 | // When you create or delete a file, you have to ensure the directory entry for the file is synced 104 | // in order to guarantee the file is visible (if the system crashes). (See the man page for fsync, 105 | // or see https://github.com/coreos/etcd/issues/6368 for an example.) 106 | func syncDir(dir string) error { 107 | f, err := openDir(dir) 108 | if err != nil { 109 | return y.Wrapf(err, "While opening directory: %s.", dir) 110 | } 111 | 112 | err = f.Sync() 113 | closeErr := f.Close() 114 | if err != nil { 115 | return y.Wrapf(err, "While syncing directory: %s.", dir) 116 | } 117 | return y.Wrapf(closeErr, "While closing directory: %s.", dir) 118 | } 119 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/header.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | {{ .Hugo.Generator }} 7 | {{ partial "meta.html" . }} 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | {{.Section | default "Badgerdb Documentation" | humanize}} — {{ .Site.Title }} 36 | 37 | 38 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | {{ partial "topbar.html" . }} 53 | {{ partial "sidebar.html" . }} 54 | 55 |
56 |
57 | -------------------------------------------------------------------------------- /docs/scripts/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script runs in a loop (configurable with LOOP), checks for updates to the 3 | # Hugo docs theme or to the docs on certain branches and rebuilds the public 4 | # folder for them. It has be made more generalized, so that we don't have to 5 | # hardcode versions. 6 | 7 | # Warning - Changes should not be made on the server on which this script is running 8 | # becauses this script does git checkout and merge. 9 | 10 | set -e 11 | 12 | GREEN='\033[32;1m' 13 | RESET='\033[0m' 14 | HOST="${HOST:-https://dgraph.io/docs/badger}" 15 | # Name of output public directory 16 | PUBLIC="${PUBLIC:-public}" 17 | # LOOP true makes this script run in a loop to check for updates 18 | LOOP="${LOOP:-true}" 19 | # Binary of hugo command to run. 20 | HUGO="${HUGO:-hugo}" 21 | 22 | # TODO - Maybe get list of released versions from Github API and filter 23 | # those which have docs. 24 | 25 | # Place the latest version at the beginning so that version selector can 26 | # append '(latest)' to the version string, followed by the master version, 27 | # and then the older versions in descending order, such that the 28 | # build script can place the artifact in an appropriate location. 29 | VERSIONS_ARRAY=( 30 | 'master' 31 | ) 32 | 33 | joinVersions() { 34 | versions=$(printf ",%s" "${VERSIONS_ARRAY[@]}") 35 | echo "${versions:1}" 36 | } 37 | 38 | function version { echo "$@" | gawk -F. '{ printf("%03d%03d%03d\n", $1,$2,$3); }'; } 39 | 40 | rebuild() { 41 | echo -e "$(date) $GREEN Updating docs for branch: $1.$RESET" 42 | 43 | # The latest documentation is generated in the root of /public dir 44 | # Older documentations are generated in their respective `/public/vx.x.x` dirs 45 | dir='' 46 | if [[ $2 != "${VERSIONS_ARRAY[0]}" ]]; then 47 | dir=$2 48 | fi 49 | 50 | VERSION_STRING=$(joinVersions) 51 | # In Unix environments, env variables should also be exported to be seen by Hugo 52 | export CURRENT_BRANCH=${1} 53 | export CURRENT_VERSION=${2} 54 | export VERSIONS=${VERSION_STRING} 55 | 56 | HUGO_TITLE="Badger Doc ${2}"\ 57 | VERSIONS=${VERSION_STRING}\ 58 | CURRENT_BRANCH=${1}\ 59 | CURRENT_VERSION=${2} ${HUGO} \ 60 | --destination="${PUBLIC}"/"$dir"\ 61 | --baseURL="$HOST"/"$dir" 1> /dev/null 62 | } 63 | 64 | branchUpdated() 65 | { 66 | local branch="$1" 67 | git checkout -q "$1" 68 | UPSTREAM=$(git rev-parse "@{u}") 69 | LOCAL=$(git rev-parse "@") 70 | 71 | if [ "$LOCAL" != "$UPSTREAM" ] ; then 72 | git merge -q origin/"$branch" 73 | return 0 74 | else 75 | return 1 76 | fi 77 | } 78 | 79 | publicFolder() 80 | { 81 | dir='' 82 | if [[ $1 == "${VERSIONS_ARRAY[0]}" ]]; then 83 | echo "${PUBLIC}" 84 | else 85 | echo "${PUBLIC}/$1" 86 | fi 87 | } 88 | 89 | checkAndUpdate() 90 | { 91 | local version="$1" 92 | local branch="" 93 | 94 | if [[ $version == "master" ]]; then 95 | branch="master" 96 | else 97 | branch="release/$version" 98 | fi 99 | 100 | if branchUpdated "$branch" ; then 101 | git merge -q origin/"$branch" 102 | rebuild "$branch" "$version" 103 | fi 104 | 105 | folder=$(publicFolder "$version") 106 | if [ "$firstRun" = 1 ] || [ "$themeUpdated" = 0 ] || [ ! -d "$folder" ] ; then 107 | rebuild "$branch" "$version" 108 | fi 109 | } 110 | 111 | 112 | firstRun=1 113 | while true; do 114 | # Lets move to the docs directory. 115 | pushd "$(dirname "$0")/.." 
> /dev/null 116 | 117 | currentBranch=$(git rev-parse --abbrev-ref HEAD) 118 | 119 | # Lets check if the theme was updated. 120 | pushd themes/hugo-docs > /dev/null 121 | git remote update > /dev/null 122 | themeUpdated=1 123 | if branchUpdated "master" ; then 124 | echo -e "$(date) $GREEN Theme has been updated. Now will update the docs.$RESET" 125 | themeUpdated=0 126 | fi 127 | popd > /dev/null 128 | 129 | # Now lets check the theme. 130 | echo -e "$(date) Starting to check branches." 131 | git remote update > /dev/null 132 | 133 | for version in "${VERSIONS_ARRAY[@]}" 134 | do 135 | checkAndUpdate "$version" 136 | done 137 | 138 | echo -e "$(date) Done checking branches.\n" 139 | 140 | git checkout -q "$currentBranch" 141 | popd > /dev/null 142 | 143 | firstRun=0 144 | if ! $LOOP; then 145 | exit 146 | fi 147 | sleep 60 148 | done -------------------------------------------------------------------------------- /dir_windows.go: -------------------------------------------------------------------------------- 1 | // +build windows 2 | 3 | /* 4 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package badger 20 | 21 | // OpenDir opens a directory in windows with write access for syncing. 22 | import ( 23 | "os" 24 | "path/filepath" 25 | "syscall" 26 | 27 | "github.com/dgraph-io/badger/v2/y" 28 | ) 29 | 30 | // FILE_ATTRIBUTE_TEMPORARY - A file that is being used for temporary storage. 31 | // FILE_FLAG_DELETE_ON_CLOSE - The file is to be deleted immediately after all of its handles are 32 | // closed, which includes the specified handle and any other open or duplicated handles. 33 | // See: https://docs.microsoft.com/en-us/windows/desktop/FileIO/file-attribute-constants 34 | // NOTE: Added here to avoid importing golang.org/x/sys/windows 35 | const ( 36 | FILE_ATTRIBUTE_TEMPORARY = 0x00000100 37 | FILE_FLAG_DELETE_ON_CLOSE = 0x04000000 38 | ) 39 | 40 | func openDir(path string) (*os.File, error) { 41 | fd, err := openDirWin(path) 42 | if err != nil { 43 | return nil, err 44 | } 45 | return os.NewFile(uintptr(fd), path), nil 46 | } 47 | 48 | func openDirWin(path string) (fd syscall.Handle, err error) { 49 | if len(path) == 0 { 50 | return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND 51 | } 52 | pathp, err := syscall.UTF16PtrFromString(path) 53 | if err != nil { 54 | return syscall.InvalidHandle, err 55 | } 56 | access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE) 57 | sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) 58 | createmode := uint32(syscall.OPEN_EXISTING) 59 | fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) 60 | return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0) 61 | } 62 | 63 | // DirectoryLockGuard holds a lock on the directory. 64 | type directoryLockGuard struct { 65 | h syscall.Handle 66 | path string 67 | } 68 | 69 | // AcquireDirectoryLock acquires exclusive access to a directory. 
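// On Windows the lock is taken by creating the pid file with
// FILE_ATTRIBUTE_TEMPORARY|FILE_FLAG_DELETE_ON_CLOSE, so only one process can
// hold the handle at a time and the file is removed automatically once that
// process exits. Read-only mode is not supported here and returns
// ErrWindowsNotSupported.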
70 | func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) { 71 | if readOnly { 72 | return nil, ErrWindowsNotSupported 73 | } 74 | 75 | // Convert to absolute path so that Release still works even if we do an unbalanced 76 | // chdir in the meantime. 77 | absLockFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName)) 78 | if err != nil { 79 | return nil, y.Wrap(err, "Cannot get absolute path for pid lock file") 80 | } 81 | 82 | // This call creates a file handler in memory that only one process can use at a time. When 83 | // that process ends, the file is deleted by the system. 84 | // FILE_ATTRIBUTE_TEMPORARY is used to tell Windows to try to create the handle in memory. 85 | // FILE_FLAG_DELETE_ON_CLOSE is not specified in syscall_windows.go but tells Windows to delete 86 | // the file when all processes holding the handler are closed. 87 | // XXX: this works but it's a bit klunky. i'd prefer to use LockFileEx but it needs unsafe pkg. 88 | h, err := syscall.CreateFile( 89 | syscall.StringToUTF16Ptr(absLockFilePath), 0, 0, nil, 90 | syscall.OPEN_ALWAYS, 91 | uint32(FILE_ATTRIBUTE_TEMPORARY|FILE_FLAG_DELETE_ON_CLOSE), 92 | 0) 93 | if err != nil { 94 | return nil, y.Wrapf(err, 95 | "Cannot create lock file %q. Another process is using this Badger database", 96 | absLockFilePath) 97 | } 98 | 99 | return &directoryLockGuard{h: h, path: absLockFilePath}, nil 100 | } 101 | 102 | // Release removes the directory lock. 103 | func (g *directoryLockGuard) release() error { 104 | g.path = "" 105 | return syscall.CloseHandle(g.h) 106 | } 107 | 108 | // Windows doesn't support syncing directories to the file system. See 109 | // https://github.com/dgraph-io/badger/issues/699#issuecomment-504133587 for more details. 110 | func syncDir(dir string) error { return nil } 111 | -------------------------------------------------------------------------------- /publisher.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "sync" 21 | 22 | "github.com/dgraph-io/badger/v2/pb" 23 | "github.com/dgraph-io/badger/v2/trie" 24 | "github.com/dgraph-io/badger/v2/y" 25 | "github.com/dgraph-io/ristretto/z" 26 | ) 27 | 28 | type subscriber struct { 29 | prefixes [][]byte 30 | sendCh chan<- *pb.KVList 31 | subCloser *z.Closer 32 | } 33 | 34 | type publisher struct { 35 | sync.Mutex 36 | pubCh chan requests 37 | subscribers map[uint64]subscriber 38 | nextID uint64 39 | indexer *trie.Trie 40 | } 41 | 42 | func newPublisher() *publisher { 43 | return &publisher{ 44 | pubCh: make(chan requests, 1000), 45 | subscribers: make(map[uint64]subscriber), 46 | nextID: 0, 47 | indexer: trie.NewTrie(), 48 | } 49 | } 50 | 51 | func (p *publisher) listenForUpdates(c *z.Closer) { 52 | defer func() { 53 | p.cleanSubscribers() 54 | c.Done() 55 | }() 56 | slurp := func(batch requests) { 57 | for { 58 | select { 59 | case reqs := <-p.pubCh: 60 | batch = append(batch, reqs...) 61 | default: 62 | p.publishUpdates(batch) 63 | return 64 | } 65 | } 66 | } 67 | for { 68 | select { 69 | case <-c.HasBeenClosed(): 70 | return 71 | case reqs := <-p.pubCh: 72 | slurp(reqs) 73 | } 74 | } 75 | } 76 | 77 | func (p *publisher) publishUpdates(reqs requests) { 78 | p.Lock() 79 | defer func() { 80 | p.Unlock() 81 | // Release all the request. 82 | reqs.DecrRef() 83 | }() 84 | batchedUpdates := make(map[uint64]*pb.KVList) 85 | for _, req := range reqs { 86 | for _, e := range req.Entries { 87 | ids := p.indexer.Get(e.Key) 88 | if len(ids) > 0 { 89 | k := y.SafeCopy(nil, e.Key) 90 | kv := &pb.KV{ 91 | Key: y.ParseKey(k), 92 | Value: y.SafeCopy(nil, e.Value), 93 | Meta: []byte{e.UserMeta}, 94 | ExpiresAt: e.ExpiresAt, 95 | Version: y.ParseTs(k), 96 | } 97 | for id := range ids { 98 | if _, ok := batchedUpdates[id]; !ok { 99 | batchedUpdates[id] = &pb.KVList{} 100 | } 101 | batchedUpdates[id].Kv = append(batchedUpdates[id].Kv, kv) 102 | } 103 | } 104 | } 105 | } 106 | 107 | for id, kvs := range batchedUpdates { 108 | p.subscribers[id].sendCh <- kvs 109 | } 110 | } 111 | 112 | func (p *publisher) newSubscriber(c *z.Closer, prefixes ...[]byte) (<-chan *pb.KVList, uint64) { 113 | p.Lock() 114 | defer p.Unlock() 115 | ch := make(chan *pb.KVList, 1000) 116 | id := p.nextID 117 | // Increment next ID. 118 | p.nextID++ 119 | p.subscribers[id] = subscriber{ 120 | prefixes: prefixes, 121 | sendCh: ch, 122 | subCloser: c, 123 | } 124 | for _, prefix := range prefixes { 125 | p.indexer.Add(prefix, id) 126 | } 127 | return ch, id 128 | } 129 | 130 | // cleanSubscribers stops all the subscribers. Ideally, It should be called while closing DB. 
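// For each subscriber it removes the registered prefixes from the trie index,
// deletes the subscriber from the map, and then signals and waits on the
// subscriber's closer so the corresponding subscription goroutine can exit.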
131 | func (p *publisher) cleanSubscribers() { 132 | p.Lock() 133 | defer p.Unlock() 134 | for id, s := range p.subscribers { 135 | for _, prefix := range s.prefixes { 136 | p.indexer.Delete(prefix, id) 137 | } 138 | delete(p.subscribers, id) 139 | s.subCloser.SignalAndWait() 140 | } 141 | } 142 | 143 | func (p *publisher) deleteSubscriber(id uint64) { 144 | p.Lock() 145 | defer p.Unlock() 146 | if s, ok := p.subscribers[id]; ok { 147 | for _, prefix := range s.prefixes { 148 | p.indexer.Delete(prefix, id) 149 | } 150 | } 151 | delete(p.subscribers, id) 152 | } 153 | 154 | func (p *publisher) sendUpdates(reqs requests) { 155 | if p.noOfSubscribers() != 0 { 156 | reqs.IncrRef() 157 | p.pubCh <- reqs 158 | } 159 | } 160 | 161 | func (p *publisher) noOfSubscribers() int { 162 | p.Lock() 163 | defer p.Unlock() 164 | return len(p.subscribers) 165 | } 166 | -------------------------------------------------------------------------------- /skl/arena.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package skl 18 | 19 | import ( 20 | "sync/atomic" 21 | "unsafe" 22 | 23 | "github.com/dgraph-io/badger/v2/y" 24 | ) 25 | 26 | const ( 27 | offsetSize = int(unsafe.Sizeof(uint32(0))) 28 | 29 | // Always align nodes on 64-bit boundaries, even on 32-bit architectures, 30 | // so that the node.value field is 64-bit aligned. This is necessary because 31 | // node.getValueOffset uses atomic.LoadUint64, which expects its input 32 | // pointer to be 64-bit aligned. 33 | nodeAlign = int(unsafe.Sizeof(uint64(0))) - 1 34 | ) 35 | 36 | // Arena should be lock-free. 37 | type Arena struct { 38 | n uint32 39 | buf []byte 40 | } 41 | 42 | // newArena returns a new arena. 43 | func newArena(n int64) *Arena { 44 | // Don't store data at position 0 in order to reserve offset=0 as a kind 45 | // of nil pointer. 46 | out := &Arena{ 47 | n: 1, 48 | buf: make([]byte, n), 49 | } 50 | return out 51 | } 52 | 53 | func (s *Arena) size() int64 { 54 | return int64(atomic.LoadUint32(&s.n)) 55 | } 56 | 57 | // putNode allocates a node in the arena. The node is aligned on a pointer-sized 58 | // boundary. The arena offset of the node is returned. 59 | func (s *Arena) putNode(height int) uint32 { 60 | // Compute the amount of the tower that will never be used, since the height 61 | // is less than maxHeight. 62 | unusedSize := (maxHeight - height) * offsetSize 63 | 64 | // Pad the allocation with enough bytes to ensure pointer alignment. 65 | l := uint32(MaxNodeSize - unusedSize + nodeAlign) 66 | n := atomic.AddUint32(&s.n, l) 67 | y.AssertTruef(int(n) <= len(s.buf), 68 | "Arena too small, toWrite:%d newTotal:%d limit:%d", 69 | l, n, len(s.buf)) 70 | 71 | // Return the aligned offset. 72 | m := (n - l + uint32(nodeAlign)) & ^uint32(nodeAlign) 73 | return m 74 | } 75 | 76 | // Put will *copy* val into arena. 
To make better use of this, reuse your input 77 | // val buffer. Returns an offset into buf. User is responsible for remembering 78 | // size of val. We could also store this size inside arena but the encoding and 79 | // decoding will incur some overhead. 80 | func (s *Arena) putVal(v y.ValueStruct) uint32 { 81 | l := uint32(v.EncodedSize()) 82 | n := atomic.AddUint32(&s.n, l) 83 | y.AssertTruef(int(n) <= len(s.buf), 84 | "Arena too small, toWrite:%d newTotal:%d limit:%d", 85 | l, n, len(s.buf)) 86 | m := n - l 87 | v.Encode(s.buf[m:]) 88 | return m 89 | } 90 | 91 | func (s *Arena) putKey(key []byte) uint32 { 92 | l := uint32(len(key)) 93 | n := atomic.AddUint32(&s.n, l) 94 | y.AssertTruef(int(n) <= len(s.buf), 95 | "Arena too small, toWrite:%d newTotal:%d limit:%d", 96 | l, n, len(s.buf)) 97 | // m is the offset where you should write. 98 | // n = new len - key len give you the offset at which you should write. 99 | m := n - l 100 | // Copy to buffer from m:n 101 | y.AssertTrue(len(key) == copy(s.buf[m:n], key)) 102 | return m 103 | } 104 | 105 | // getNode returns a pointer to the node located at offset. If the offset is 106 | // zero, then the nil node pointer is returned. 107 | func (s *Arena) getNode(offset uint32) *node { 108 | if offset == 0 { 109 | return nil 110 | } 111 | 112 | return (*node)(unsafe.Pointer(&s.buf[offset])) 113 | } 114 | 115 | // getKey returns byte slice at offset. 116 | func (s *Arena) getKey(offset uint32, size uint16) []byte { 117 | return s.buf[offset : offset+uint32(size)] 118 | } 119 | 120 | // getVal returns byte slice at offset. The given size should be just the value 121 | // size and should NOT include the meta bytes. 122 | func (s *Arena) getVal(offset uint32, size uint32) (ret y.ValueStruct) { 123 | ret.Decode(s.buf[offset : offset+size]) 124 | return 125 | } 126 | 127 | // getNodeOffset returns the offset of node in the arena. If the node pointer is 128 | // nil, then the zero offset is returned. 129 | func (s *Arena) getNodeOffset(nd *node) uint32 { 130 | if nd == nil { 131 | return 0 132 | } 133 | 134 | return uint32(uintptr(unsafe.Pointer(nd)) - uintptr(unsafe.Pointer(&s.buf[0]))) 135 | } 136 | -------------------------------------------------------------------------------- /badger/cmd/rotate_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "io/ioutil" 21 | "math/rand" 22 | "os" 23 | "testing" 24 | 25 | "github.com/dgraph-io/badger/v2" 26 | "github.com/dgraph-io/badger/v2/y" 27 | "github.com/stretchr/testify/require" 28 | ) 29 | 30 | func TestRotate(t *testing.T) { 31 | dir, err := ioutil.TempDir("", "badger-test") 32 | require.NoError(t, err) 33 | defer os.RemoveAll(dir) 34 | 35 | // Creating sample key. 
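// Badger encrypts with AES, so the key must be 16, 24 or 32 bytes long; the test
// uses a random 32-byte key (AES-256).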
36 | key := make([]byte, 32) 37 | _, err = rand.Read(key) 38 | require.NoError(t, err) 39 | 40 | fp, err := ioutil.TempFile("", "*.key") 41 | require.NoError(t, err) 42 | _, err = fp.Write(key) 43 | require.NoError(t, err) 44 | defer fp.Close() 45 | 46 | // Opening DB with the encryption key. 47 | opts := badger.DefaultOptions(dir) 48 | opts.EncryptionKey = key 49 | opts.BlockCacheSize = 1 << 20 50 | 51 | db, err := badger.Open(opts) 52 | require.NoError(t, err) 53 | // Closing the db. 54 | require.NoError(t, db.Close()) 55 | 56 | // Opening the db again for the successful open. 57 | db, err = badger.Open(opts) 58 | require.NoError(t, err) 59 | // Closing so that we can open another db 60 | require.NoError(t, db.Close()) 61 | 62 | // Creating another sample key. 63 | key2 := make([]byte, 32) 64 | _, err = rand.Read(key2) 65 | require.NoError(t, err) 66 | fp2, err := ioutil.TempFile("", "*.key") 67 | require.NoError(t, err) 68 | _, err = fp2.Write(key2) 69 | require.NoError(t, err) 70 | defer fp2.Close() 71 | oldKeyPath = fp2.Name() 72 | sstDir = dir 73 | 74 | // Check whether we able to rotate the key with some sample key. We should get mismatch 75 | // error. 76 | require.EqualError(t, doRotate(nil, []string{}), badger.ErrEncryptionKeyMismatch.Error()) 77 | 78 | // rotating key with proper key. 79 | oldKeyPath = fp.Name() 80 | newKeyPath = fp2.Name() 81 | require.NoError(t, doRotate(nil, []string{})) 82 | 83 | // Checking whether db opens with the new key. 84 | opts.EncryptionKey = key2 85 | db, err = badger.Open(opts) 86 | require.NoError(t, err) 87 | require.NoError(t, db.Close()) 88 | 89 | // Checking for plain text rotation. 90 | oldKeyPath = newKeyPath 91 | newKeyPath = "" 92 | require.NoError(t, doRotate(nil, []string{})) 93 | opts.EncryptionKey = []byte{} 94 | db, err = badger.Open(opts) 95 | require.NoError(t, err) 96 | defer db.Close() 97 | } 98 | 99 | // This test shows that rotate tool can be used to enable encryption. 100 | func TestRotatePlainTextToEncrypted(t *testing.T) { 101 | dir, err := ioutil.TempDir("", "badger-test") 102 | require.NoError(t, err) 103 | defer os.RemoveAll(dir) 104 | 105 | // Open DB without encryption. 106 | opts := badger.DefaultOptions(dir) 107 | db, err := badger.Open(opts) 108 | require.NoError(t, err) 109 | 110 | db.Update(func(txn *badger.Txn) error { 111 | return txn.Set([]byte("foo"), []byte("bar")) 112 | }) 113 | 114 | require.NoError(t, db.Close()) 115 | 116 | // Create an encryption key. 117 | key := make([]byte, 32) 118 | y.Check2(rand.Read(key)) 119 | fp, err := ioutil.TempFile("", "*.key") 120 | require.NoError(t, err) 121 | _, err = fp.Write(key) 122 | require.NoError(t, err) 123 | defer fp.Close() 124 | 125 | oldKeyPath = "" 126 | newKeyPath = fp.Name() 127 | sstDir = dir 128 | 129 | // Enable encryption. newKeyPath is encrypted. 130 | require.Nil(t, doRotate(nil, []string{})) 131 | 132 | // Try opening DB without the key. 133 | opts.BlockCacheSize = 1 << 20 134 | _, err = badger.Open(opts) 135 | require.EqualError(t, err, badger.ErrEncryptionKeyMismatch.Error()) 136 | 137 | // Check whether db opens with the new key. 
138 | opts.EncryptionKey = key 139 | db, err = badger.Open(opts) 140 | require.NoError(t, err) 141 | 142 | db.View(func(txn *badger.Txn) error { 143 | iopt := badger.DefaultIteratorOptions 144 | it := txn.NewIterator(iopt) 145 | defer it.Close() 146 | count := 0 147 | for it.Rewind(); it.Valid(); it.Next() { 148 | count++ 149 | } 150 | require.Equal(t, 1, count) 151 | return nil 152 | }) 153 | require.NoError(t, db.Close()) 154 | } 155 | -------------------------------------------------------------------------------- /discard.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "encoding/binary" 21 | "os" 22 | "path" 23 | "sort" 24 | "sync" 25 | 26 | "github.com/dgraph-io/badger/v2/y" 27 | "github.com/dgraph-io/ristretto/z" 28 | ) 29 | 30 | // discardStats keeps track of the amount of data that could be discarded for 31 | // a given logfile. 32 | type discardStats struct { 33 | sync.Mutex 34 | 35 | *z.MmapFile 36 | opt Options 37 | nextEmptySlot int 38 | } 39 | 40 | const discardFname string = "DISCARD" 41 | const discardFsize int = 1 << 30 42 | const maxSlot int = 64 << 20 43 | 44 | // TODO(naman): Add a test for this. 45 | func initDiscardStats(opt Options) (*discardStats, error) { 46 | fname := path.Join(opt.ValueDir, discardFname) 47 | 48 | // 1GB file can store 67M discard entries. Each entry is 16 bytes. 49 | mf, err := z.OpenMmapFile(fname, os.O_CREATE|os.O_RDWR, discardFsize) 50 | lf := &discardStats{ 51 | MmapFile: mf, 52 | opt: opt, 53 | } 54 | if err == z.NewFile { 55 | // We don't need to zero out the entire 1GB. 56 | lf.zeroOut() 57 | 58 | } else if err != nil { 59 | return nil, y.Wrapf(err, "while opening file: %s\n", discardFname) 60 | } 61 | 62 | for slot := 0; slot < maxSlot; slot++ { 63 | if lf.get(16*slot) == 0 { 64 | lf.nextEmptySlot = slot 65 | break 66 | } 67 | } 68 | sort.Sort(lf) 69 | opt.Infof("Discard stats nextEmptySlot: %d\n", lf.nextEmptySlot) 70 | return lf, nil 71 | } 72 | 73 | func (lf *discardStats) Len() int { 74 | return lf.nextEmptySlot 75 | } 76 | func (lf *discardStats) Less(i, j int) bool { 77 | return lf.get(16*i) < lf.get(16*j) 78 | } 79 | func (lf *discardStats) Swap(i, j int) { 80 | left := lf.Data[16*i : 16*i+16] 81 | right := lf.Data[16*j : 16*j+16] 82 | var tmp [16]byte 83 | copy(tmp[:], left) 84 | copy(left, right) 85 | copy(right, tmp[:]) 86 | } 87 | 88 | // offset is not slot. 89 | func (lf *discardStats) get(offset int) uint64 { 90 | return binary.BigEndian.Uint64(lf.Data[offset : offset+8]) 91 | } 92 | func (lf *discardStats) set(offset int, val uint64) { 93 | binary.BigEndian.PutUint64(lf.Data[offset:offset+8], val) 94 | } 95 | 96 | // zeroOut would zero out the next slot. 
97 | func (lf *discardStats) zeroOut() { 98 | lf.set(lf.nextEmptySlot*16, 0) 99 | lf.set(lf.nextEmptySlot*16+8, 0) 100 | } 101 | 102 | // Update would update the discard stats for the given file id. If discard is 103 | // 0, it would return the current value of discard for the file. If discard is 104 | // < 0, it would set the current value of discard to zero for the file. 105 | func (lf *discardStats) Update(fidu uint32, discard int64) int64 { 106 | fid := uint64(fidu) 107 | lf.Lock() 108 | defer lf.Unlock() 109 | 110 | idx := sort.Search(lf.nextEmptySlot, func(slot int) bool { 111 | return lf.get(slot*16) >= fid 112 | }) 113 | if idx < lf.nextEmptySlot && lf.get(idx*16) == fid { 114 | off := idx*16 + 8 115 | curDisc := lf.get(off) 116 | if discard == 0 { 117 | return int64(curDisc) 118 | } 119 | if discard < 0 { 120 | lf.set(off, 0) 121 | return 0 122 | } 123 | lf.set(off, curDisc+uint64(discard)) 124 | return int64(curDisc + uint64(discard)) 125 | } 126 | if discard <= 0 { 127 | // No need to add a new entry. 128 | return 0 129 | } 130 | 131 | // Could not find the fid. Add the entry. 132 | idx = lf.nextEmptySlot 133 | lf.set(idx*16, uint64(fid)) 134 | lf.set(idx*16+8, uint64(discard)) 135 | 136 | // Move to next slot. 137 | lf.nextEmptySlot++ 138 | y.AssertTrue(lf.nextEmptySlot < maxSlot) 139 | lf.zeroOut() 140 | 141 | sort.Sort(lf) 142 | return int64(discard) 143 | } 144 | 145 | func (lf *discardStats) iterate(f func(fid, stats uint64)) { 146 | for slot := 0; slot < lf.nextEmptySlot; slot++ { 147 | idx := 16 * slot 148 | f(lf.get(idx), lf.get(idx+8)) 149 | } 150 | } 151 | 152 | // MaxDiscard returns the file id with maximum discard bytes. 153 | func (lf *discardStats) MaxDiscard() (uint32, int64) { 154 | lf.Lock() 155 | defer lf.Unlock() 156 | 157 | var maxFid, maxVal uint64 158 | lf.iterate(func(fid, val uint64) { 159 | if maxVal < val { 160 | maxVal = val 161 | maxFid = fid 162 | } 163 | }) 164 | return uint32(maxFid), int64(maxVal) 165 | } 166 | -------------------------------------------------------------------------------- /fb/TableIndex.go: -------------------------------------------------------------------------------- 1 | // Code generated by the FlatBuffers compiler. DO NOT EDIT. 
2 | 3 | package fb 4 | 5 | import ( 6 | flatbuffers "github.com/google/flatbuffers/go" 7 | ) 8 | 9 | type TableIndex struct { 10 | _tab flatbuffers.Table 11 | } 12 | 13 | func GetRootAsTableIndex(buf []byte, offset flatbuffers.UOffsetT) *TableIndex { 14 | n := flatbuffers.GetUOffsetT(buf[offset:]) 15 | x := &TableIndex{} 16 | x.Init(buf, n+offset) 17 | return x 18 | } 19 | 20 | func (rcv *TableIndex) Init(buf []byte, i flatbuffers.UOffsetT) { 21 | rcv._tab.Bytes = buf 22 | rcv._tab.Pos = i 23 | } 24 | 25 | func (rcv *TableIndex) Table() flatbuffers.Table { 26 | return rcv._tab 27 | } 28 | 29 | func (rcv *TableIndex) Offsets(obj *BlockOffset, j int) bool { 30 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 31 | if o != 0 { 32 | x := rcv._tab.Vector(o) 33 | x += flatbuffers.UOffsetT(j) * 4 34 | x = rcv._tab.Indirect(x) 35 | obj.Init(rcv._tab.Bytes, x) 36 | return true 37 | } 38 | return false 39 | } 40 | 41 | func (rcv *TableIndex) OffsetsLength() int { 42 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 43 | if o != 0 { 44 | return rcv._tab.VectorLen(o) 45 | } 46 | return 0 47 | } 48 | 49 | func (rcv *TableIndex) BloomFilter(j int) byte { 50 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 51 | if o != 0 { 52 | a := rcv._tab.Vector(o) 53 | return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) 54 | } 55 | return 0 56 | } 57 | 58 | func (rcv *TableIndex) BloomFilterLength() int { 59 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 60 | if o != 0 { 61 | return rcv._tab.VectorLen(o) 62 | } 63 | return 0 64 | } 65 | 66 | func (rcv *TableIndex) BloomFilterBytes() []byte { 67 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 68 | if o != 0 { 69 | return rcv._tab.ByteVector(o + rcv._tab.Pos) 70 | } 71 | return nil 72 | } 73 | 74 | func (rcv *TableIndex) MutateBloomFilter(j int, n byte) bool { 75 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 76 | if o != 0 { 77 | a := rcv._tab.Vector(o) 78 | return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n) 79 | } 80 | return false 81 | } 82 | 83 | func (rcv *TableIndex) EstimatedSize() uint32 { 84 | o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) 85 | if o != 0 { 86 | return rcv._tab.GetUint32(o + rcv._tab.Pos) 87 | } 88 | return 0 89 | } 90 | 91 | func (rcv *TableIndex) MutateEstimatedSize(n uint32) bool { 92 | return rcv._tab.MutateUint32Slot(8, n) 93 | } 94 | 95 | func (rcv *TableIndex) MaxVersion() uint64 { 96 | o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) 97 | if o != 0 { 98 | return rcv._tab.GetUint64(o + rcv._tab.Pos) 99 | } 100 | return 0 101 | } 102 | 103 | func (rcv *TableIndex) MutateMaxVersion(n uint64) bool { 104 | return rcv._tab.MutateUint64Slot(10, n) 105 | } 106 | 107 | func (rcv *TableIndex) UncompressedSize() uint32 { 108 | o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) 109 | if o != 0 { 110 | return rcv._tab.GetUint32(o + rcv._tab.Pos) 111 | } 112 | return 0 113 | } 114 | 115 | func (rcv *TableIndex) MutateUncompressedSize(n uint32) bool { 116 | return rcv._tab.MutateUint32Slot(12, n) 117 | } 118 | 119 | func (rcv *TableIndex) KeyCount() uint32 { 120 | o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) 121 | if o != 0 { 122 | return rcv._tab.GetUint32(o + rcv._tab.Pos) 123 | } 124 | return 0 125 | } 126 | 127 | func (rcv *TableIndex) MutateKeyCount(n uint32) bool { 128 | return rcv._tab.MutateUint32Slot(14, n) 129 | } 130 | 131 | func TableIndexStart(builder *flatbuffers.Builder) { 132 | builder.StartObject(6) 133 | } 134 | func TableIndexAddOffsets(builder *flatbuffers.Builder, offsets flatbuffers.UOffsetT) { 135 | 
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(offsets), 0) 136 | } 137 | func TableIndexStartOffsetsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { 138 | return builder.StartVector(4, numElems, 4) 139 | } 140 | func TableIndexAddBloomFilter(builder *flatbuffers.Builder, bloomFilter flatbuffers.UOffsetT) { 141 | builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(bloomFilter), 0) 142 | } 143 | func TableIndexStartBloomFilterVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { 144 | return builder.StartVector(1, numElems, 1) 145 | } 146 | func TableIndexAddEstimatedSize(builder *flatbuffers.Builder, estimatedSize uint32) { 147 | builder.PrependUint32Slot(2, estimatedSize, 0) 148 | } 149 | func TableIndexAddMaxVersion(builder *flatbuffers.Builder, maxVersion uint64) { 150 | builder.PrependUint64Slot(3, maxVersion, 0) 151 | } 152 | func TableIndexAddUncompressedSize(builder *flatbuffers.Builder, uncompressedSize uint32) { 153 | builder.PrependUint32Slot(4, uncompressedSize, 0) 154 | } 155 | func TableIndexAddKeyCount(builder *flatbuffers.Builder, keyCount uint32) { 156 | builder.PrependUint32Slot(5, keyCount, 0) 157 | } 158 | func TableIndexEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { 159 | return builder.EndObject() 160 | } 161 | -------------------------------------------------------------------------------- /docs/content/_index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "BadgerDB Documentation" 3 | date: 2020-07-06T17:43:29+05:30 4 | draft: false 5 | --- 6 | 7 | ![Badger mascot](/images/diggy-shadow.png) 8 | 9 | **Welcome to the official Badger documentation.** 10 | 11 | BadgerDB is an embeddable, persistent and fast key-value (KV) database written 12 | in pure Go. It is the underlying database for [Dgraph](https://dgraph.io), a 13 | fast, distributed graph database. It's meant to be a performant alternative to 14 | non-Go-based key-value stores like RocksDB. 15 | 16 | ## Table of Contents 17 | 18 |
- A single page quickstart guide to get started with BadgerDB
- Additional resources and information
- Design goals behind BadgerDB
- A list of known projects that use BadgerDB
- FAQ: Frequently asked questions
- Embeddable, persistent and fast key-value database that powers Dgraph
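## Quick example

For orientation, the snippet below is a minimal usage sketch, not official documentation text. It relies on the v2 module path and the `Open`/`Update`/`View` API that the tests elsewhere in this repository exercise; the directory path and key are illustrative only.

```go
package main

import (
	"fmt"
	"log"

	badger "github.com/dgraph-io/badger/v2"
)

func main() {
	// Open (or create) a database in the given directory.
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Write a key in a read-write transaction.
	if err := db.Update(func(txn *badger.Txn) error {
		return txn.Set([]byte("answer"), []byte("42"))
	}); err != nil {
		log.Fatal(err)
	}

	// Read it back in a read-only transaction.
	if err := db.View(func(txn *badger.Txn) error {
		item, err := txn.Get([]byte("answer"))
		if err != nil {
			return err
		}
		val, err := item.ValueCopy(nil)
		if err != nil {
			return err
		}
		fmt.Printf("answer=%s\n", val)
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```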
## Changelog

The [Changelog] is kept fairly up-to-date.

- Badger v1.0 was released in Nov 2017, and the latest version that is data-compatible with v1.0 is v1.6.0.
- Badger v2.0 was released in Nov 2019 with a new storage format that is not compatible with the v1.x format. Badger v2.0 supports compression and encryption, and uses a cache to speed up lookups.

For more details on our version naming scheme, please read [Choosing a version]({{< relref "get-started/index.md#choosing-a-version" >}}).

[Changelog]: https://github.com/dgraph-io/badger/blob/master/CHANGELOG.md

## Contribute
Get started with contributing fixes and enhancements to Badger and related software.
## Our Community

**Badger is made better every day by the growing community and the contributors all over the world.**
Discuss Badger on the official community.
153 | -------------------------------------------------------------------------------- /batch_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2018 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "fmt" 21 | "io/ioutil" 22 | "testing" 23 | "time" 24 | 25 | "github.com/dgraph-io/badger/v2/y" 26 | 27 | "github.com/stretchr/testify/require" 28 | ) 29 | 30 | func TestWriteBatch(t *testing.T) { 31 | key := func(i int) []byte { 32 | return []byte(fmt.Sprintf("%10d", i)) 33 | } 34 | val := func(i int) []byte { 35 | return []byte(fmt.Sprintf("%128d", i)) 36 | } 37 | 38 | test := func(t *testing.T, db *DB) { 39 | wb := db.NewWriteBatch() 40 | defer wb.Cancel() 41 | 42 | // Sanity check for SetEntryAt. 43 | require.Error(t, wb.SetEntryAt(&Entry{}, 12)) 44 | 45 | N, M := 50000, 1000 46 | start := time.Now() 47 | 48 | for i := 0; i < N; i++ { 49 | require.NoError(t, wb.Set(key(i), val(i))) 50 | } 51 | for i := 0; i < M; i++ { 52 | require.NoError(t, wb.Delete(key(i))) 53 | } 54 | require.NoError(t, wb.Flush()) 55 | t.Logf("Time taken for %d writes (w/ test options): %s\n", N+M, time.Since(start)) 56 | 57 | err := db.View(func(txn *Txn) error { 58 | itr := txn.NewIterator(DefaultIteratorOptions) 59 | defer itr.Close() 60 | 61 | i := M 62 | for itr.Rewind(); itr.Valid(); itr.Next() { 63 | item := itr.Item() 64 | require.Equal(t, string(key(i)), string(item.Key())) 65 | valcopy, err := item.ValueCopy(nil) 66 | require.NoError(t, err) 67 | require.Equal(t, val(i), valcopy) 68 | i++ 69 | } 70 | require.Equal(t, N, i) 71 | return nil 72 | }) 73 | require.NoError(t, err) 74 | } 75 | t.Run("disk mode", func(t *testing.T) { 76 | opt := getTestOptions("") 77 | // Set value threshold to 32 bytes otherwise write batch will generate 78 | // too many files and we will crash with too many files open error. 79 | opt.ValueThreshold = 32 80 | runBadgerTest(t, &opt, func(t *testing.T, db *DB) { 81 | test(t, db) 82 | }) 83 | }) 84 | t.Run("InMemory mode", func(t *testing.T) { 85 | opt := getTestOptions("") 86 | opt.InMemory = true 87 | db, err := Open(opt) 88 | require.NoError(t, err) 89 | test(t, db) 90 | require.NoError(t, db.Close()) 91 | }) 92 | } 93 | 94 | // This test ensures we don't end up in deadlock in case of empty writebatch. 
95 | func TestEmptyWriteBatch(t *testing.T) { 96 | t.Run("normal mode", func(t *testing.T) { 97 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 98 | wb := db.NewWriteBatch() 99 | require.NoError(t, wb.Flush()) 100 | wb = db.NewWriteBatch() 101 | require.NoError(t, wb.Flush()) 102 | wb = db.NewWriteBatch() 103 | require.NoError(t, wb.Flush()) 104 | }) 105 | }) 106 | t.Run("managed mode", func(t *testing.T) { 107 | opt := getTestOptions("") 108 | opt.managedTxns = true 109 | runBadgerTest(t, &opt, func(t *testing.T, db *DB) { 110 | t.Run("WriteBatchAt", func(t *testing.T) { 111 | wb := db.NewWriteBatchAt(2) 112 | require.NoError(t, wb.Flush()) 113 | wb = db.NewWriteBatchAt(208) 114 | require.NoError(t, wb.Flush()) 115 | wb = db.NewWriteBatchAt(31) 116 | require.NoError(t, wb.Flush()) 117 | }) 118 | t.Run("ManagedWriteBatch", func(t *testing.T) { 119 | wb := db.NewManagedWriteBatch() 120 | require.NoError(t, wb.Flush()) 121 | wb = db.NewManagedWriteBatch() 122 | require.NoError(t, wb.Flush()) 123 | wb = db.NewManagedWriteBatch() 124 | require.NoError(t, wb.Flush()) 125 | }) 126 | }) 127 | }) 128 | } 129 | 130 | // This test ensures we don't panic during flush. 131 | // See issue: https://github.com/dgraph-io/badger/issues/1394 132 | func TestFlushPanic(t *testing.T) { 133 | t.Run("flush after flush", func(t *testing.T) { 134 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 135 | wb := db.NewWriteBatch() 136 | wb.Flush() 137 | require.Error(t, y.ErrCommitAfterFinish, wb.Flush()) 138 | }) 139 | }) 140 | t.Run("flush after cancel", func(t *testing.T) { 141 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 142 | wb := db.NewWriteBatch() 143 | wb.Cancel() 144 | require.Error(t, y.ErrCommitAfterFinish, wb.Flush()) 145 | }) 146 | }) 147 | } 148 | 149 | func TestBatchErrDeadlock(t *testing.T) { 150 | dir, err := ioutil.TempDir("", "badger-test") 151 | require.NoError(t, err) 152 | defer removeDir(dir) 153 | 154 | opt := DefaultOptions(dir) 155 | db, err := OpenManaged(opt) 156 | require.NoError(t, err) 157 | 158 | wb := db.NewManagedWriteBatch() 159 | require.NoError(t, wb.SetEntryAt(&Entry{Key: []byte("foo")}, 0)) 160 | require.Error(t, wb.Flush()) 161 | require.NoError(t, db.Close()) 162 | } 163 | -------------------------------------------------------------------------------- /skl/README.md: -------------------------------------------------------------------------------- 1 | This is much better than `skiplist` and `slist`. 
2 | 3 | ``` 4 | BenchmarkReadWrite/frac_0-8 3000000 537 ns/op 5 | BenchmarkReadWrite/frac_1-8 3000000 503 ns/op 6 | BenchmarkReadWrite/frac_2-8 3000000 492 ns/op 7 | BenchmarkReadWrite/frac_3-8 3000000 475 ns/op 8 | BenchmarkReadWrite/frac_4-8 3000000 440 ns/op 9 | BenchmarkReadWrite/frac_5-8 5000000 442 ns/op 10 | BenchmarkReadWrite/frac_6-8 5000000 380 ns/op 11 | BenchmarkReadWrite/frac_7-8 5000000 338 ns/op 12 | BenchmarkReadWrite/frac_8-8 5000000 294 ns/op 13 | BenchmarkReadWrite/frac_9-8 10000000 268 ns/op 14 | BenchmarkReadWrite/frac_10-8 100000000 26.3 ns/op 15 | ``` 16 | 17 | And even better than a simple map with read-write lock: 18 | 19 | ``` 20 | BenchmarkReadWriteMap/frac_0-8 2000000 774 ns/op 21 | BenchmarkReadWriteMap/frac_1-8 2000000 647 ns/op 22 | BenchmarkReadWriteMap/frac_2-8 3000000 605 ns/op 23 | BenchmarkReadWriteMap/frac_3-8 3000000 603 ns/op 24 | BenchmarkReadWriteMap/frac_4-8 3000000 556 ns/op 25 | BenchmarkReadWriteMap/frac_5-8 3000000 472 ns/op 26 | BenchmarkReadWriteMap/frac_6-8 3000000 476 ns/op 27 | BenchmarkReadWriteMap/frac_7-8 3000000 457 ns/op 28 | BenchmarkReadWriteMap/frac_8-8 5000000 444 ns/op 29 | BenchmarkReadWriteMap/frac_9-8 5000000 361 ns/op 30 | BenchmarkReadWriteMap/frac_10-8 10000000 212 ns/op 31 | ``` 32 | 33 | # Node Pooling 34 | 35 | Command used 36 | 37 | ``` 38 | rm -Rf tmp && /usr/bin/time -l ./populate -keys_mil 10 39 | ``` 40 | 41 | For pprof results, we run without using /usr/bin/time. There are four runs below. 42 | 43 | Results seem to vary quite a bit between runs. 44 | 45 | ## Before node pooling 46 | 47 | ``` 48 | 1311.53MB of 1338.69MB total (97.97%) 49 | Dropped 30 nodes (cum <= 6.69MB) 50 | Showing top 10 nodes out of 37 (cum >= 12.50MB) 51 | flat flat% sum% cum cum% 52 | 523.04MB 39.07% 39.07% 523.04MB 39.07% github.com/dgraph-io/badger/skl.(*Skiplist).Put 53 | 184.51MB 13.78% 52.85% 184.51MB 13.78% runtime.stringtoslicebyte 54 | 166.01MB 12.40% 65.25% 689.04MB 51.47% github.com/dgraph-io/badger/mem.(*Table).Put 55 | 165MB 12.33% 77.58% 165MB 12.33% runtime.convT2E 56 | 116.92MB 8.73% 86.31% 116.92MB 8.73% bytes.makeSlice 57 | 62.50MB 4.67% 90.98% 62.50MB 4.67% main.newValue 58 | 34.50MB 2.58% 93.56% 34.50MB 2.58% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV 59 | 25.50MB 1.90% 95.46% 100.06MB 7.47% github.com/dgraph-io/badger/y.(*MergeIterator).Next 60 | 21.06MB 1.57% 97.04% 21.06MB 1.57% github.com/dgraph-io/badger/table.(*Table).read 61 | 12.50MB 0.93% 97.97% 12.50MB 0.93% github.com/dgraph-io/badger/table.header.Encode 62 | 63 | 128.31 real 329.37 user 17.11 sys 64 | 3355660288 maximum resident set size 65 | 0 average shared memory size 66 | 0 average unshared data size 67 | 0 average unshared stack size 68 | 2203080 page reclaims 69 | 764 page faults 70 | 0 swaps 71 | 275 block input operations 72 | 76 block output operations 73 | 0 messages sent 74 | 0 messages received 75 | 0 signals received 76 | 49173 voluntary context switches 77 | 599922 involuntary context switches 78 | ``` 79 | 80 | ## After node pooling 81 | 82 | ``` 83 | 1963.13MB of 2026.09MB total (96.89%) 84 | Dropped 29 nodes (cum <= 10.13MB) 85 | Showing top 10 nodes out of 41 (cum >= 185.62MB) 86 | flat flat% sum% cum cum% 87 | 658.05MB 32.48% 32.48% 658.05MB 32.48% github.com/dgraph-io/badger/skl.glob..func1 88 | 297.51MB 14.68% 47.16% 297.51MB 14.68% runtime.convT2E 89 | 257.51MB 12.71% 59.87% 257.51MB 12.71% runtime.stringtoslicebyte 90 | 249.01MB 12.29% 72.16% 1007.06MB 49.70% github.com/dgraph-io/badger/mem.(*Table).Put 91 | 142.43MB 7.03% 
79.19% 142.43MB 7.03% bytes.makeSlice 92 | 100MB 4.94% 84.13% 758.05MB 37.41% github.com/dgraph-io/badger/skl.newNode 93 | 99.50MB 4.91% 89.04% 99.50MB 4.91% main.newValue 94 | 75MB 3.70% 92.74% 75MB 3.70% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV 95 | 44.62MB 2.20% 94.94% 44.62MB 2.20% github.com/dgraph-io/badger/table.(*Table).read 96 | 39.50MB 1.95% 96.89% 185.62MB 9.16% github.com/dgraph-io/badger/y.(*MergeIterator).Next 97 | 98 | 135.58 real 374.29 user 17.65 sys 99 | 3740614656 maximum resident set size 100 | 0 average shared memory size 101 | 0 average unshared data size 102 | 0 average unshared stack size 103 | 2276566 page reclaims 104 | 770 page faults 105 | 0 swaps 106 | 128 block input operations 107 | 90 block output operations 108 | 0 messages sent 109 | 0 messages received 110 | 0 signals received 111 | 46434 voluntary context switches 112 | 597049 involuntary context switches 113 | ``` 114 | -------------------------------------------------------------------------------- /y/bloom.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 The LevelDB-Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package y 6 | 7 | import "math" 8 | 9 | // Filter is an encoded set of []byte keys. 10 | type Filter []byte 11 | 12 | func (f Filter) MayContainKey(k []byte) bool { 13 | return f.MayContain(Hash(k)) 14 | } 15 | 16 | // MayContain returns whether the filter may contain given key. False positives 17 | // are possible, where it returns true for keys not in the original set. 18 | func (f Filter) MayContain(h uint32) bool { 19 | if len(f) < 2 { 20 | return false 21 | } 22 | k := f[len(f)-1] 23 | if k > 30 { 24 | // This is reserved for potentially new encodings for short Bloom filters. 25 | // Consider it a match. 26 | return true 27 | } 28 | nBits := uint32(8 * (len(f) - 1)) 29 | delta := h>>17 | h<<15 30 | for j := uint8(0); j < k; j++ { 31 | bitPos := h % nBits 32 | if f[bitPos/8]&(1<<(bitPos%8)) == 0 { 33 | return false 34 | } 35 | h += delta 36 | } 37 | return true 38 | } 39 | 40 | // NewFilter returns a new Bloom filter that encodes a set of []byte keys with 41 | // the given number of bits per key, approximately. 42 | // 43 | // A good bitsPerKey value is 10, which yields a filter with ~ 1% false 44 | // positive rate. 45 | func NewFilter(keys []uint32, bitsPerKey int) Filter { 46 | return Filter(appendFilter(nil, keys, bitsPerKey)) 47 | } 48 | 49 | // BloomBitsPerKey returns the bits per key required by bloomfilter based on 50 | // the false positive rate. 51 | func BloomBitsPerKey(numEntries int, fp float64) int { 52 | size := -1 * float64(numEntries) * math.Log(fp) / math.Pow(float64(0.69314718056), 2) 53 | locs := math.Ceil(float64(0.69314718056) * size / float64(numEntries)) 54 | return int(locs) 55 | } 56 | 57 | func appendFilter(buf []byte, keys []uint32, bitsPerKey int) []byte { 58 | if bitsPerKey < 0 { 59 | bitsPerKey = 0 60 | } 61 | // 0.69 is approximately ln(2). 62 | k := uint32(float64(bitsPerKey) * 0.69) 63 | if k < 1 { 64 | k = 1 65 | } 66 | if k > 30 { 67 | k = 30 68 | } 69 | 70 | nBits := len(keys) * int(bitsPerKey) 71 | // For small len(keys), we can see a very high false positive rate. Fix it 72 | // by enforcing a minimum bloom filter length. 
73 | if nBits < 64 { 74 | nBits = 64 75 | } 76 | nBytes := (nBits + 7) / 8 77 | nBits = nBytes * 8 78 | buf, filter := extend(buf, nBytes+1) 79 | 80 | for _, h := range keys { 81 | delta := h>>17 | h<<15 82 | for j := uint32(0); j < k; j++ { 83 | bitPos := h % uint32(nBits) 84 | filter[bitPos/8] |= 1 << (bitPos % 8) 85 | h += delta 86 | } 87 | } 88 | filter[nBytes] = uint8(k) 89 | 90 | return buf 91 | } 92 | 93 | // extend appends n zero bytes to b. It returns the overall slice (of length 94 | // n+len(originalB)) and the slice of n trailing zeroes. 95 | func extend(b []byte, n int) (overall, trailer []byte) { 96 | want := n + len(b) 97 | if want <= cap(b) { 98 | overall = b[:want] 99 | trailer = overall[len(b):] 100 | for i := range trailer { 101 | trailer[i] = 0 102 | } 103 | } else { 104 | // Grow the capacity exponentially, with a 1KiB minimum. 105 | c := 1024 106 | for c < want { 107 | c += c / 4 108 | } 109 | overall = make([]byte, want, c) 110 | trailer = overall[len(b):] 111 | copy(overall, b) 112 | } 113 | return overall, trailer 114 | } 115 | 116 | // hash implements a hashing algorithm similar to the Murmur hash. 117 | func Hash(b []byte) uint32 { 118 | const ( 119 | seed = 0xbc9f1d34 120 | m = 0xc6a4a793 121 | ) 122 | h := uint32(seed) ^ uint32(len(b)*m) 123 | for ; len(b) >= 4; b = b[4:] { 124 | h += uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 125 | h *= m 126 | h ^= h >> 16 127 | } 128 | switch len(b) { 129 | case 3: 130 | h += uint32(b[2]) << 16 131 | fallthrough 132 | case 2: 133 | h += uint32(b[1]) << 8 134 | fallthrough 135 | case 1: 136 | h += uint32(b[0]) 137 | h *= m 138 | h ^= h >> 24 139 | } 140 | return h 141 | } 142 | 143 | // FilterPolicy implements the db.FilterPolicy interface from the leveldb/db 144 | // package. 145 | // 146 | // The integer value is the approximate number of bits used per key. A good 147 | // value is 10, which yields a filter with ~ 1% false positive rate. 148 | // 149 | // It is valid to use the other API in this package (leveldb/bloom) without 150 | // using this type or the leveldb/db package. 151 | 152 | // type FilterPolicy int 153 | 154 | // // Name implements the db.FilterPolicy interface. 155 | // func (p FilterPolicy) Name() string { 156 | // // This string looks arbitrary, but its value is written to LevelDB .ldb 157 | // // files, and should be this exact value to be compatible with those files 158 | // // and with the C++ LevelDB code. 159 | // return "leveldb.BuiltinBloomFilter2" 160 | // } 161 | 162 | // // AppendFilter implements the db.FilterPolicy interface. 163 | // func (p FilterPolicy) AppendFilter(dst []byte, keys [][]byte) []byte { 164 | // return appendFilter(dst, keys, int(p)) 165 | // } 166 | 167 | // // MayContain implements the db.FilterPolicy interface. 168 | // func (p FilterPolicy) MayContain(filter, key []byte) bool { 169 | // return Filter(filter).MayContain(key) 170 | // } 171 | -------------------------------------------------------------------------------- /merge_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "encoding/binary" 21 | "io/ioutil" 22 | "testing" 23 | "time" 24 | 25 | "github.com/stretchr/testify/require" 26 | ) 27 | 28 | func TestGetMergeOperator(t *testing.T) { 29 | t.Run("Get before Add", func(t *testing.T) { 30 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 31 | m := db.GetMergeOperator([]byte("merge"), add, 200*time.Millisecond) 32 | defer m.Stop() 33 | 34 | val, err := m.Get() 35 | require.Equal(t, ErrKeyNotFound, err) 36 | require.Nil(t, val) 37 | }) 38 | }) 39 | t.Run("Add and Get", func(t *testing.T) { 40 | key := []byte("merge") 41 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 42 | m := db.GetMergeOperator(key, add, 200*time.Millisecond) 43 | defer m.Stop() 44 | 45 | err := m.Add(uint64ToBytes(1)) 46 | require.NoError(t, err) 47 | m.Add(uint64ToBytes(2)) 48 | require.NoError(t, err) 49 | m.Add(uint64ToBytes(3)) 50 | require.NoError(t, err) 51 | 52 | res, err := m.Get() 53 | require.NoError(t, err) 54 | require.Equal(t, uint64(6), bytesToUint64(res)) 55 | }) 56 | 57 | }) 58 | t.Run("Add and Get slices", func(t *testing.T) { 59 | // Merge function to merge two byte slices 60 | add := func(originalValue, newValue []byte) []byte { 61 | return append(originalValue, newValue...) 62 | } 63 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 64 | m := db.GetMergeOperator([]byte("fooprefix"), add, 2*time.Millisecond) 65 | defer m.Stop() 66 | 67 | require.Nil(t, m.Add([]byte("A"))) 68 | require.Nil(t, m.Add([]byte("B"))) 69 | require.Nil(t, m.Add([]byte("C"))) 70 | 71 | value, err := m.Get() 72 | require.Nil(t, err) 73 | require.Equal(t, "ABC", string(value)) 74 | }) 75 | }) 76 | t.Run("Get Before Compact", func(t *testing.T) { 77 | key := []byte("merge") 78 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 79 | m := db.GetMergeOperator(key, add, 500*time.Millisecond) 80 | defer m.Stop() 81 | 82 | err := m.Add(uint64ToBytes(1)) 83 | require.NoError(t, err) 84 | m.Add(uint64ToBytes(2)) 85 | require.NoError(t, err) 86 | m.Add(uint64ToBytes(3)) 87 | require.NoError(t, err) 88 | 89 | res, err := m.Get() 90 | require.NoError(t, err) 91 | require.Equal(t, uint64(6), bytesToUint64(res)) 92 | }) 93 | }) 94 | 95 | t.Run("Get after Stop", func(t *testing.T) { 96 | key := []byte("merge") 97 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 98 | m := db.GetMergeOperator(key, add, 1*time.Second) 99 | 100 | err := m.Add(uint64ToBytes(1)) 101 | require.NoError(t, err) 102 | m.Add(uint64ToBytes(2)) 103 | require.NoError(t, err) 104 | m.Add(uint64ToBytes(3)) 105 | require.NoError(t, err) 106 | 107 | m.Stop() 108 | res, err := m.Get() 109 | require.NoError(t, err) 110 | require.Equal(t, uint64(6), bytesToUint64(res)) 111 | }) 112 | }) 113 | t.Run("Old keys should be removed after compaction", func(t *testing.T) { 114 | dir, err := ioutil.TempDir("", "badger-test") 115 | require.NoError(t, err) 116 | defer removeDir(dir) 117 | 118 | opts := getTestOptions(dir) 119 | db, err := Open(opts) 120 | require.NoError(t, err) 121 | mergeKey := []byte("foo") 122 | m := 
db.GetMergeOperator(mergeKey, add, 2*time.Millisecond) 123 | 124 | count := 5000 // This will cause compaction from L0->L1 125 | for i := 0; i < count; i++ { 126 | require.NoError(t, m.Add(uint64ToBytes(1))) 127 | } 128 | value, err := m.Get() 129 | require.Nil(t, err) 130 | require.Equal(t, uint64(count), bytesToUint64(value)) 131 | m.Stop() 132 | 133 | // Force compaction by closing DB. The compaction should discard all the old merged values 134 | require.Nil(t, db.Close()) 135 | db, err = Open(opts) 136 | require.NoError(t, err) 137 | defer db.Close() 138 | 139 | keyCount := 0 140 | txn := db.NewTransaction(false) 141 | defer txn.Discard() 142 | iopt := DefaultIteratorOptions 143 | iopt.AllVersions = true 144 | it := txn.NewKeyIterator(mergeKey, iopt) 145 | defer it.Close() 146 | for it.Rewind(); it.Valid(); it.Next() { 147 | keyCount++ 148 | } 149 | // We should have only one key in badger. All the other keys should've been removed by 150 | // compaction 151 | require.Equal(t, 1, keyCount) 152 | }) 153 | 154 | } 155 | 156 | func uint64ToBytes(i uint64) []byte { 157 | var buf [8]byte 158 | binary.BigEndian.PutUint64(buf[:], i) 159 | return buf[:] 160 | } 161 | 162 | func bytesToUint64(b []byte) uint64 { 163 | return binary.BigEndian.Uint64(b) 164 | } 165 | 166 | // Merge function to add two uint64 numbers 167 | func add(existing, new []byte) []byte { 168 | return uint64ToBytes(bytesToUint64(existing) + bytesToUint64(new)) 169 | } 170 | -------------------------------------------------------------------------------- /key_registry_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package badger 17 | 18 | import ( 19 | "io/ioutil" 20 | "math/rand" 21 | "testing" 22 | 23 | "github.com/stretchr/testify/require" 24 | ) 25 | 26 | func getRegistryTestOptions(dir string, key []byte) KeyRegistryOptions { 27 | return KeyRegistryOptions{ 28 | Dir: dir, 29 | EncryptionKey: key, 30 | ReadOnly: false, 31 | } 32 | } 33 | func TestBuildRegistry(t *testing.T) { 34 | encryptionKey := make([]byte, 32) 35 | dir, err := ioutil.TempDir("", "badger-test") 36 | require.NoError(t, err) 37 | defer removeDir(dir) 38 | 39 | _, err = rand.Read(encryptionKey) 40 | require.NoError(t, err) 41 | opt := getRegistryTestOptions(dir, encryptionKey) 42 | 43 | kr, err := OpenKeyRegistry(opt) 44 | require.NoError(t, err) 45 | dk, err := kr.latestDataKey() 46 | require.NoError(t, err) 47 | // We're resetting the last created timestamp. So, it creates 48 | // new datakey. 49 | kr.lastCreated = 0 50 | dk1, err := kr.latestDataKey() 51 | // We generated two key. So, checking the length. 
52 | require.Equal(t, 2, len(kr.dataKeys)) 53 | require.NoError(t, err) 54 | require.NoError(t, kr.Close()) 55 | 56 | kr2, err := OpenKeyRegistry(opt) 57 | require.NoError(t, err) 58 | require.Equal(t, 2, len(kr2.dataKeys)) 59 | // Asserting the correctness of the datakey after opening the registry. 60 | require.Equal(t, dk.Data, kr.dataKeys[dk.KeyId].Data) 61 | require.Equal(t, dk1.Data, kr.dataKeys[dk1.KeyId].Data) 62 | require.NoError(t, kr2.Close()) 63 | } 64 | 65 | func TestRewriteRegistry(t *testing.T) { 66 | encryptionKey := make([]byte, 32) 67 | dir, err := ioutil.TempDir("", "badger-test") 68 | require.NoError(t, err) 69 | defer removeDir(dir) 70 | _, err = rand.Read(encryptionKey) 71 | require.NoError(t, err) 72 | opt := getRegistryTestOptions(dir, encryptionKey) 73 | kr, err := OpenKeyRegistry(opt) 74 | require.NoError(t, err) 75 | _, err = kr.latestDataKey() 76 | require.NoError(t, err) 77 | // We're resetting the last created timestamp. So, it creates 78 | // new datakey. 79 | kr.lastCreated = 0 80 | _, err = kr.latestDataKey() 81 | require.NoError(t, err) 82 | require.NoError(t, kr.Close()) 83 | delete(kr.dataKeys, 1) 84 | require.NoError(t, WriteKeyRegistry(kr, opt)) 85 | kr2, err := OpenKeyRegistry(opt) 86 | require.NoError(t, err) 87 | require.Equal(t, 1, len(kr2.dataKeys)) 88 | require.NoError(t, kr2.Close()) 89 | } 90 | 91 | func TestMismatch(t *testing.T) { 92 | encryptionKey := make([]byte, 32) 93 | dir, err := ioutil.TempDir("", "badger-test") 94 | require.NoError(t, err) 95 | defer removeDir(dir) 96 | _, err = rand.Read(encryptionKey) 97 | require.NoError(t, err) 98 | opt := getRegistryTestOptions(dir, encryptionKey) 99 | kr, err := OpenKeyRegistry(opt) 100 | require.NoError(t, err) 101 | require.NoError(t, kr.Close()) 102 | // Opening with the same key and asserting. 103 | kr, err = OpenKeyRegistry(opt) 104 | require.NoError(t, err) 105 | require.NoError(t, kr.Close()) 106 | // Opening with the invalid key and asserting. 107 | encryptionKey = make([]byte, 32) 108 | _, err = rand.Read(encryptionKey) 109 | require.NoError(t, err) 110 | opt.EncryptionKey = encryptionKey 111 | _, err = OpenKeyRegistry(opt) 112 | require.Error(t, err) 113 | require.EqualError(t, err, ErrEncryptionKeyMismatch.Error()) 114 | } 115 | 116 | func TestEncryptionAndDecryption(t *testing.T) { 117 | encryptionKey := make([]byte, 32) 118 | dir, err := ioutil.TempDir("", "badger-test") 119 | require.NoError(t, err) 120 | defer removeDir(dir) 121 | _, err = rand.Read(encryptionKey) 122 | require.NoError(t, err) 123 | opt := getRegistryTestOptions(dir, encryptionKey) 124 | kr, err := OpenKeyRegistry(opt) 125 | require.NoError(t, err) 126 | dk, err := kr.latestDataKey() 127 | require.NoError(t, err) 128 | require.NoError(t, kr.Close()) 129 | // Checking the correctness of the datakey after closing and 130 | // opening the key registry. 131 | kr, err = OpenKeyRegistry(opt) 132 | require.NoError(t, err) 133 | dk1, err := kr.dataKey(dk.GetKeyId()) 134 | require.NoError(t, err) 135 | require.Equal(t, dk.Data, dk1.Data) 136 | require.NoError(t, kr.Close()) 137 | } 138 | 139 | func TestKeyRegistryInMemory(t *testing.T) { 140 | encryptionKey := make([]byte, 32) 141 | _, err := rand.Read(encryptionKey) 142 | require.NoError(t, err) 143 | 144 | opt := getRegistryTestOptions("", encryptionKey) 145 | opt.InMemory = true 146 | 147 | kr, err := OpenKeyRegistry(opt) 148 | require.NoError(t, err) 149 | _, err = kr.latestDataKey() 150 | require.NoError(t, err) 151 | // We're resetting the last created timestamp. 
So, it creates 152 | // new datakey. 153 | kr.lastCreated = 0 154 | _, err = kr.latestDataKey() 155 | // We generated two key. So, checking the length. 156 | require.Equal(t, 2, len(kr.dataKeys)) 157 | require.NoError(t, err) 158 | require.NoError(t, kr.Close()) 159 | } 160 | -------------------------------------------------------------------------------- /dir_plan9.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "fmt" 21 | "os" 22 | "path/filepath" 23 | "strings" 24 | 25 | "github.com/dgraph-io/badger/v2/y" 26 | ) 27 | 28 | // directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part 29 | // of the locking mechanism, it's just advisory. 30 | type directoryLockGuard struct { 31 | // File handle on the directory, which we've locked. 32 | f *os.File 33 | // The absolute path to our pid file. 34 | path string 35 | } 36 | 37 | // acquireDirectoryLock gets a lock on the directory. 38 | // It will also write our pid to dirPath/pidFileName for convenience. 39 | // readOnly is not supported on Plan 9. 40 | func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) ( 41 | *directoryLockGuard, error) { 42 | if readOnly { 43 | return nil, ErrPlan9NotSupported 44 | } 45 | 46 | // Convert to absolute path so that Release still works even if we do an unbalanced 47 | // chdir in the meantime. 48 | absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName)) 49 | if err != nil { 50 | return nil, y.Wrap(err, "cannot get absolute path for pid lock file") 51 | } 52 | 53 | // If the file was unpacked or created by some other program, it might not 54 | // have the ModeExclusive bit set. Set it before we call OpenFile, so that we 55 | // can be confident that a successful OpenFile implies exclusive use. 56 | // 57 | // OpenFile fails if the file ModeExclusive bit set *and* the file is already open. 58 | // So, if the file is closed when the DB crashed, we're fine. When the process 59 | // that was managing the DB crashes, the OS will close the file for us. 60 | // 61 | // This bit of code is copied from Go's lockedfile internal package: 62 | // https://github.com/golang/go/blob/go1.15rc1/src/cmd/go/internal/lockedfile/lockedfile_plan9.go#L58 63 | if fi, err := os.Stat(absPidFilePath); err == nil { 64 | if fi.Mode()&os.ModeExclusive == 0 { 65 | if err := os.Chmod(absPidFilePath, fi.Mode()|os.ModeExclusive); err != nil { 66 | return nil, y.Wrapf(err, "could not set exclusive mode bit") 67 | } 68 | } 69 | } else if !os.IsNotExist(err) { 70 | return nil, err 71 | } 72 | f, err := os.OpenFile(absPidFilePath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666|os.ModeExclusive) 73 | if err != nil { 74 | if isLocked(err) { 75 | return nil, y.Wrapf(err, 76 | "Cannot open pid lock file %q. 
Another process is using this Badger database", 77 | absPidFilePath) 78 | } 79 | return nil, y.Wrapf(err, "Cannot open pid lock file %q", absPidFilePath) 80 | } 81 | 82 | if _, err = fmt.Fprintf(f, "%d\n", os.Getpid()); err != nil { 83 | f.Close() 84 | return nil, y.Wrapf(err, "could not write pid") 85 | } 86 | return &directoryLockGuard{f, absPidFilePath}, nil 87 | } 88 | 89 | // Release deletes the pid file and releases our lock on the directory. 90 | func (guard *directoryLockGuard) release() error { 91 | // It's important that we remove the pid file first. 92 | err := os.Remove(guard.path) 93 | 94 | if closeErr := guard.f.Close(); err == nil { 95 | err = closeErr 96 | } 97 | guard.path = "" 98 | guard.f = nil 99 | 100 | return err 101 | } 102 | 103 | // openDir opens a directory for syncing. 104 | func openDir(path string) (*os.File, error) { return os.Open(path) } 105 | 106 | // When you create or delete a file, you have to ensure the directory entry for the file is synced 107 | // in order to guarantee the file is visible (if the system crashes). (See the man page for fsync, 108 | // or see https://github.com/coreos/etcd/issues/6368 for an example.) 109 | func syncDir(dir string) error { 110 | f, err := openDir(dir) 111 | if err != nil { 112 | return y.Wrapf(err, "While opening directory: %s.", dir) 113 | } 114 | 115 | err = f.Sync() 116 | closeErr := f.Close() 117 | if err != nil { 118 | return y.Wrapf(err, "While syncing directory: %s.", dir) 119 | } 120 | return y.Wrapf(closeErr, "While closing directory: %s.", dir) 121 | } 122 | 123 | // Opening an exclusive-use file returns an error. 124 | // The expected error strings are: 125 | // 126 | // - "open/create -- file is locked" (cwfs, kfs) 127 | // - "exclusive lock" (fossil) 128 | // - "exclusive use file already open" (ramfs) 129 | // 130 | // See https://github.com/golang/go/blob/go1.15rc1/src/cmd/go/internal/lockedfile/lockedfile_plan9.go#L16 131 | var lockedErrStrings = [...]string{ 132 | "file is locked", 133 | "exclusive lock", 134 | "exclusive use file already open", 135 | } 136 | 137 | // Even though plan9 doesn't support the Lock/RLock/Unlock functions to 138 | // manipulate already-open files, IsLocked is still meaningful: os.OpenFile 139 | // itself may return errors that indicate that a file with the ModeExclusive bit 140 | // set is already open. 141 | func isLocked(err error) bool { 142 | s := err.Error() 143 | 144 | for _, frag := range lockedErrStrings { 145 | if strings.Contains(s, frag) { 146 | return true 147 | } 148 | } 149 | return false 150 | } 151 | --------------------------------------------------------------------------------
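The comment above `syncDir` in dir_plan9.go notes that after creating or deleting a file, the parent directory entry must also be synced for the change to survive a crash. The following standalone sketch (hypothetical code, not part of Badger) illustrates that create-then-sync-the-directory pattern; the helper mirrors the idea of `syncDir` but is written here only for illustration.

```go
package main

import (
	"log"
	"os"
	"path/filepath"
)

// syncParentDir opens a directory and fsyncs it so that a newly created
// file's directory entry is durable. This mirrors the purpose of the
// syncDir helper above, as a self-contained sketch.
func syncParentDir(dir string) error {
	f, err := os.Open(dir)
	if err != nil {
		return err
	}
	defer f.Close()
	return f.Sync()
}

func main() {
	dir := os.TempDir()
	path := filepath.Join(dir, "example.data")

	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := f.Write([]byte("payload")); err != nil {
		log.Fatal(err)
	}
	// Sync the file contents first...
	if err := f.Sync(); err != nil {
		log.Fatal(err)
	}
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
	// ...then sync the parent directory so the new entry is visible
	// even if the system crashes right after this point.
	if err := syncParentDir(dir); err != nil {
		log.Fatal(err)
	}
}
```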