├── .deepsource.toml ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE ├── stale.yml └── workflows │ └── main.yml ├── .gitignore ├── .golangci.yml ├── .travis.yml ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── VERSIONING.md ├── appveyor.yml ├── badger ├── .gitignore ├── cmd │ ├── bench.go │ ├── flatten.go │ ├── info.go │ ├── pick_table_bench.go │ ├── read_bench.go │ ├── root.go │ ├── rotate.go │ ├── rotate_test.go │ ├── stream.go │ └── write_bench.go └── main.go ├── batch.go ├── batch_test.go ├── changes.sh ├── compaction.go ├── db.go ├── db2_test.go ├── db_test.go ├── dir_plan9.go ├── dir_unix.go ├── dir_windows.go ├── doc.go ├── docs ├── .gitignore ├── README.md ├── archetypes │ └── default.md ├── config.toml ├── content │ ├── _index.md │ ├── contact │ │ ├── _index.md │ │ └── index.md │ ├── design │ │ ├── _index.md │ │ └── index.md │ ├── faq │ │ ├── _index.md │ │ └── index.md │ ├── get-started │ │ ├── _index.md │ │ └── index.md │ ├── projects-using-badger │ │ ├── _index.md │ │ └── index.md │ └── resources │ │ ├── _index.md │ │ └── index.md ├── scripts │ ├── build.sh │ └── local.sh ├── static │ └── images │ │ └── diggy-shadow.png └── themes │ ├── .DS_Store │ └── hugo-docs │ ├── LICENSE.md │ ├── archetypes │ └── default.md │ ├── images │ ├── screenshot.png │ └── tn.png │ ├── layouts │ ├── .gitignore │ ├── 404.html │ ├── _default │ │ ├── article.html │ │ ├── list.html │ │ └── section.html │ ├── index.html │ ├── partials │ │ ├── footer.html │ │ ├── header.html │ │ ├── meta.html │ │ ├── request-edit.html │ │ ├── sidebar.html │ │ ├── suggest-edit.html │ │ └── topbar.html │ └── shortcodes │ │ ├── load-img.html │ │ ├── notice.html │ │ └── version.html │ ├── static │ ├── css │ │ └── theme.css │ ├── fonts │ │ ├── FontAwesome.otf │ │ ├── Inconsolata.eot │ │ ├── Inconsolata.svg │ │ ├── Inconsolata.ttf │ │ ├── Inconsolata.woff │ │ ├── Novecentosanswide-Normal-webfont.eot │ │ ├── Novecentosanswide-Normal-webfont.svg │ │ ├── Novecentosanswide-Normal-webfont.ttf │ │ ├── Novecentosanswide-Normal-webfont.woff │ │ ├── Novecentosanswide-Normal-webfont.woff2 │ │ ├── Novecentosanswide-UltraLight-webfont.eot │ │ ├── Novecentosanswide-UltraLight-webfont.svg │ │ ├── Novecentosanswide-UltraLight-webfont.ttf │ │ ├── Novecentosanswide-UltraLight-webfont.woff │ │ ├── Novecentosanswide-UltraLight-webfont.woff2 │ │ ├── Work_Sans_200.eot │ │ ├── Work_Sans_200.svg │ │ ├── Work_Sans_200.ttf │ │ ├── Work_Sans_200.woff │ │ ├── Work_Sans_200.woff2 │ │ ├── Work_Sans_300.eot │ │ ├── Work_Sans_300.svg │ │ ├── Work_Sans_300.ttf │ │ ├── Work_Sans_300.woff │ │ ├── Work_Sans_300.woff2 │ │ ├── Work_Sans_500.eot │ │ ├── Work_Sans_500.svg │ │ ├── Work_Sans_500.ttf │ │ ├── Work_Sans_500.woff │ │ ├── Work_Sans_500.woff2 │ │ ├── fontawesome-webfont.eot │ │ ├── fontawesome-webfont.svg │ │ ├── fontawesome-webfont.ttf │ │ ├── fontawesome-webfont.woff │ │ └── fontawesome-webfont.woff2 │ ├── images │ │ ├── Screenshot from 2020-07-07 19-14-26.png │ │ ├── badger.png │ │ ├── badger.svg │ │ ├── dgraph-black.png │ │ ├── dgraph.svg │ │ ├── diggy-shadow.png │ │ ├── favicons │ │ │ ├── android-chrome-144x144.png │ │ │ ├── android-chrome-192x192.png │ │ │ ├── android-chrome-36x36.png │ │ │ ├── android-chrome-48x48.png │ │ │ ├── android-chrome-72x72.png │ │ │ ├── android-chrome-96x96.png │ │ │ ├── apple-touch-icon-114x114.png │ │ │ ├── apple-touch-icon-120x120.png │ │ │ ├── apple-touch-icon-144x144.png │ │ │ ├── apple-touch-icon-152x152.png │ │ │ ├── apple-touch-icon-180x180.png │ │ │ ├── 
apple-touch-icon-57x57.png │ │ │ ├── apple-touch-icon-60x60.png │ │ │ ├── apple-touch-icon-72x72.png │ │ │ ├── apple-touch-icon-76x76.png │ │ │ ├── apple-touch-icon.png │ │ │ ├── favicon-16x16.png │ │ │ ├── favicon-194x194.png │ │ │ ├── favicon-32x32.png │ │ │ ├── favicon-96x96.png │ │ │ ├── favicon.ico │ │ │ ├── manifest.json │ │ │ └── safari-pinned-tab.svg │ │ └── gopher-404.jpg │ ├── js │ │ ├── clipboard.min.js │ │ ├── dgraph.js │ │ └── jquery-2.x.min.js │ └── json │ │ └── search.json │ └── theme.toml ├── errors.go ├── fb ├── BlockOffset.go ├── TableIndex.go ├── flatbuffer.fbs ├── gen.sh └── install_flatbuffers.sh ├── go.mod ├── go.sum ├── histogram.go ├── histogram_test.go ├── images ├── benchmarks-rocksdb.png └── diggy-shadow.png ├── iterator.go ├── iterator_test.go ├── key_registry.go ├── key_registry_test.go ├── level_handler.go ├── levels.go ├── levels_test.go ├── lifetime.go ├── logger.go ├── logger_test.go ├── managed_db_test.go ├── manifest.go ├── manifest_test.go ├── options.go ├── options └── options.go ├── options_test.go ├── pb ├── badgerpb3.pb.go ├── badgerpb3.proto ├── gen.sh └── protos_test.go ├── publisher.go ├── publisher_test.go ├── skl ├── README.md ├── arena.go ├── skl.go └── skl_test.go ├── stream.go ├── stream_test.go ├── stream_writer.go ├── stream_writer_test.go ├── structs.go ├── table ├── README.md ├── builder.go ├── builder_test.go ├── iterator.go ├── merge_iterator.go ├── merge_iterator_test.go ├── table.go └── table_test.go ├── test.sh ├── trie ├── trie.go └── trie_test.go ├── txn.go ├── txn_test.go ├── util.go ├── value_test.go └── y ├── bloom.go ├── bloom_test.go ├── checksum.go ├── encrypt.go ├── encrypt_test.go ├── error.go ├── event_log.go ├── file_dsync.go ├── file_nodsync.go ├── iterator.go ├── metrics.go ├── mutex_test.go ├── watermark.go ├── y.go ├── y_test.go └── zstd.go /.deepsource.toml: -------------------------------------------------------------------------------- 1 | version = 1 2 | 3 | test_patterns = [ 4 | 'integration/testgc/**', 5 | '**/*_test.go' 6 | ] 7 | 8 | exclude_patterns = [ 9 | 10 | ] 11 | 12 | [[analyzers]] 13 | name = 'go' 14 | enabled = true 15 | 16 | 17 | [analyzers.meta] 18 | import_path = 'github.com/dgraph-io/badger' 19 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # CODEOWNERS info: https://help.github.com/en/articles/about-code-owners 2 | # Owners are automatically requested for review for PRs that changes code 3 | # that they own. 4 | * @manishrjain @ashish-goswami @jarifibrahim 5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE: -------------------------------------------------------------------------------- 1 | **GitHub Issues are deprecated. 
Use [Discuss Issues](https://discuss.dgraph.io/c/issues/badger/37) for reporting issues about this repository.** 2 | -------------------------------------------------------------------------------- /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Number of days of inactivity before an issue becomes stale 2 | daysUntilStale: 30 3 | # Number of days of inactivity before a stale issue is closed 4 | daysUntilClose: 7 5 | # Issues with these labels will never be considered stale 6 | exemptLabels: 7 | - skip/stale 8 | - status/accepted 9 | # Label to use when marking an issue as stale 10 | staleLabel: status/stale 11 | # Comment to post when marking an issue as stale. Set to `false` to disable 12 | markComment: > 13 | This issue has been automatically marked as stale because it has not had 14 | recent activity. It will be closed if no further activity occurs. Thank you 15 | for your contributions. 16 | # Comment to post when closing a stale issue. Set to `false` to disable 17 | closeComment: > 18 | This issue was marked as stale and no activity has occurred since then, 19 | therefore it will now be closed. Please, reopen if the issue is still 20 | relevant. 21 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | # This is a basic workflow to help you get started with Actions 2 | 3 | name: Issue Closer 4 | 5 | # Controls when the action will run. Triggers the workflow on push or pull request 6 | # events but only for the master branch 7 | on: 8 | issues: 9 | types: [ opened, reopened ] 10 | 11 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel 12 | jobs: 13 | # This workflow contains a single job called "build" 14 | build: 15 | # The type of runner that the job will run on 16 | runs-on: ubuntu-latest 17 | 18 | # Steps represent a sequence of tasks that will be executed as part of the job 19 | steps: 20 | - name: Close Issue 21 | uses: peter-evans/close-issue@v1.0.1 22 | with: 23 | comment: | 24 | **Use [Discuss Issues](https://discuss.dgraph.io/c/issues/badger/37) for reporting issues about this repository.** 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | p/ 2 | badger-test*/ 3 | .idea/ 4 | 5 | vendor -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | tests: false 3 | 4 | linters-settings: 5 | lll: 6 | line-length: 100 7 | 8 | linters: 9 | disable-all: true 10 | enable: 11 | - errcheck 12 | - errorlint 13 | - ineffassign 14 | - gas 15 | - gofmt 16 | - golint 17 | - gosimple 18 | - govet 19 | - lll 20 | - varcheck 21 | - unused 22 | 23 | issues: 24 | exclude-rules: 25 | - linters: 26 | - gosec 27 | text: "G404: " 28 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - "1.12" 5 | - "1.13" 6 | - tip 7 | os: 8 | - osx 9 | env: 10 | jobs: 11 | - GOARCH=386 12 | - GOARCH=amd64 13 | global: 14 | - secure: 
CRkV2+/jlO0gXzzS50XGxfMS117FNwiVjxNY/LeWq06RKD+dDCPxTJl3JCNe3l0cYEPAglV2uMMYukDiTqJ7e+HI4nh4N4mv6lwx39N8dAvJe1x5ITS2T4qk4kTjuQb1Q1vw/ZOxoQqmvNKj2uRmBdJ/HHmysbRJ1OzCWML3OXdUwJf0AYlJzTjpMfkOKr7sTtE4rwyyQtd4tKH1fGdurgI9ZuFd9qvYxK2qcJhsQ6CNqMXt+7FkVkN1rIPmofjjBTNryzUr4COFXuWH95aDAif19DeBW4lbNgo1+FpDsrgmqtuhl6NAuptI8q/imow2KXBYJ8JPXsxW8DVFj0IIp0RCd3GjaEnwBEbxAyiIHLfW7AudyTS/dJOvZffPqXnuJ8xj3OPIdNe4xY0hWl8Ju2HhKfLOAHq7VadHZWd3IHLil70EiL4/JLD1rNbMImUZisFaA8pyrcIvYYebjOnk4TscwKFLedClRSX1XsMjWWd0oykQtrdkHM2IxknnBpaLu7mFnfE07f6dkG0nlpyu4SCLey7hr5FdcEmljA0nIxTSYDg6035fQkBEAbe7hlESOekkVNT9IZPwG+lmt3vU4ofi6NqNbJecOuSB+h36IiZ9s4YQtxYNnLgW14zjuFGGyT5smc3IjBT7qngDjKIgyrSVoRkY/8udy9qbUgvBeW8= 15 | 16 | 17 | jobs: 18 | allow_failures: 19 | - go: tip 20 | exclude: 21 | # Exclude builds for 386 architecture on go 1.12 and tip 22 | # Since we don't want it to run for 32 bit 23 | - go: "1.12" 24 | env: GOARCH=386 25 | - go: tip 26 | env: GOARCH=386 27 | include: 28 | # Define one extra linux build, which we use to run cross 29 | # compiled 32 bit tests 30 | - os: linux 31 | arch: arm64 32 | go: "1.14" 33 | env: go_32=yes 34 | 35 | notifications: 36 | email: false 37 | slack: 38 | secure: X7uBLWYbuUhf8QFE16CoS5z7WvFR8EN9j6cEectMW6mKZ3vwXGwVXRIPsgUq/606DsQdCCx34MR8MRWYGlu6TBolbSe9y0EP0i46yipPz22YtuT7umcVUbGEyx8MZKgG0v1u/zA0O4aCsOBpGAA3gxz8h3JlEHDt+hv6U8xRsSllVLzLSNb5lwxDtcfEDxVVqP47GMEgjLPM28Pyt5qwjk7o5a4YSVzkfdxBXxd3gWzFUWzJ5E3cTacli50dK4GVfiLcQY2aQYoYO7AAvDnvP+TPfjDkBlUEE4MUz5CDIN51Xb+WW33sX7g+r3Bj7V5IRcF973RiYkpEh+3eoiPnyWyxhDZBYilty3b+Hysp6d4Ov/3I3ll7Bcny5+cYjakjkMH3l9w3gs6Y82GlpSLSJshKWS8vPRsxFe0Pstj6QSJXTd9EBaFr+l1ScXjJv/Sya9j8N9FfTuOTESWuaL1auX4Y7zEEVHlA8SCNOO8K0eTfxGZnC/YcIHsR8rePEAcFxfOYQppkyLF/XvAtnb/LMUuu0g4y2qNdme6Oelvyar1tFEMRtbl4mRCdu/krXBFtkrsfUaVY6WTPdvXAGotsFJ0wuA53zGVhlcd3+xAlSlR3c1QX95HIMeivJKb5L4nTjP+xnrmQNtnVk+tG4LSH2ltuwcZSSczModtcBmRefrk= 39 | 40 | script: >- 41 | if [ $TRAVIS_OS_NAME = "linux" ] && [ $go_32 ]; then 42 | uname -a 43 | GOOS=linux GOARCH=arm go test -v ./... 44 | else 45 | go test -v ./... 46 | # Cross-compile for Plan 9 47 | GOOS=plan9 go build ./... 48 | fi 49 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | Our Code of Conduct can be found here: 4 | 5 | https://dgraph.io/conduct 6 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution Guide 2 | 3 | * [Before you get started](#before-you-get-started) 4 | * [Code of Conduct](#code-of-conduct) 5 | * [Your First Contribution](#your-first-contribution) 6 | * [Find a good first topic](#find-a-good-first-topic) 7 | * [Setting up your development environment](#setting-up-your-development-environment) 8 | * [Fork the project](#fork-the-project) 9 | * [Clone the project](#clone-the-project) 10 | * [New branch for a new code](#new-branch-for-a-new-code) 11 | * [Test](#test) 12 | * [Commit and push](#commit-and-push) 13 | * [Create a Pull Request](#create-a-pull-request) 14 | * [Sign the CLA](#sign-the-cla) 15 | * [Get a code review](#get-a-code-review) 16 | 17 | ## Before you get started 18 | 19 | ### Code of Conduct 20 | 21 | Please make sure to read and observe our [Code of Conduct](./CODE_OF_CONDUCT.md). 
22 | 23 | ## Your First Contribution 24 | 25 | ### Find a good first topic 26 | 27 | You can start by finding an existing issue with the 28 | [good first issue](https://github.com/outcaste-io/badger/labels/good%20first%20issue) or [help wanted](https://github.com/outcaste-io/badger/labels/help%20wanted) labels. These issues are well suited for new contributors. 29 | 30 | 31 | ## Setting up your development environment 32 | 33 | Badger uses [`Go Modules`](https://github.com/golang/go/wiki/Modules) 34 | to manage dependencies. The version of Go should be **1.12** or above. 35 | 36 | ### Fork the project 37 | 38 | - Visit https://github.com/outcaste-io/badger 39 | - Click the `Fork` button (top right) to create a fork of the repository 40 | 41 | ### Clone the project 42 | 43 | ```sh 44 | $ git clone https://github.com/$GITHUB_USER/badger 45 | $ cd badger 46 | $ git remote add upstream git@github.com:outcaste-io/badger.git 47 | 48 | # Never push to the upstream master 49 | git remote set-url --push upstream no_push 50 | ``` 51 | 52 | ### New branch for a new code 53 | 54 | Get your local master up to date: 55 | 56 | ```sh 57 | $ git fetch upstream 58 | $ git checkout master 59 | $ git rebase upstream/master 60 | ``` 61 | 62 | Create a new branch from the master: 63 | 64 | ```sh 65 | $ git checkout -b my_new_feature 66 | ``` 67 | 68 | And now you can finally add your changes to project. 69 | 70 | ### Test 71 | 72 | Build and run all tests: 73 | 74 | ```sh 75 | $ ./test.sh 76 | ``` 77 | 78 | ### Commit and push 79 | 80 | Commit your changes: 81 | 82 | ```sh 83 | $ git commit 84 | ``` 85 | 86 | When the changes are ready to review: 87 | 88 | ```sh 89 | $ git push origin my_new_feature 90 | ``` 91 | 92 | ### Create a Pull Request 93 | 94 | Just open `https://github.com/$GITHUB_USER/badger/pull/new/my_new_feature` and 95 | fill the PR description. 96 | 97 | ### Sign the CLA 98 | 99 | Click the **Sign in with Github to agree** button to sign the CLA. [An example](https://cla-assistant.io/outcaste-io/badger?pullRequest=4). 100 | 101 | ### Get a code review 102 | 103 | If your pull request (PR) is opened, it will be assigned to one or more 104 | reviewers. Those reviewers will do a code review. 105 | 106 | To address review comments, you should commit the changes to the same branch of 107 | the PR on your fork. 108 | -------------------------------------------------------------------------------- /VERSIONING.md: -------------------------------------------------------------------------------- 1 | # Serialization Versioning: Semantic Versioning for databases 2 | 3 | Semantic Versioning, commonly known as SemVer, is a great idea that has been very widely adopted as 4 | a way to decide how to name software versions. The whole concept is very well summarized on 5 | semver.org with the following lines: 6 | 7 | > Given a version number MAJOR.MINOR.PATCH, increment the: 8 | > 9 | > 1. MAJOR version when you make incompatible API changes, 10 | > 2. MINOR version when you add functionality in a backwards-compatible manner, and 11 | > 3. PATCH version when you make backwards-compatible bug fixes. 12 | > 13 | > Additional labels for pre-release and build metadata are available as extensions to the 14 | > MAJOR.MINOR.PATCH format. 15 | 16 | Unfortunately, API changes are not the most important changes for libraries that serialize data for 17 | later consumption. For these libraries, such as BadgerDB, changes to the API are much easier to 18 | handle than change to the data format used to store data on disk. 
19 | 20 | ## Serialization Version specification 21 | 22 | Serialization Versioning, like Semantic Versioning, uses 3 numbers and also calls them 23 | MAJOR.MINOR.PATCH, but the semantics of the numbers are slightly modified: 24 | 25 | Given a version number MAJOR.MINOR.PATCH, increment the: 26 | 27 | - MAJOR version when you make changes that require a transformation of the dataset before it can be 28 | used again. 29 | - MINOR version when old datasets are still readable but the API might have changed in 30 | backwards-compatible or incompatible ways. 31 | - PATCH version when you make backwards-compatible bug fixes. 32 | 33 | Additional labels for pre-release and build metadata are available as extensions to the 34 | MAJOR.MINOR.PATCH format. 35 | 36 | Following this naming strategy, migration from v1.x to v2.x requires a migration strategy for your 37 | existing dataset, and as such has to be carefully planned. Migrations in between different minor 38 | versions (e.g. v1.5.x and v1.6.x) might break your build, as the API *might* have changed, but once 39 | your code compiles there's no need for any data migration. Lastly, changes in between two different 40 | patch versions should never break your build or dataset. 41 | 42 | For more background on our decision to adopt Serialization Versioning, read the blog post 43 | [Semantic Versioning, Go Modules, and Databases][blog] and the original proposal on 44 | [this comment on Dgraph's Discuss forum][discuss]. 45 | 46 | [blog]: https://blog.dgraph.io/post/serialization-versioning/ 47 | [discuss]: https://discuss.dgraph.io/t/go-modules-on-badger-and-dgraph/4662/7 -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | # version format 2 | version: "{build}" 3 | 4 | # Operating system (build VM template) 5 | os: Windows Server 2012 R2 6 | 7 | # Platform. 8 | platform: x64 9 | 10 | clone_folder: c:\gopath\src\github.com\dgraph-io\badger 11 | 12 | # Environment variables 13 | environment: 14 | GOVERSION: 1.12 15 | GOPATH: c:\gopath 16 | GO111MODULE: on 17 | 18 | # scripts that run after cloning repository 19 | install: 20 | - set PATH=%GOPATH%\bin;c:\go\bin;c:\msys64\mingw64\bin;%PATH% 21 | - go version 22 | - go env 23 | - python --version 24 | - gcc --version 25 | 26 | # To run your custom scripts instead of automatic MSBuild 27 | build_script: 28 | # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648 29 | - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)' 30 | - cd c:\gopath\src\github.com\dgraph-io\badger 31 | - git branch 32 | - go get -t ./... 33 | 34 | # To run your custom scripts instead of automatic tests 35 | test_script: 36 | # Unit tests 37 | - ps: Add-AppveyorTest "Unit Tests" -Outcome Running 38 | - go test -v github.com/dgraph-io/badger/... 
39 | - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed 40 | 41 | notifications: 42 | - provider: Email 43 | to: 44 | - pawan@dgraph.io 45 | on_build_failure: true 46 | on_build_status_changed: true 47 | # to disable deployment 48 | deploy: off 49 | 50 | -------------------------------------------------------------------------------- /badger/.gitignore: -------------------------------------------------------------------------------- 1 | /badger 2 | -------------------------------------------------------------------------------- /badger/cmd/bench.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "github.com/spf13/cobra" 21 | ) 22 | 23 | var benchCmd = &cobra.Command{ 24 | Use: "benchmark", 25 | Short: "Benchmark Badger database.", 26 | Long: `This command will benchmark Badger for different usecases. 27 | Useful for testing and performance analysis.`, 28 | } 29 | 30 | func init() { 31 | RootCmd.AddCommand(benchCmd) 32 | } 33 | -------------------------------------------------------------------------------- /badger/cmd/flatten.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2018 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "fmt" 21 | "math" 22 | 23 | "github.com/outcaste-io/badger/v4" 24 | "github.com/outcaste-io/badger/v4/options" 25 | "github.com/pkg/errors" 26 | "github.com/spf13/cobra" 27 | ) 28 | 29 | var flattenCmd = &cobra.Command{ 30 | Use: "flatten", 31 | Short: "Flatten the LSM tree.", 32 | Long: ` 33 | This command would compact all the LSM tables into one level. 34 | `, 35 | RunE: flatten, 36 | } 37 | 38 | var fo = struct { 39 | keyPath string 40 | numWorkers int 41 | numVersions int 42 | compressionType uint32 43 | }{} 44 | 45 | func init() { 46 | RootCmd.AddCommand(flattenCmd) 47 | flattenCmd.Flags().IntVarP(&fo.numWorkers, "num-workers", "w", 1, 48 | "Number of concurrent compactors to run. More compactors would use more"+ 49 | " server resources to potentially achieve faster compactions.") 50 | flattenCmd.Flags().IntVarP(&fo.numVersions, "num_versions", "", 0, 51 | "Option to configure the maximum number of versions per key. 
"+ 52 | "Values <= 0 will be considered to have the max number of versions.") 53 | flattenCmd.Flags().StringVar(&fo.keyPath, "encryption-key-file", "", 54 | "Path of the encryption key file.") 55 | flattenCmd.Flags().Uint32VarP(&fo.compressionType, "compression", "", 1, 56 | "Option to configure the compression type in output DB. "+ 57 | "0 to disable, 1 for Snappy, and 2 for ZSTD.") 58 | } 59 | 60 | func flatten(cmd *cobra.Command, args []string) error { 61 | if fo.numVersions <= 0 { 62 | // Keep all versions. 63 | fo.numVersions = math.MaxInt32 64 | } 65 | encKey, err := getKey(fo.keyPath) 66 | if err != nil { 67 | return err 68 | } 69 | if fo.compressionType < 0 || fo.compressionType > 2 { 70 | return errors.Errorf( 71 | "compression value must be one of 0 (disabled), 1 (Snappy), or 2 (ZSTD)") 72 | } 73 | opt := badger.DefaultOptions(sstDir). 74 | WithNumVersionsToKeep(fo.numVersions). 75 | WithNumCompactors(0). 76 | WithBlockCacheSize(100 << 20). 77 | WithIndexCacheSize(200 << 20). 78 | WithCompression(options.CompressionType(fo.compressionType)). 79 | WithEncryptionKey(encKey) 80 | fmt.Printf("Opening badger with options = %+v\n", opt) 81 | db, err := badger.Open(opt) 82 | if err != nil { 83 | return err 84 | } 85 | defer db.Close() 86 | 87 | return db.Flatten(fo.numWorkers) 88 | } 89 | -------------------------------------------------------------------------------- /badger/cmd/root.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "errors" 21 | "fmt" 22 | "os" 23 | "strings" 24 | 25 | "github.com/spf13/cobra" 26 | ) 27 | 28 | var sstDir string 29 | 30 | // RootCmd represents the base command when called without any subcommands 31 | var RootCmd = &cobra.Command{ 32 | Use: "badger", 33 | Short: "Tools to manage Badger database.", 34 | PersistentPreRunE: validateRootCmdArgs, 35 | } 36 | 37 | // Execute adds all child commands to the root command and sets flags appropriately. 38 | // This is called by main.main(). It only needs to happen once to the rootCmd. 39 | func Execute() { 40 | if err := RootCmd.Execute(); err != nil { 41 | fmt.Println(err) 42 | os.Exit(1) 43 | } 44 | } 45 | 46 | func init() { 47 | RootCmd.PersistentFlags().StringVar(&sstDir, "dir", "", 48 | "Directory where the LSM tree files are located. (required)") 49 | } 50 | 51 | func validateRootCmdArgs(cmd *cobra.Command, args []string) error { 52 | if strings.HasPrefix(cmd.Use, "help ") { // No need to validate if it is help 53 | return nil 54 | } 55 | if sstDir == "" { 56 | return errors.New("--dir not specified") 57 | } 58 | return nil 59 | } 60 | -------------------------------------------------------------------------------- /badger/cmd/rotate.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. 
and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "io/ioutil" 21 | "os" 22 | "time" 23 | 24 | "github.com/outcaste-io/badger/v4" 25 | 26 | "github.com/spf13/cobra" 27 | ) 28 | 29 | var oldKeyPath string 30 | var newKeyPath string 31 | var rotateCmd = &cobra.Command{ 32 | Use: "rotate", 33 | Short: "Rotate encryption key.", 34 | Long: "Rotate will rotate the old key with new encryption key.", 35 | RunE: doRotate, 36 | } 37 | 38 | func init() { 39 | RootCmd.AddCommand(rotateCmd) 40 | rotateCmd.Flags().StringVarP(&oldKeyPath, "old-key-path", "o", 41 | "", "Path of the old key") 42 | rotateCmd.Flags().StringVarP(&newKeyPath, "new-key-path", "n", 43 | "", "Path of the new key") 44 | } 45 | 46 | func doRotate(cmd *cobra.Command, args []string) error { 47 | oldKey, err := getKey(oldKeyPath) 48 | if err != nil { 49 | return err 50 | } 51 | opt := badger.KeyRegistryOptions{ 52 | Dir: sstDir, 53 | ReadOnly: true, 54 | EncryptionKey: oldKey, 55 | EncryptionKeyRotationDuration: 10 * 24 * time.Hour, 56 | } 57 | kr, err := badger.OpenKeyRegistry(opt) 58 | if err != nil { 59 | return err 60 | } 61 | newKey, err := getKey(newKeyPath) 62 | if err != nil { 63 | return err 64 | } 65 | opt.EncryptionKey = newKey 66 | err = badger.WriteKeyRegistry(kr, opt) 67 | if err != nil { 68 | return err 69 | } 70 | return nil 71 | } 72 | 73 | func getKey(path string) ([]byte, error) { 74 | if path == "" { 75 | // Empty bytes for plain text to encryption(vice versa). 76 | return []byte{}, nil 77 | } 78 | fp, err := os.Open(path) 79 | if err != nil { 80 | return nil, err 81 | } 82 | return ioutil.ReadAll(fp) 83 | } 84 | -------------------------------------------------------------------------------- /badger/cmd/rotate_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package cmd 18 | 19 | /* 20 | func TestRotate(t *testing.T) { 21 | dir, err := ioutil.TempDir("", "badger-test") 22 | require.NoError(t, err) 23 | defer os.RemoveAll(dir) 24 | 25 | // Creating sample key. 
26 | key := make([]byte, 32) 27 | _, err = rand.Read(key) 28 | require.NoError(t, err) 29 | 30 | fp, err := ioutil.TempFile("", "*.key") 31 | require.NoError(t, err) 32 | _, err = fp.Write(key) 33 | require.NoError(t, err) 34 | defer fp.Close() 35 | 36 | // Opening DB with the encryption key. 37 | opts := badger.DefaultOptions(dir) 38 | opts.EncryptionKey = key 39 | opts.BlockCacheSize = 1 << 20 40 | 41 | db, err := badger.Open(opts) 42 | require.NoError(t, err) 43 | // Closing the db. 44 | require.NoError(t, db.Close()) 45 | 46 | // Opening the db again for the successful open. 47 | db, err = badger.Open(opts) 48 | require.NoError(t, err) 49 | // Closing so that we can open another db 50 | require.NoError(t, db.Close()) 51 | 52 | // Creating another sample key. 53 | key2 := make([]byte, 32) 54 | _, err = rand.Read(key2) 55 | require.NoError(t, err) 56 | fp2, err := ioutil.TempFile("", "*.key") 57 | require.NoError(t, err) 58 | _, err = fp2.Write(key2) 59 | require.NoError(t, err) 60 | defer fp2.Close() 61 | oldKeyPath = fp2.Name() 62 | sstDir = dir 63 | 64 | // Check whether we able to rotate the key with some sample key. We should get mismatch 65 | // error. 66 | require.EqualError(t, doRotate(nil, []string{}), badger.ErrEncryptionKeyMismatch.Error()) 67 | 68 | // rotating key with proper key. 69 | oldKeyPath = fp.Name() 70 | newKeyPath = fp2.Name() 71 | require.NoError(t, doRotate(nil, []string{})) 72 | 73 | // Checking whether db opens with the new key. 74 | opts.EncryptionKey = key2 75 | db, err = badger.Open(opts) 76 | require.NoError(t, err) 77 | require.NoError(t, db.Close()) 78 | 79 | // Checking for plain text rotation. 80 | oldKeyPath = newKeyPath 81 | newKeyPath = "" 82 | require.NoError(t, doRotate(nil, []string{})) 83 | opts.EncryptionKey = []byte{} 84 | db, err = badger.Open(opts) 85 | require.NoError(t, err) 86 | defer db.Close() 87 | } 88 | 89 | // This test shows that rotate tool can be used to enable encryption. 90 | func TestRotatePlainTextToEncrypted(t *testing.T) { 91 | dir, err := ioutil.TempDir("", "badger-test") 92 | require.NoError(t, err) 93 | defer os.RemoveAll(dir) 94 | 95 | // Open DB without encryption. 96 | opts := badger.DefaultOptions(dir) 97 | db, err := badger.Open(opts) 98 | require.NoError(t, err) 99 | 100 | wb := db.NewWriteBatch() 101 | require.NoError(t, wb.SetAt([]byte("foo"), []byte("bar"), 1)) 102 | require.NoError(t, wb.Flush()) 103 | 104 | require.NoError(t, db.Close()) 105 | 106 | // Create an encryption key. 107 | key := make([]byte, 32) 108 | y.Check2(rand.Read(key)) 109 | fp, err := ioutil.TempFile("", "*.key") 110 | require.NoError(t, err) 111 | _, err = fp.Write(key) 112 | require.NoError(t, err) 113 | defer fp.Close() 114 | 115 | oldKeyPath = "" 116 | newKeyPath = fp.Name() 117 | sstDir = dir 118 | 119 | // Enable encryption. newKeyPath is encrypted. 120 | require.Nil(t, doRotate(nil, []string{})) 121 | 122 | // Try opening DB without the key. 123 | opts.BlockCacheSize = 1 << 20 124 | _, err = badger.Open(opts) 125 | require.EqualError(t, err, badger.ErrEncryptionKeyMismatch.Error()) 126 | 127 | // Check whether db opens with the new key. 
128 | opts.EncryptionKey = key 129 | db, err = badger.Open(opts) 130 | require.NoError(t, err) 131 | 132 | db.View(func(txn *badger.Txn) error { 133 | iopt := badger.DefaultIteratorOptions 134 | it := txn.NewIterator(iopt) 135 | defer it.Close() 136 | count := 0 137 | for it.Rewind(); it.Valid(); it.Next() { 138 | count++ 139 | } 140 | require.Equal(t, 1, count) 141 | return nil 142 | }) 143 | require.NoError(t, db.Close()) 144 | } 145 | */ 146 | -------------------------------------------------------------------------------- /badger/cmd/stream.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package cmd 18 | 19 | import ( 20 | "fmt" 21 | "io" 22 | "math" 23 | "os" 24 | 25 | "github.com/outcaste-io/badger/v4" 26 | "github.com/outcaste-io/badger/v4/options" 27 | "github.com/outcaste-io/badger/v4/y" 28 | "github.com/pkg/errors" 29 | "github.com/spf13/cobra" 30 | ) 31 | 32 | var streamCmd = &cobra.Command{ 33 | Use: "stream", 34 | Short: "Stream DB into another DB with different options", 35 | Long: ` 36 | This command streams the contents of this DB into another DB with the given options. 37 | `, 38 | RunE: stream, 39 | } 40 | 41 | var so = struct { 42 | outDir string 43 | compressionType uint32 44 | numVersions int 45 | readOnly bool 46 | keyPath string 47 | }{} 48 | 49 | func init() { 50 | // TODO: Add more options. 51 | RootCmd.AddCommand(streamCmd) 52 | streamCmd.Flags().StringVarP(&so.outDir, "out", "o", "", 53 | "Path to output DB. The directory should be empty.") 54 | streamCmd.Flags().BoolVarP(&so.readOnly, "read_only", "", true, 55 | "Option to open input DB in read-only mode") 56 | streamCmd.Flags().IntVarP(&so.numVersions, "num_versions", "", 0, 57 | "Option to configure the maximum number of versions per key. "+ 58 | "Values <= 0 will be considered to have the max number of versions.") 59 | streamCmd.Flags().Uint32VarP(&so.compressionType, "compression", "", 1, 60 | "Option to configure the compression type in output DB. "+ 61 | "0 to disable, 1 for Snappy, and 2 for ZSTD.") 62 | streamCmd.Flags().StringVarP(&so.keyPath, "encryption-key-file", "e", "", 63 | "Path of the encryption key file.") 64 | } 65 | 66 | func stream(cmd *cobra.Command, args []string) error { 67 | // Options for input DB. 68 | if so.numVersions <= 0 { 69 | so.numVersions = math.MaxInt32 70 | } 71 | encKey, err := getKey(so.keyPath) 72 | if err != nil { 73 | return err 74 | } 75 | inOpt := badger.DefaultOptions(sstDir). 76 | WithReadOnly(so.readOnly). 77 | WithNumVersionsToKeep(so.numVersions). 78 | WithBlockCacheSize(100 << 20). 79 | WithIndexCacheSize(200 << 20). 80 | WithEncryptionKey(encKey) 81 | 82 | // Options for output DB. 
83 | if so.compressionType < 0 || so.compressionType > 2 { 84 | return errors.Errorf( 85 | "compression value must be one of 0 (disabled), 1 (Snappy), or 2 (ZSTD)") 86 | } 87 | inDB, err := badger.Open(inOpt) 88 | if err != nil { 89 | return y.Wrapf(err, "cannot open DB at %s", sstDir) 90 | } 91 | defer inDB.Close() 92 | 93 | stream := inDB.NewStreamAt(math.MaxUint64) 94 | 95 | if len(so.outDir) > 0 { 96 | if _, err := os.Stat(so.outDir); err == nil { 97 | f, err := os.Open(so.outDir) 98 | if err != nil { 99 | return err 100 | } 101 | defer f.Close() 102 | 103 | _, err = f.Readdirnames(1) 104 | if !errors.Is(err, io.EOF) { 105 | return errors.Errorf( 106 | "cannot run stream tool on non-empty output directory %s", so.outDir) 107 | } 108 | } 109 | 110 | stream.LogPrefix = "DB.Stream" 111 | outOpt := inOpt. 112 | WithDir(so.outDir). 113 | WithNumVersionsToKeep(so.numVersions). 114 | WithCompression(options.CompressionType(so.compressionType)). 115 | WithEncryptionKey(encKey). 116 | WithReadOnly(false) 117 | err = inDB.StreamDB(outOpt) 118 | } 119 | fmt.Println("Done.") 120 | return err 121 | } 122 | -------------------------------------------------------------------------------- /badger/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "fmt" 21 | "net/http" 22 | _ "net/http/pprof" 23 | "runtime" 24 | 25 | "github.com/dustin/go-humanize" 26 | "github.com/outcaste-io/badger/v4/badger/cmd" 27 | "github.com/outcaste-io/ristretto/z" 28 | "go.opencensus.io/zpages" 29 | ) 30 | 31 | func main() { 32 | go func() { 33 | for i := 8080; i < 9080; i++ { 34 | fmt.Printf("Listening for /debug HTTP requests at port: %d\n", i) 35 | if err := http.ListenAndServe(fmt.Sprintf("0.0.0.0:%d", i), nil); err != nil { 36 | fmt.Println("Port busy. Trying another one...") 37 | continue 38 | 39 | } 40 | } 41 | }() 42 | zpages.Handle(nil, "/z") 43 | runtime.SetBlockProfileRate(100) 44 | runtime.GOMAXPROCS(128) 45 | 46 | out := z.CallocNoRef(1, "Badger.Main") 47 | fmt.Printf("jemalloc enabled: %v\n", len(out) > 0) 48 | z.StatsPrint() 49 | z.Free(out) 50 | 51 | cmd.Execute() 52 | fmt.Printf("Num Allocated Bytes at program end: %s\n", 53 | humanize.IBytes(uint64(z.NumAllocBytes()))) 54 | if z.NumAllocBytes() > 0 { 55 | fmt.Println(z.Leaks()) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /batch_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2018 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "fmt" 21 | "testing" 22 | "time" 23 | 24 | "github.com/outcaste-io/badger/v4/y" 25 | 26 | "github.com/stretchr/testify/require" 27 | ) 28 | 29 | func TestWriteBatch(t *testing.T) { 30 | key := func(i int) []byte { 31 | return []byte(fmt.Sprintf("%10d", i)) 32 | } 33 | val := func(i int) []byte { 34 | return []byte(fmt.Sprintf("%128d", i)) 35 | } 36 | 37 | test := func(t *testing.T, db *DB) { 38 | wb := db.NewWriteBatch() 39 | defer wb.Cancel() 40 | 41 | // Sanity check for SetEntryAt. 42 | require.Error(t, wb.SetEntryAt(&Entry{}, 12)) 43 | 44 | N, M := 50000, 1000 45 | start := time.Now() 46 | var version uint64 47 | 48 | for i := 0; i < N; i++ { 49 | version++ 50 | require.NoError(t, wb.SetAt(key(i), val(i), version)) 51 | } 52 | for i := 0; i < M; i++ { 53 | version++ 54 | require.NoError(t, wb.DeleteAt(key(i), version)) 55 | } 56 | require.NoError(t, wb.Flush()) 57 | t.Logf("Time taken for %d writes (w/ test options): %s\n", N+M, time.Since(start)) 58 | 59 | err := db.View(func(txn *Txn) error { 60 | itr := txn.NewIterator(DefaultIteratorOptions) 61 | defer itr.Close() 62 | 63 | i := M 64 | for itr.Rewind(); itr.Valid(); itr.Next() { 65 | item := itr.Item() 66 | require.Equal(t, string(key(i)), string(item.Key())) 67 | valcopy, err := item.ValueCopy(nil) 68 | require.NoError(t, err) 69 | require.Equal(t, val(i), valcopy) 70 | i++ 71 | } 72 | require.Equal(t, N, i) 73 | return nil 74 | }) 75 | require.NoError(t, err) 76 | } 77 | t.Run("disk mode", func(t *testing.T) { 78 | opt := getTestOptions("") 79 | runBadgerTest(t, &opt, func(t *testing.T, db *DB) { 80 | test(t, db) 81 | }) 82 | t.Logf("Disk mode done\n") 83 | }) 84 | t.Run("InMemory mode", func(t *testing.T) { 85 | opt := getTestOptions("") 86 | opt.InMemory = true 87 | db, err := Open(opt) 88 | require.NoError(t, err) 89 | test(t, db) 90 | t.Logf("Disk mode done\n") 91 | require.NoError(t, db.Close()) 92 | }) 93 | } 94 | 95 | // This test ensures we don't end up in deadlock in case of empty writebatch. 96 | func TestEmptyWriteBatch(t *testing.T) { 97 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 98 | wb := db.NewWriteBatch() 99 | require.NoError(t, wb.Flush()) 100 | wb = db.NewWriteBatch() 101 | require.NoError(t, wb.Flush()) 102 | wb = db.NewWriteBatch() 103 | require.NoError(t, wb.Flush()) 104 | }) 105 | } 106 | 107 | // This test ensures we don't panic during flush. 
108 | // See issue: https://github.com/dgraph-io/badger/issues/1394 109 | func TestFlushPanic(t *testing.T) { 110 | t.Run("flush after flush", func(t *testing.T) { 111 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 112 | wb := db.NewWriteBatch() 113 | wb.Flush() 114 | require.Error(t, y.ErrCommitAfterFinish, wb.Flush()) 115 | }) 116 | }) 117 | t.Run("flush after cancel", func(t *testing.T) { 118 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 119 | wb := db.NewWriteBatch() 120 | wb.Cancel() 121 | require.Error(t, y.ErrCommitAfterFinish, wb.Flush()) 122 | }) 123 | }) 124 | } 125 | -------------------------------------------------------------------------------- /changes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | GHORG=${GHORG:-dgraph-io} 5 | GHREPO=${GHREPO:-badger} 6 | cat < 8 |
-------------------------------------------------------------------------------- /docs/content/_index.md: --------------------------------------------------------------------------------

BadgerDB is an embeddable, persistent, and fast key-value (KV) database written
in pure Go. It is the underlying database for Dgraph, a fast, distributed graph
database. It's meant to be a performant alternative to non-Go-based key-value
stores like RocksDB.

- Quickstart Guide: A single page quickstart guide to get started with BadgerDB
- Resources: Additional resources and information
- Design: Design goals behind BadgerDB
- Projects using Badger: A list of known projects that use BadgerDB
- FAQ: Frequently asked questions
- Badger: Embeddable, persistent, and fast key-value database that powers Dgraph

## Changelog

The [Changelog] is kept fairly up-to-date.

- Badger v1.0 was released in Nov 2017, and the latest version that is data-compatible
  with v1.0 is v1.6.0.
- Badger v2.0 was released in Nov 2019 with a new storage format which won't be
  compatible with all of the v1.x. Badger v2.0 supports compression, encryption and
  uses a cache to speed up lookup.

For more details on our version naming schema please read [Choosing a version]({{< relref "get-started/index.md#choosing-a-version" >}}).

[Changelog]:https://github.com/outcaste-io/badger/blob/main/CHANGELOG.md

## Contribute

Get started with contributing fixes and enhancements to Badger and related software.

## Our Community

**Badger is made better every day by the growing community and the contributors all over the world.**

Discuss Badger on the official community.
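To make the "embeddable" and "pure Go" description above concrete, here is a minimal, hedged sketch of opening a database, writing a key, and reading it back. It only uses APIs that appear in this repository's own tests and tools (`badger.Open`, `DefaultOptions`, `WriteBatch.SetAt`, `DB.View`, the iterator); the directory path, key, value, and version number are illustrative placeholders, and details may differ between Badger releases.

```go
package main

import (
	"fmt"
	"log"

	"github.com/outcaste-io/badger/v4"
)

func main() {
	// Open (or create) a database under a local directory; the path is a placeholder.
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger-quickstart"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Write a single key at version 1 through the write-batch API.
	wb := db.NewWriteBatch()
	defer wb.Cancel()
	if err := wb.SetAt([]byte("answer"), []byte("42"), 1); err != nil {
		log.Fatal(err)
	}
	if err := wb.Flush(); err != nil {
		log.Fatal(err)
	}

	// Read everything back inside a read-only transaction.
	err = db.View(func(txn *badger.Txn) error {
		itr := txn.NewIterator(badger.DefaultIteratorOptions)
		defer itr.Close()
		for itr.Rewind(); itr.Valid(); itr.Next() {
			item := itr.Item()
			val, err := item.ValueCopy(nil)
			if err != nil {
				return err
			}
			fmt.Printf("%s = %s\n", item.Key(), val)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

The `SetAt`/`Flush` write pattern and the `View` plus iterator read pattern mirror the usage in `batch_test.go` earlier in this repository.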
135 |
136 |
137 | -------------------------------------------------------------------------------- /docs/content/contact/_index.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/content/contact/_index.md -------------------------------------------------------------------------------- /docs/content/contact/index.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Contact" 3 | aliases = ["/contact"] 4 | +++ 5 | 6 | - Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, feature requests and discussions. 7 | - Please use [Github issue tracker](https://github.com/dgraph-io/badger/issues) for filing bugs or feature requests. 8 | - Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs). 9 | -------------------------------------------------------------------------------- /docs/content/design/_index.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/content/design/_index.md -------------------------------------------------------------------------------- /docs/content/design/index.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Design" 3 | aliases = ["/design"] 4 | +++ 5 | 6 | Badger was written with these design goals in mind: 7 | 8 | - Write a key-value database in pure Go. 9 | - Use latest research to build the fastest KV database for data sets spanning terabytes. 10 | - Optimize for SSDs. 11 | 12 | Badger’s design is based on a paper titled _[WiscKey: Separating Keys from 13 | Values in SSD-conscious Storage][wisckey]_. 14 | 15 | [wisckey]: https://www.usenix.org/system/files/conference/fast16/fast16-papers-lu.pdf 16 | 17 | ## Comparisons 18 | | Feature | Badger | RocksDB | BoltDB | 19 | | ------- | ------ | ------- | ------ | 20 | | Design | LSM tree with value log | LSM tree only | B+ tree | 21 | | High Read throughput | Yes | No | Yes | 22 | | High Write throughput | Yes | Yes | No | 23 | | Designed for SSDs | Yes (with latest research 1) | Not specifically 2 | No | 24 | | Embeddable | Yes | Yes | Yes | 25 | | Sorted KV access | Yes | Yes | Yes | 26 | | Pure Go (no Cgo) | Yes | No | Yes | 27 | | Transactions | Yes, ACID, concurrent with SSI3 | Yes (but non-ACID) | Yes, ACID | 28 | | Snapshots | Yes | Yes | Yes | 29 | | TTL support | Yes | Yes | No | 30 | | 3D access (key-value-version) | Yes4 | No | No | 31 | 32 | 1 The [WISCKEY paper][wisckey] (on which Badger is based) saw big 33 | wins with separating values from keys, significantly reducing the write 34 | amplification compared to a typical LSM tree. 35 | 36 | 2 RocksDB is an SSD optimized version of LevelDB, which was designed specifically for rotating disks. 37 | As such RocksDB's design isn't aimed at SSDs. 38 | 39 | 3 SSI: Serializable Snapshot Isolation. For more details, see the blog post [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/) 40 | 41 | 4 Badger provides direct access to value versions via its Iterator API. 42 | Users can also specify how many versions to keep per key via Options. 43 | 44 | ## Benchmarks 45 | We have run comprehensive benchmarks against RocksDB, Bolt and LMDB. The 46 | benchmarking code, and the detailed logs for the benchmarks can be found in the 47 | [badger-bench] repo. 
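Before moving on, here is a hedged sketch of the version-aware access mentioned in footnote 4 above: the options keep every version of a key (the same `WithNumVersionsToKeep` knob used by the `flatten` and `stream` tools in this repository), and an iterator with `AllVersions` walks them. The directory, key, and version numbers are placeholders, and `AllVersions` and `Item.Version()` are assumed to behave as in upstream Badger.

```go
package main

import (
	"fmt"
	"log"
	"math"

	"github.com/outcaste-io/badger/v4"
)

func main() {
	// Keep all versions of every key so older revisions stay visible to iterators.
	opt := badger.DefaultOptions("/tmp/badger-versions-example").
		WithNumVersionsToKeep(math.MaxInt32)
	db, err := badger.Open(opt)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Write the same key at three different versions via the write-batch API.
	wb := db.NewWriteBatch()
	defer wb.Cancel()
	for v := uint64(1); v <= 3; v++ {
		if err := wb.SetAt([]byte("config"), []byte(fmt.Sprintf("rev-%d", v)), v); err != nil {
			log.Fatal(err)
		}
	}
	if err := wb.Flush(); err != nil {
		log.Fatal(err)
	}

	// Iterate every stored version of the key, newest first.
	err = db.View(func(txn *badger.Txn) error {
		iopt := badger.DefaultIteratorOptions
		iopt.AllVersions = true
		itr := txn.NewIterator(iopt)
		defer itr.Close()
		for itr.Rewind(); itr.Valid(); itr.Next() {
			item := itr.Item()
			val, err := item.ValueCopy(nil)
			if err != nil {
				return err
			}
			fmt.Printf("key=%s version=%d value=%s\n", item.Key(), item.Version(), val)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```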
More explanation, including graphs can be found the blog posts (linked 48 | above). 49 | 50 | [badger-bench]: https://github.com/dgraph-io/badger-bench 51 | -------------------------------------------------------------------------------- /docs/content/faq/_index.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/content/faq/_index.md -------------------------------------------------------------------------------- /docs/content/get-started/_index.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/content/get-started/_index.md -------------------------------------------------------------------------------- /docs/content/projects-using-badger/_index.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/content/projects-using-badger/_index.md -------------------------------------------------------------------------------- /docs/content/resources/_index.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/content/resources/_index.md -------------------------------------------------------------------------------- /docs/content/resources/index.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = "Resources" 3 | aliases = ["/resouces"] 4 | +++ 5 | 6 | 7 | ## Blog Posts 8 | 1. [Introducing Badger: A fast key-value store written natively in 9 | Go](https://open.dgraph.io/post/badger/) 10 | 2. [Make Badger crash resilient with ALICE](https://blog.dgraph.io/post/alice/) 11 | 3. [Badger vs LMDB vs BoltDB: Benchmarking key-value databases in Go](https://blog.dgraph.io/post/badger-lmdb-boltdb/) 12 | 4. [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/) 13 | 14 | ## Contact 15 | - Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, bugs, feature requests, and discussions. 16 | - Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs). 17 | 18 | ## Contributing 19 | 20 | If you're interested in contributing to Badger see [CONTRIBUTING.md](https://github.com/dgraph-io/badger/blob/master/CONTRIBUTING.md). 21 | -------------------------------------------------------------------------------- /docs/scripts/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script runs in a loop (configurable with LOOP), checks for updates to the 3 | # Hugo docs theme or to the docs on certain branches and rebuilds the public 4 | # folder for them. It has be made more generalized, so that we don't have to 5 | # hardcode versions. 6 | 7 | # Warning - Changes should not be made on the server on which this script is running 8 | # becauses this script does git checkout and merge. 9 | 10 | set -e 11 | 12 | GREEN='\033[32;1m' 13 | RESET='\033[0m' 14 | HOST="${HOST:-https://dgraph.io/docs/badger}" 15 | # Name of output public directory 16 | PUBLIC="${PUBLIC:-public}" 17 | # LOOP true makes this script run in a loop to check for updates 18 | LOOP="${LOOP:-true}" 19 | # Binary of hugo command to run. 
20 | HUGO="${HUGO:-hugo}" 21 | 22 | # TODO - Maybe get list of released versions from Github API and filter 23 | # those which have docs. 24 | 25 | # Place the latest version at the beginning so that version selector can 26 | # append '(latest)' to the version string, followed by the master version, 27 | # and then the older versions in descending order, such that the 28 | # build script can place the artifact in an appropriate location. 29 | VERSIONS_ARRAY=( 30 | 'master' 31 | ) 32 | 33 | joinVersions() { 34 | versions=$(printf ",%s" "${VERSIONS_ARRAY[@]}") 35 | echo "${versions:1}" 36 | } 37 | 38 | function version { echo "$@" | gawk -F. '{ printf("%03d%03d%03d\n", $1,$2,$3); }'; } 39 | 40 | rebuild() { 41 | echo -e "$(date) $GREEN Updating docs for branch: $1.$RESET" 42 | 43 | # The latest documentation is generated in the root of /public dir 44 | # Older documentations are generated in their respective `/public/vx.x.x` dirs 45 | dir='' 46 | if [[ $2 != "${VERSIONS_ARRAY[0]}" ]]; then 47 | dir=$2 48 | fi 49 | 50 | VERSION_STRING=$(joinVersions) 51 | # In Unix environments, env variables should also be exported to be seen by Hugo 52 | export CURRENT_BRANCH=${1} 53 | export CURRENT_VERSION=${2} 54 | export VERSIONS=${VERSION_STRING} 55 | 56 | HUGO_TITLE="Badger Doc ${2}"\ 57 | VERSIONS=${VERSION_STRING}\ 58 | CURRENT_BRANCH=${1}\ 59 | CURRENT_VERSION=${2} ${HUGO} \ 60 | --destination="${PUBLIC}"/"$dir"\ 61 | --baseURL="$HOST"/"$dir" 1> /dev/null 62 | } 63 | 64 | branchUpdated() 65 | { 66 | local branch="$1" 67 | git checkout -q "$1" 68 | UPSTREAM=$(git rev-parse "@{u}") 69 | LOCAL=$(git rev-parse "@") 70 | 71 | if [ "$LOCAL" != "$UPSTREAM" ] ; then 72 | git merge -q origin/"$branch" 73 | return 0 74 | else 75 | return 1 76 | fi 77 | } 78 | 79 | publicFolder() 80 | { 81 | dir='' 82 | if [[ $1 == "${VERSIONS_ARRAY[0]}" ]]; then 83 | echo "${PUBLIC}" 84 | else 85 | echo "${PUBLIC}/$1" 86 | fi 87 | } 88 | 89 | checkAndUpdate() 90 | { 91 | local version="$1" 92 | local branch="" 93 | 94 | if [[ $version == "master" ]]; then 95 | branch="master" 96 | else 97 | branch="release/$version" 98 | fi 99 | 100 | if branchUpdated "$branch" ; then 101 | git merge -q origin/"$branch" 102 | rebuild "$branch" "$version" 103 | fi 104 | 105 | folder=$(publicFolder "$version") 106 | if [ "$firstRun" = 1 ] || [ "$themeUpdated" = 0 ] || [ ! -d "$folder" ] ; then 107 | rebuild "$branch" "$version" 108 | fi 109 | } 110 | 111 | 112 | firstRun=1 113 | while true; do 114 | # Lets move to the docs directory. 115 | pushd "$(dirname "$0")/.." > /dev/null 116 | 117 | currentBranch=$(git rev-parse --abbrev-ref HEAD) 118 | 119 | # Lets check if the theme was updated. 120 | pushd themes/hugo-docs > /dev/null 121 | git remote update > /dev/null 122 | themeUpdated=1 123 | if branchUpdated "master" ; then 124 | echo -e "$(date) $GREEN Theme has been updated. Now will update the docs.$RESET" 125 | themeUpdated=0 126 | fi 127 | popd > /dev/null 128 | 129 | # Now lets check the theme. 130 | echo -e "$(date) Starting to check branches." 131 | git remote update > /dev/null 132 | 133 | for version in "${VERSIONS_ARRAY[@]}" 134 | do 135 | checkAndUpdate "$version" 136 | done 137 | 138 | echo -e "$(date) Done checking branches.\n" 139 | 140 | git checkout -q "$currentBranch" 141 | popd > /dev/null 142 | 143 | firstRun=0 144 | if ! 
$LOOP; then 145 | exit 146 | fi 147 | sleep 60 148 | done -------------------------------------------------------------------------------- /docs/scripts/local.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | GREEN='\033[32;1m' 6 | RESET='\033[0m' 7 | 8 | VERSIONS_ARRAY=( 9 | 'preview' 10 | ) 11 | 12 | joinVersions() { 13 | versions=$(printf ",%s" "${VERSIONS_ARRAY[@]}") 14 | echo "${versions:1}" 15 | } 16 | 17 | VERSION_STRING=$(joinVersions) 18 | 19 | run() { 20 | export CURRENT_BRANCH="master" 21 | export CURRENT_VERSION=${VERSIONS_ARRAY[0]} 22 | export VERSIONS=${VERSION_STRING} 23 | export DGRAPH_ENDPOINT=${DGRAPH_ENDPOINT:-"https://play.dgraph.io/query?latency=true"} 24 | 25 | 26 | export HUGO_TITLE="Badger Doc - Preview" \ 27 | export VERSIONS=${VERSION_STRING} \ 28 | export CURRENT_BRANCH="master" \ 29 | export CURRENT_VERSION=${CURRENT_VERSION} 30 | 31 | pushd "$(dirname "$0")/.." > /dev/null 32 | pushd themes > /dev/null 33 | 34 | if [ ! -d "hugo-docs" ]; then 35 | echo -e "$(date) $GREEN Hugo-docs repository not found. Cloning the repo. $RESET" 36 | git clone https://github.com/dgraph-io/hugo-docs.git 37 | else 38 | echo -e "$(date) $GREEN Hugo-docs repository found. Pulling the latest version from master. $RESET" 39 | pushd hugo-docs > /dev/null 40 | git pull 41 | popd > /dev/null 42 | fi 43 | popd > /dev/null 44 | 45 | if [[ $1 == "-p" || $1 == "--preview" ]]; then 46 | echo -e "$(date) $GREEN Generating documentation static pages in the public folder. $RESET" 47 | hugo --destination=public --baseURL="$2" 1> /dev/null 48 | echo -e "$(date) $GREEN Done building. $RESET" 49 | else 50 | hugo server -w --baseURL=http://localhost:1313 51 | fi 52 | popd > /dev/null 53 | } 54 | 55 | run "$1" "$2" -------------------------------------------------------------------------------- /docs/static/images/diggy-shadow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/static/images/diggy-shadow.png -------------------------------------------------------------------------------- /docs/themes/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/.DS_Store -------------------------------------------------------------------------------- /docs/themes/hugo-docs/LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Grav 4 | Copyright (c) 2016 MATHIEU CORNIC 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy of 7 | this software and associated documentation files (the "Software"), to deal in 8 | the Software without restriction, including without limitation the rights to 9 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 10 | the Software, and to permit persons to whom the Software is furnished to do so, 11 | subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 
15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 18 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 19 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 20 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 21 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/archetypes/default.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Some Title" 3 | weight: 5 4 | prev: /prev/path 5 | next: /next/path 6 | toc: true 7 | --- 8 | 9 | Lorem Ipsum 10 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/images/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/images/screenshot.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/images/tn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/images/tn.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/.gitignore: -------------------------------------------------------------------------------- 1 | /hugo-docs 2 | 3 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/404.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | {{ partial "meta.html" . }} 6 | {{ .Title }} 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 29 | 30 | 31 | 32 | 33 | 34 | 35 |
36 | 37 | 38 | 39 | Error 40 | 41 | 42 | Woops. Looks like this page doesn't exist. 43 | 44 | Go to homepage 45 | 46 | 47 | 48 | 49 |
50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/_default/article.html: -------------------------------------------------------------------------------- 1 | 2 |
3 | {{ partial "request-edit.html" . }} 4 | {{ partial "suggest-edit.html" . }} 5 | 6 | {{ .Title }} 7 | 8 | {{ .Content }} 9 |
10 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/_default/list.html: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/layouts/_default/list.html -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/_default/section.html: -------------------------------------------------------------------------------- 1 | {{ partial "header.html" . }} 2 | 3 | {{ range .Data.Pages.ByWeight }} 4 | {{ .Render "article" }} 5 | {{ end }} 6 | 7 | {{ partial "footer.html" . }} 8 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/index.html: -------------------------------------------------------------------------------- 1 | {{ partial "header.html" . }} 2 | 3 |
4 | {{ partial "request-edit.html" . }} 5 | {{ partial "suggest-edit.html" . }} 6 | 7 | {{ .Title }} 8 | 9 | {{ .Content }} 10 |
11 | 12 | 13 | {{ partial "footer.html" . }} 14 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/footer.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 24 | 25 | 26 | 29 | 32 | 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/header.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | {{ .Hugo.Generator }} 7 | {{ partial "meta.html" . }} 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | {{.Section | default "Badgerdb Documentation" | humanize}} — {{ .Site.Title }} 36 | 37 | 38 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | {{ partial "topbar.html" . }} 54 | {{ partial "sidebar.html" . }} 55 | 56 |
57 |
58 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/meta.html: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/request-edit.html: -------------------------------------------------------------------------------- 1 | {{ $currentBranch := getenv "CURRENT_BRANCH" }} 2 | 3 | 7 | Report Issue 8 | 9 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/sidebar.html: -------------------------------------------------------------------------------- 1 | 42 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/suggest-edit.html: -------------------------------------------------------------------------------- 1 | {{ $currentBranch := getenv "CURRENT_BRANCH" }} 2 | 3 | 5 | Edit Page 6 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/partials/topbar.html: -------------------------------------------------------------------------------- 1 | 32 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/shortcodes/load-img.html: -------------------------------------------------------------------------------- 1 | {{ $url := .Get 0}} 2 | {{ $alt := .Get 1}} 3 | 4 | {{ $alt }} -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/shortcodes/notice.html: -------------------------------------------------------------------------------- 1 | {{ $type := .Get 0}} 2 | 3 |
4 | {{ humanize $type }} {{ .Inner | markdownify }} 5 |
6 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/layouts/shortcodes/version.html: -------------------------------------------------------------------------------- 1 | {{ getenv "CURRENT_VERSION" }} -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/FontAwesome.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/FontAwesome.otf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Inconsolata.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Inconsolata.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Inconsolata.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Inconsolata.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Inconsolata.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Inconsolata.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Novecentosanswide-Normal-webfont.woff2 -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.eot: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Novecentosanswide-UltraLight-webfont.woff2 -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_200.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Work_Sans_200.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_200.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Work_Sans_200.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_200.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Work_Sans_200.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_200.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Work_Sans_200.woff2 -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_300.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Work_Sans_300.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_300.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Work_Sans_300.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_300.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Work_Sans_300.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_300.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Work_Sans_300.woff2 -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_500.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Work_Sans_500.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_500.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Work_Sans_500.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_500.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Work_Sans_500.woff -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/Work_Sans_500.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/Work_Sans_500.woff2 -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- 
/docs/themes/hugo-docs/static/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/Screenshot from 2020-07-07 19-14-26.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/Screenshot from 2020-07-07 19-14-26.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/badger.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/badger.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/dgraph-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/dgraph-black.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/diggy-shadow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/diggy-shadow.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/android-chrome-144x144.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/android-chrome-144x144.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/android-chrome-192x192.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/android-chrome-36x36.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/android-chrome-36x36.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/android-chrome-48x48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/android-chrome-48x48.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/android-chrome-72x72.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/android-chrome-72x72.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/android-chrome-96x96.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/android-chrome-96x96.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-114x114.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-114x114.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-120x120.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-120x120.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-144x144.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-144x144.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-152x152.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-152x152.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-180x180.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-180x180.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-57x57.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-57x57.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-60x60.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-60x60.png 
-------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-72x72.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-72x72.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-76x76.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon-76x76.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/apple-touch-icon.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/favicon-16x16.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/favicon-194x194.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/favicon-194x194.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/favicon-32x32.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/favicon-96x96.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/favicon-96x96.png -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/favicons/favicon.ico -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Dgraph", 3 | "icons": [ 4 | { 5 | "src": "\/images\/favicons\/android-chrome-36x36.png", 6 | "sizes": "36x36", 7 | "type": "image\/png", 8 | "density": 0.75 9 | }, 10 | { 11 | "src": 
"\/images\/favicons\/android-chrome-48x48.png", 12 | "sizes": "48x48", 13 | "type": "image\/png", 14 | "density": 1 15 | }, 16 | { 17 | "src": "\/images\/favicons\/android-chrome-72x72.png", 18 | "sizes": "72x72", 19 | "type": "image\/png", 20 | "density": 1.5 21 | }, 22 | { 23 | "src": "\/images\/favicons\/android-chrome-96x96.png", 24 | "sizes": "96x96", 25 | "type": "image\/png", 26 | "density": 2 27 | }, 28 | { 29 | "src": "\/images\/favicons\/android-chrome-144x144.png", 30 | "sizes": "144x144", 31 | "type": "image\/png", 32 | "density": 3 33 | }, 34 | { 35 | "src": "\/images\/favicons\/android-chrome-192x192.png", 36 | "sizes": "192x192", 37 | "type": "image\/png", 38 | "density": 4 39 | } 40 | ] 41 | } 42 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/favicons/safari-pinned-tab.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 7 | 8 | Created by potrace 1.11, written by Peter Selinger 2001-2013 9 | 10 | 12 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/images/gopher-404.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/docs/themes/hugo-docs/static/images/gopher-404.jpg -------------------------------------------------------------------------------- /docs/themes/hugo-docs/static/json/search.json: -------------------------------------------------------------------------------- 1 | [] 2 | -------------------------------------------------------------------------------- /docs/themes/hugo-docs/theme.toml: -------------------------------------------------------------------------------- 1 | # theme.toml template for a Hugo theme 2 | # See https://github.com/spf13/hugoThemes#themetoml for an example 3 | 4 | name = "Learn" 5 | license = "MIT" 6 | licenselink = "https://github.com/matcornic/hugo-theme-learn/blob/master/LICENSE.md" 7 | description = "Documentation theme for Hugo, based on Grav Learn theme" 8 | homepage = "https://matcornic.github.io/hugo-learn-doc/basics/what-is-this-hugo-theme/" 9 | tags = ["documentation", "grav", "learn", "doc"] 10 | features = ["documentation"] 11 | min_version = 0.17 12 | 13 | [author] 14 | name = "Mathieu Cornic" 15 | homepage = "http://matcornic.github.io/" 16 | 17 | [original] 18 | name = "Grav Learn" 19 | homepage = "http://learn.getgrav.org/" 20 | repo = "https://github.com/getgrav/grav-learn" 21 | -------------------------------------------------------------------------------- /fb/BlockOffset.go: -------------------------------------------------------------------------------- 1 | // Code generated by the FlatBuffers compiler. DO NOT EDIT. 
2 | 3 | package fb 4 | 5 | import ( 6 | flatbuffers "github.com/google/flatbuffers/go" 7 | ) 8 | 9 | type BlockOffset struct { 10 | _tab flatbuffers.Table 11 | } 12 | 13 | func GetRootAsBlockOffset(buf []byte, offset flatbuffers.UOffsetT) *BlockOffset { 14 | n := flatbuffers.GetUOffsetT(buf[offset:]) 15 | x := &BlockOffset{} 16 | x.Init(buf, n+offset) 17 | return x 18 | } 19 | 20 | func (rcv *BlockOffset) Init(buf []byte, i flatbuffers.UOffsetT) { 21 | rcv._tab.Bytes = buf 22 | rcv._tab.Pos = i 23 | } 24 | 25 | func (rcv *BlockOffset) Table() flatbuffers.Table { 26 | return rcv._tab 27 | } 28 | 29 | func (rcv *BlockOffset) Key(j int) byte { 30 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 31 | if o != 0 { 32 | a := rcv._tab.Vector(o) 33 | return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) 34 | } 35 | return 0 36 | } 37 | 38 | func (rcv *BlockOffset) KeyLength() int { 39 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 40 | if o != 0 { 41 | return rcv._tab.VectorLen(o) 42 | } 43 | return 0 44 | } 45 | 46 | func (rcv *BlockOffset) KeyBytes() []byte { 47 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 48 | if o != 0 { 49 | return rcv._tab.ByteVector(o + rcv._tab.Pos) 50 | } 51 | return nil 52 | } 53 | 54 | func (rcv *BlockOffset) MutateKey(j int, n byte) bool { 55 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 56 | if o != 0 { 57 | a := rcv._tab.Vector(o) 58 | return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n) 59 | } 60 | return false 61 | } 62 | 63 | func (rcv *BlockOffset) Offset() uint32 { 64 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 65 | if o != 0 { 66 | return rcv._tab.GetUint32(o + rcv._tab.Pos) 67 | } 68 | return 0 69 | } 70 | 71 | func (rcv *BlockOffset) MutateOffset(n uint32) bool { 72 | return rcv._tab.MutateUint32Slot(6, n) 73 | } 74 | 75 | func (rcv *BlockOffset) Len() uint32 { 76 | o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) 77 | if o != 0 { 78 | return rcv._tab.GetUint32(o + rcv._tab.Pos) 79 | } 80 | return 0 81 | } 82 | 83 | func (rcv *BlockOffset) MutateLen(n uint32) bool { 84 | return rcv._tab.MutateUint32Slot(8, n) 85 | } 86 | 87 | func BlockOffsetStart(builder *flatbuffers.Builder) { 88 | builder.StartObject(3) 89 | } 90 | func BlockOffsetAddKey(builder *flatbuffers.Builder, key flatbuffers.UOffsetT) { 91 | builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(key), 0) 92 | } 93 | func BlockOffsetStartKeyVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { 94 | return builder.StartVector(1, numElems, 1) 95 | } 96 | func BlockOffsetAddOffset(builder *flatbuffers.Builder, offset uint32) { 97 | builder.PrependUint32Slot(1, offset, 0) 98 | } 99 | func BlockOffsetAddLen(builder *flatbuffers.Builder, len uint32) { 100 | builder.PrependUint32Slot(2, len, 0) 101 | } 102 | func BlockOffsetEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { 103 | return builder.EndObject() 104 | } 105 | -------------------------------------------------------------------------------- /fb/TableIndex.go: -------------------------------------------------------------------------------- 1 | // Code generated by the FlatBuffers compiler. DO NOT EDIT. 
2 | 3 | package fb 4 | 5 | import ( 6 | flatbuffers "github.com/google/flatbuffers/go" 7 | ) 8 | 9 | type TableIndex struct { 10 | _tab flatbuffers.Table 11 | } 12 | 13 | func GetRootAsTableIndex(buf []byte, offset flatbuffers.UOffsetT) *TableIndex { 14 | n := flatbuffers.GetUOffsetT(buf[offset:]) 15 | x := &TableIndex{} 16 | x.Init(buf, n+offset) 17 | return x 18 | } 19 | 20 | func (rcv *TableIndex) Init(buf []byte, i flatbuffers.UOffsetT) { 21 | rcv._tab.Bytes = buf 22 | rcv._tab.Pos = i 23 | } 24 | 25 | func (rcv *TableIndex) Table() flatbuffers.Table { 26 | return rcv._tab 27 | } 28 | 29 | func (rcv *TableIndex) Offsets(obj *BlockOffset, j int) bool { 30 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 31 | if o != 0 { 32 | x := rcv._tab.Vector(o) 33 | x += flatbuffers.UOffsetT(j) * 4 34 | x = rcv._tab.Indirect(x) 35 | obj.Init(rcv._tab.Bytes, x) 36 | return true 37 | } 38 | return false 39 | } 40 | 41 | func (rcv *TableIndex) OffsetsLength() int { 42 | o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) 43 | if o != 0 { 44 | return rcv._tab.VectorLen(o) 45 | } 46 | return 0 47 | } 48 | 49 | func (rcv *TableIndex) BloomFilter(j int) byte { 50 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 51 | if o != 0 { 52 | a := rcv._tab.Vector(o) 53 | return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1)) 54 | } 55 | return 0 56 | } 57 | 58 | func (rcv *TableIndex) BloomFilterLength() int { 59 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 60 | if o != 0 { 61 | return rcv._tab.VectorLen(o) 62 | } 63 | return 0 64 | } 65 | 66 | func (rcv *TableIndex) BloomFilterBytes() []byte { 67 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 68 | if o != 0 { 69 | return rcv._tab.ByteVector(o + rcv._tab.Pos) 70 | } 71 | return nil 72 | } 73 | 74 | func (rcv *TableIndex) MutateBloomFilter(j int, n byte) bool { 75 | o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) 76 | if o != 0 { 77 | a := rcv._tab.Vector(o) 78 | return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n) 79 | } 80 | return false 81 | } 82 | 83 | func (rcv *TableIndex) MaxVersion() uint64 { 84 | o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) 85 | if o != 0 { 86 | return rcv._tab.GetUint64(o + rcv._tab.Pos) 87 | } 88 | return 0 89 | } 90 | 91 | func (rcv *TableIndex) MutateMaxVersion(n uint64) bool { 92 | return rcv._tab.MutateUint64Slot(8, n) 93 | } 94 | 95 | func (rcv *TableIndex) KeyCount() uint32 { 96 | o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) 97 | if o != 0 { 98 | return rcv._tab.GetUint32(o + rcv._tab.Pos) 99 | } 100 | return 0 101 | } 102 | 103 | func (rcv *TableIndex) MutateKeyCount(n uint32) bool { 104 | return rcv._tab.MutateUint32Slot(10, n) 105 | } 106 | 107 | func (rcv *TableIndex) UncompressedSize() uint32 { 108 | o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) 109 | if o != 0 { 110 | return rcv._tab.GetUint32(o + rcv._tab.Pos) 111 | } 112 | return 0 113 | } 114 | 115 | func (rcv *TableIndex) MutateUncompressedSize(n uint32) bool { 116 | return rcv._tab.MutateUint32Slot(12, n) 117 | } 118 | 119 | func (rcv *TableIndex) OnDiskSize() uint32 { 120 | o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) 121 | if o != 0 { 122 | return rcv._tab.GetUint32(o + rcv._tab.Pos) 123 | } 124 | return 0 125 | } 126 | 127 | func (rcv *TableIndex) MutateOnDiskSize(n uint32) bool { 128 | return rcv._tab.MutateUint32Slot(14, n) 129 | } 130 | 131 | func (rcv *TableIndex) StaleDataSize() uint32 { 132 | o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) 133 | if o != 0 { 134 | return rcv._tab.GetUint32(o + rcv._tab.Pos) 135 | } 136 | return 0 137 | } 138 | 139 | func (rcv 
*TableIndex) MutateStaleDataSize(n uint32) bool { 140 | return rcv._tab.MutateUint32Slot(16, n) 141 | } 142 | 143 | func TableIndexStart(builder *flatbuffers.Builder) { 144 | builder.StartObject(7) 145 | } 146 | func TableIndexAddOffsets(builder *flatbuffers.Builder, offsets flatbuffers.UOffsetT) { 147 | builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(offsets), 0) 148 | } 149 | func TableIndexStartOffsetsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { 150 | return builder.StartVector(4, numElems, 4) 151 | } 152 | func TableIndexAddBloomFilter(builder *flatbuffers.Builder, bloomFilter flatbuffers.UOffsetT) { 153 | builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(bloomFilter), 0) 154 | } 155 | func TableIndexStartBloomFilterVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { 156 | return builder.StartVector(1, numElems, 1) 157 | } 158 | func TableIndexAddMaxVersion(builder *flatbuffers.Builder, maxVersion uint64) { 159 | builder.PrependUint64Slot(2, maxVersion, 0) 160 | } 161 | func TableIndexAddKeyCount(builder *flatbuffers.Builder, keyCount uint32) { 162 | builder.PrependUint32Slot(3, keyCount, 0) 163 | } 164 | func TableIndexAddUncompressedSize(builder *flatbuffers.Builder, uncompressedSize uint32) { 165 | builder.PrependUint32Slot(4, uncompressedSize, 0) 166 | } 167 | func TableIndexAddOnDiskSize(builder *flatbuffers.Builder, onDiskSize uint32) { 168 | builder.PrependUint32Slot(5, onDiskSize, 0) 169 | } 170 | func TableIndexAddStaleDataSize(builder *flatbuffers.Builder, staleDataSize uint32) { 171 | builder.PrependUint32Slot(6, staleDataSize, 0) 172 | } 173 | func TableIndexEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { 174 | return builder.EndObject() 175 | } 176 | -------------------------------------------------------------------------------- /fb/flatbuffer.fbs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | namespace fb; 18 | 19 | table TableIndex { 20 | offsets:[BlockOffset]; 21 | bloom_filter:[ubyte]; 22 | max_version:uint64; 23 | key_count:uint32; 24 | uncompressed_size:uint32; 25 | on_disk_size:uint32; 26 | stale_data_size:uint32; 27 | } 28 | 29 | table BlockOffset { 30 | key:[ubyte]; 31 | offset:uint; 32 | len:uint; 33 | } 34 | 35 | root_type TableIndex; 36 | root_type BlockOffset; 37 | -------------------------------------------------------------------------------- /fb/gen.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | ## Install flatc if not present 6 | ## ref. https://google.github.io/flatbuffers/flatbuffers_guide_building.html 7 | command -v flatc > /dev/null || { ./install_flatbuffers.sh ; } 8 | 9 | flatc --go flatbuffer.fbs 10 | # Move files to the correct directory. 
11 | mv fb/* ./ 12 | rmdir fb 13 | -------------------------------------------------------------------------------- /fb/install_flatbuffers.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | install_mac() { 6 | command -v brew > /dev/null || \ 7 | { echo "[ERROR]: 'brew' command not not found. Exiting" 1>&2; exit 1; } 8 | brew install flatbuffers 9 | } 10 | 11 | install_linux() { 12 | for CMD in curl cmake g++ make; do 13 | command -v $CMD > /dev/null || \ 14 | { echo "[ERROR]: '$CMD' command not not found. Exiting" 1>&2; exit 1; } 15 | done 16 | 17 | ## Create Temp Build Directory 18 | BUILD_DIR=$(mktemp -d) 19 | pushd $BUILD_DIR 20 | 21 | ## Fetch Latest Tarball 22 | LATEST_VERSION=$(curl -s https://api.github.com/repos/google/flatbuffers/releases/latest | grep -oP '(?<=tag_name": ")[^"]+') 23 | curl -sLO https://github.com/google/flatbuffers/archive/$LATEST_VERSION.tar.gz 24 | tar xf $LATEST_VERSION.tar.gz 25 | 26 | ## Build Binaries 27 | cd flatbuffers-${LATEST_VERSION#v} 28 | cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release 29 | make 30 | ./flattests 31 | cp flatc /usr/local/bin/flatc 32 | 33 | ## Cleanup Temp Build Directory 34 | popd 35 | rm -rf $BUILD_DIR 36 | } 37 | 38 | SYSTEM=$(uname -s) 39 | 40 | case ${SYSTEM,,} in 41 | linux) 42 | sudo bash -c "$(declare -f install_linux); install_linux" 43 | ;; 44 | darwin) 45 | install_mac 46 | ;; 47 | esac 48 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/outcaste-io/badger/v4 2 | 3 | go 1.12 4 | 5 | require ( 6 | github.com/cespare/xxhash/v2 v2.1.2 7 | github.com/dustin/go-humanize v1.0.0 8 | github.com/gogo/protobuf v1.3.2 9 | github.com/golang/protobuf v1.3.1 10 | github.com/golang/snappy v0.0.3 11 | github.com/google/flatbuffers v1.12.1 12 | github.com/google/go-cmp v0.5.4 // indirect 13 | github.com/klauspost/compress v1.14.3 14 | github.com/kr/pretty v0.1.0 // indirect 15 | github.com/outcaste-io/ristretto v0.1.1-0.20220404170646-118eb5c81eac 16 | github.com/outcaste-io/sroar v0.0.0-20221114214615-697d5538e564 // indirect 17 | github.com/pkg/errors v0.9.1 18 | github.com/spf13/cobra v0.0.5 19 | github.com/stretchr/testify v1.7.0 20 | go.opencensus.io v0.22.5 21 | golang.org/x/net v0.0.0-20201021035429-f5854403a974 22 | golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c 23 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect 24 | ) 25 | -------------------------------------------------------------------------------- /histogram_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "testing" 21 | 22 | "github.com/stretchr/testify/require" 23 | ) 24 | 25 | func TestBuildKeyValueSizeHistogram(t *testing.T) { 26 | t.Run("All same size key-values", func(t *testing.T) { 27 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 28 | entries := int64(40) 29 | wb := db.NewWriteBatch() 30 | for i := rune(0); i < rune(entries); i++ { 31 | err := wb.SetEntryAt(NewEntry([]byte(string(i)), []byte("B")), 1) 32 | require.NoError(t, err) 33 | } 34 | require.NoError(t, wb.Flush()) 35 | 36 | histogram := db.buildHistogram(nil) 37 | keyHistogram := histogram.keySizeHistogram 38 | valueHistogram := histogram.valueSizeHistogram 39 | 40 | require.Equal(t, entries, keyHistogram.totalCount) 41 | require.Equal(t, entries, valueHistogram.totalCount) 42 | 43 | // Each entry is of size one. So the sum of sizes should be the same 44 | // as number of entries 45 | require.Equal(t, entries, valueHistogram.sum) 46 | require.Equal(t, entries, keyHistogram.sum) 47 | 48 | // All value sizes are same. The first bin should have all the values. 49 | require.Equal(t, entries, valueHistogram.countPerBin[0]) 50 | require.Equal(t, entries, keyHistogram.countPerBin[0]) 51 | 52 | require.Equal(t, int64(1), keyHistogram.max) 53 | require.Equal(t, int64(1), keyHistogram.min) 54 | require.Equal(t, int64(1), valueHistogram.max) 55 | require.Equal(t, int64(1), valueHistogram.min) 56 | }) 57 | }) 58 | 59 | t.Run("different size key-values", func(t *testing.T) { 60 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 61 | entries := int64(3) 62 | wb := db.NewWriteBatch() 63 | err := wb.SetEntryAt(NewEntry([]byte("A"), []byte("B")), 1) 64 | require.NoError(t, err) 65 | err = wb.SetEntryAt(NewEntry([]byte("AA"), []byte("BB")), 1) 66 | require.NoError(t, err) 67 | 68 | err = wb.SetEntryAt(NewEntry([]byte("AAA"), []byte("BBB")), 1) 69 | require.NoError(t, err) 70 | require.NoError(t, wb.Flush()) 71 | 72 | histogram := db.buildHistogram(nil) 73 | keyHistogram := histogram.keySizeHistogram 74 | valueHistogram := histogram.valueSizeHistogram 75 | 76 | require.Equal(t, entries, keyHistogram.totalCount) 77 | require.Equal(t, entries, valueHistogram.totalCount) 78 | 79 | // Each entry is of size one. 
So the sum of sizes should be the same 80 | // as number of entries 81 | require.Equal(t, int64(6), valueHistogram.sum) 82 | require.Equal(t, int64(6), keyHistogram.sum) 83 | 84 | // Length 1 key is in first bucket, length 2 and 3 are in the second 85 | // bucket 86 | require.Equal(t, int64(1), valueHistogram.countPerBin[0]) 87 | require.Equal(t, int64(2), valueHistogram.countPerBin[1]) 88 | require.Equal(t, int64(1), keyHistogram.countPerBin[0]) 89 | require.Equal(t, int64(2), keyHistogram.countPerBin[1]) 90 | 91 | require.Equal(t, int64(3), keyHistogram.max) 92 | require.Equal(t, int64(1), keyHistogram.min) 93 | require.Equal(t, int64(3), valueHistogram.max) 94 | require.Equal(t, int64(1), valueHistogram.min) 95 | }) 96 | }) 97 | } 98 | -------------------------------------------------------------------------------- /images/benchmarks-rocksdb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/images/benchmarks-rocksdb.png -------------------------------------------------------------------------------- /images/diggy-shadow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/outcaste-io/badger/ff8b6ef73eeeb02c07b567374b55361d15528e1e/images/diggy-shadow.png -------------------------------------------------------------------------------- /key_registry_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package badger 17 | 18 | import ( 19 | "io/ioutil" 20 | "math/rand" 21 | "testing" 22 | 23 | "github.com/stretchr/testify/require" 24 | ) 25 | 26 | func getRegistryTestOptions(dir string, key []byte) KeyRegistryOptions { 27 | return KeyRegistryOptions{ 28 | Dir: dir, 29 | EncryptionKey: key, 30 | ReadOnly: false, 31 | } 32 | } 33 | func TestBuildRegistry(t *testing.T) { 34 | encryptionKey := make([]byte, 32) 35 | dir, err := ioutil.TempDir("", "badger-test") 36 | require.NoError(t, err) 37 | defer removeDir(dir) 38 | 39 | _, err = rand.Read(encryptionKey) 40 | require.NoError(t, err) 41 | opt := getRegistryTestOptions(dir, encryptionKey) 42 | 43 | kr, err := OpenKeyRegistry(opt) 44 | require.NoError(t, err) 45 | dk, err := kr.LatestDataKey() 46 | require.NoError(t, err) 47 | // We're resetting the last created timestamp. So, it creates 48 | // new datakey. 49 | kr.lastCreated = 0 50 | dk1, err := kr.LatestDataKey() 51 | // We generated two key. So, checking the length. 52 | require.Equal(t, 2, len(kr.dataKeys)) 53 | require.NoError(t, err) 54 | require.NoError(t, kr.Close()) 55 | 56 | kr2, err := OpenKeyRegistry(opt) 57 | require.NoError(t, err) 58 | require.Equal(t, 2, len(kr2.dataKeys)) 59 | // Asserting the correctness of the datakey after opening the registry. 
60 | require.Equal(t, dk.Data, kr.dataKeys[dk.KeyId].Data) 61 | require.Equal(t, dk1.Data, kr.dataKeys[dk1.KeyId].Data) 62 | require.NoError(t, kr2.Close()) 63 | } 64 | 65 | func TestRewriteRegistry(t *testing.T) { 66 | encryptionKey := make([]byte, 32) 67 | dir, err := ioutil.TempDir("", "badger-test") 68 | require.NoError(t, err) 69 | defer removeDir(dir) 70 | _, err = rand.Read(encryptionKey) 71 | require.NoError(t, err) 72 | opt := getRegistryTestOptions(dir, encryptionKey) 73 | kr, err := OpenKeyRegistry(opt) 74 | require.NoError(t, err) 75 | _, err = kr.LatestDataKey() 76 | require.NoError(t, err) 77 | // We're resetting the last created timestamp. So, it creates 78 | // new datakey. 79 | kr.lastCreated = 0 80 | _, err = kr.LatestDataKey() 81 | require.NoError(t, err) 82 | require.NoError(t, kr.Close()) 83 | delete(kr.dataKeys, 1) 84 | require.NoError(t, WriteKeyRegistry(kr, opt)) 85 | kr2, err := OpenKeyRegistry(opt) 86 | require.NoError(t, err) 87 | require.Equal(t, 1, len(kr2.dataKeys)) 88 | require.NoError(t, kr2.Close()) 89 | } 90 | 91 | func TestMismatch(t *testing.T) { 92 | encryptionKey := make([]byte, 32) 93 | dir, err := ioutil.TempDir("", "badger-test") 94 | require.NoError(t, err) 95 | defer removeDir(dir) 96 | _, err = rand.Read(encryptionKey) 97 | require.NoError(t, err) 98 | opt := getRegistryTestOptions(dir, encryptionKey) 99 | kr, err := OpenKeyRegistry(opt) 100 | require.NoError(t, err) 101 | require.NoError(t, kr.Close()) 102 | // Opening with the same key and asserting. 103 | kr, err = OpenKeyRegistry(opt) 104 | require.NoError(t, err) 105 | require.NoError(t, kr.Close()) 106 | // Opening with the invalid key and asserting. 107 | encryptionKey = make([]byte, 32) 108 | _, err = rand.Read(encryptionKey) 109 | require.NoError(t, err) 110 | opt.EncryptionKey = encryptionKey 111 | _, err = OpenKeyRegistry(opt) 112 | require.Error(t, err) 113 | require.EqualError(t, err, ErrEncryptionKeyMismatch.Error()) 114 | } 115 | 116 | func TestEncryptionAndDecryption(t *testing.T) { 117 | encryptionKey := make([]byte, 32) 118 | dir, err := ioutil.TempDir("", "badger-test") 119 | require.NoError(t, err) 120 | defer removeDir(dir) 121 | _, err = rand.Read(encryptionKey) 122 | require.NoError(t, err) 123 | opt := getRegistryTestOptions(dir, encryptionKey) 124 | kr, err := OpenKeyRegistry(opt) 125 | require.NoError(t, err) 126 | dk, err := kr.LatestDataKey() 127 | require.NoError(t, err) 128 | require.NoError(t, kr.Close()) 129 | // Checking the correctness of the datakey after closing and 130 | // opening the key registry. 131 | kr, err = OpenKeyRegistry(opt) 132 | require.NoError(t, err) 133 | dk1, err := kr.DataKey(dk.GetKeyId()) 134 | require.NoError(t, err) 135 | require.Equal(t, dk.Data, dk1.Data) 136 | require.NoError(t, kr.Close()) 137 | } 138 | 139 | func TestKeyRegistryInMemory(t *testing.T) { 140 | encryptionKey := make([]byte, 32) 141 | _, err := rand.Read(encryptionKey) 142 | require.NoError(t, err) 143 | 144 | opt := getRegistryTestOptions("", encryptionKey) 145 | opt.InMemory = true 146 | 147 | kr, err := OpenKeyRegistry(opt) 148 | require.NoError(t, err) 149 | _, err = kr.LatestDataKey() 150 | require.NoError(t, err) 151 | // We're resetting the last created timestamp. So, it creates 152 | // new datakey. 153 | kr.lastCreated = 0 154 | _, err = kr.LatestDataKey() 155 | // We generated two key. So, checking the length. 
156 | require.Equal(t, 2, len(kr.dataKeys)) 157 | require.NoError(t, err) 158 | require.NoError(t, kr.Close()) 159 | } 160 | -------------------------------------------------------------------------------- /lifetime.go: -------------------------------------------------------------------------------- 1 | package badger 2 | 3 | import ( 4 | "encoding/binary" 5 | "os" 6 | "sync" 7 | 8 | "github.com/outcaste-io/badger/v4/y" 9 | "github.com/outcaste-io/ristretto/z" 10 | ) 11 | 12 | type LifetimeStats struct { 13 | sync.RWMutex 14 | mf *z.MmapFile 15 | } 16 | 17 | var ( 18 | maxSz int = 1 << 20 19 | idxVersion int = 0 20 | idxOpen int = 1 21 | idxClose int = 2 22 | idxNumWrites int = 3 23 | idxReservedBelow int = 128 24 | ) 25 | 26 | func InitLifetimeStats(path string) *LifetimeStats { 27 | mf, err := z.OpenMmapFile(path, os.O_RDWR|os.O_CREATE, maxSz) 28 | if err == z.NewFile { 29 | for i := range mf.Data { 30 | mf.Data[i] = 0x0 31 | } 32 | y.Check(mf.Sync()) 33 | err = nil 34 | } 35 | y.Check(err) 36 | 37 | lf := &LifetimeStats{mf: mf} 38 | lf.updateAt(idxOpen, 1) 39 | return lf 40 | } 41 | 42 | func getOffset(idx int) int { 43 | off := idx * 8 44 | y.AssertTrue(off < maxSz) 45 | return off 46 | } 47 | 48 | func (lf *LifetimeStats) readAt(idx int) uint64 { 49 | offset := getOffset(idx) 50 | return binary.BigEndian.Uint64(lf.mf.Data[offset : offset+8]) 51 | } 52 | func (lf *LifetimeStats) updateAt(idx int, delta uint64) { 53 | val := lf.readAt(idx) 54 | val += delta 55 | 56 | off := getOffset(idx) 57 | binary.BigEndian.PutUint64(lf.mf.Data[off:off+8], val) 58 | } 59 | func (lf *LifetimeStats) ReadAt(idx int) uint64 { 60 | lf.RLock() 61 | val := lf.readAt(idx) 62 | lf.RUnlock() 63 | return val 64 | } 65 | 66 | func (lf *LifetimeStats) UpdateAt(idx int, delta uint64) { 67 | lf.Lock() 68 | defer lf.Unlock() 69 | 70 | y.AssertTrue(idx > idxReservedBelow) 71 | lf.updateAt(idx, delta) 72 | lf.updateAt(idxNumWrites, 1) 73 | } 74 | 75 | func (lf *LifetimeStats) Close() error { 76 | lf.Lock() 77 | defer lf.Unlock() 78 | 79 | lf.updateAt(idxClose, 1) 80 | return lf.mf.Close(-1) 81 | } 82 | 83 | func (lf *LifetimeStats) Stats() map[int]uint64 { 84 | res := make(map[int]uint64) 85 | 86 | lf.RLock() 87 | for i := 0; i < (1<<20)/8; i++ { 88 | if val := lf.readAt(i); val > 0 { 89 | res[i] = val 90 | } 91 | } 92 | lf.RUnlock() 93 | 94 | return res 95 | } 96 | -------------------------------------------------------------------------------- /logger.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2018 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "log" 21 | "os" 22 | ) 23 | 24 | // Logger is implemented by any logging system that is used for standard logs. 
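// A minimal custom implementation is just four printf-style methods; as an
// illustrative sketch (assuming the standard library "log" package, which this
// file already imports, and a hypothetical stderrLogger type), it could look
// roughly like:
//
//	type stderrLogger struct{ *log.Logger }
//
//	func (l *stderrLogger) Errorf(f string, v ...interface{})   { l.Printf("ERROR: "+f, v...) }
//	func (l *stderrLogger) Warningf(f string, v ...interface{}) { l.Printf("WARNING: "+f, v...) }
//	func (l *stderrLogger) Infof(f string, v ...interface{})    { l.Printf("INFO: "+f, v...) }
//	func (l *stderrLogger) Debugf(f string, v ...interface{})   { l.Printf("DEBUG: "+f, v...) }
//
// The defaultLog type defined later in this file is badger's own built-in
// implementation of this interface.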
25 | type Logger interface { 26 | Errorf(string, ...interface{}) 27 | Warningf(string, ...interface{}) 28 | Infof(string, ...interface{}) 29 | Debugf(string, ...interface{}) 30 | } 31 | 32 | // Errorf logs an ERROR log message to the logger specified in opts or to the 33 | // global logger if no logger is specified in opts. 34 | func (opt *Options) Errorf(format string, v ...interface{}) { 35 | if opt.Logger == nil { 36 | return 37 | } 38 | opt.Logger.Errorf(format, v...) 39 | } 40 | 41 | // Infof logs an INFO message to the logger specified in opts. 42 | func (opt *Options) Infof(format string, v ...interface{}) { 43 | if opt.Logger == nil { 44 | return 45 | } 46 | opt.Logger.Infof(format, v...) 47 | } 48 | 49 | // Warningf logs a WARNING message to the logger specified in opts. 50 | func (opt *Options) Warningf(format string, v ...interface{}) { 51 | if opt.Logger == nil { 52 | return 53 | } 54 | opt.Logger.Warningf(format, v...) 55 | } 56 | 57 | // Debugf logs a DEBUG message to the logger specified in opts. 58 | func (opt *Options) Debugf(format string, v ...interface{}) { 59 | if opt.Logger == nil { 60 | return 61 | } 62 | opt.Logger.Debugf(format, v...) 63 | } 64 | 65 | type loggingLevel int 66 | 67 | const ( 68 | DEBUG loggingLevel = iota 69 | INFO 70 | WARNING 71 | ERROR 72 | ) 73 | 74 | type defaultLog struct { 75 | *log.Logger 76 | level loggingLevel 77 | } 78 | 79 | func defaultLogger(level loggingLevel) *defaultLog { 80 | return &defaultLog{Logger: log.New(os.Stderr, "badger ", log.LstdFlags), level: level} 81 | } 82 | 83 | func (l *defaultLog) Errorf(f string, v ...interface{}) { 84 | if l.level <= ERROR { 85 | l.Printf("ERROR: "+f, v...) 86 | } 87 | } 88 | 89 | func (l *defaultLog) Warningf(f string, v ...interface{}) { 90 | if l.level <= WARNING { 91 | l.Printf("WARNING: "+f, v...) 92 | } 93 | } 94 | 95 | func (l *defaultLog) Infof(f string, v ...interface{}) { 96 | if l.level <= INFO { 97 | l.Printf("INFO: "+f, v...) 98 | } 99 | } 100 | 101 | func (l *defaultLog) Debugf(f string, v ...interface{}) { 102 | if l.level <= DEBUG { 103 | l.Printf("DEBUG: "+f, v...) 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /logger_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "fmt" 21 | "testing" 22 | 23 | "github.com/stretchr/testify/require" 24 | ) 25 | 26 | type mockLogger struct { 27 | output string 28 | } 29 | 30 | func (l *mockLogger) Errorf(f string, v ...interface{}) { 31 | l.output = fmt.Sprintf("ERROR: "+f, v...) 32 | } 33 | 34 | func (l *mockLogger) Infof(f string, v ...interface{}) { 35 | l.output = fmt.Sprintf("INFO: "+f, v...) 36 | } 37 | 38 | func (l *mockLogger) Warningf(f string, v ...interface{}) { 39 | l.output = fmt.Sprintf("WARNING: "+f, v...) 
40 | } 41 | 42 | func (l *mockLogger) Debugf(f string, v ...interface{}) { 43 | l.output = fmt.Sprintf("DEBUG: "+f, v...) 44 | } 45 | 46 | // Test that the DB-specific log is used instead of the global log. 47 | func TestDbLog(t *testing.T) { 48 | l := &mockLogger{} 49 | opt := Options{Logger: l} 50 | 51 | opt.Errorf("test") 52 | require.Equal(t, "ERROR: test", l.output) 53 | opt.Infof("test") 54 | require.Equal(t, "INFO: test", l.output) 55 | opt.Warningf("test") 56 | require.Equal(t, "WARNING: test", l.output) 57 | } 58 | 59 | // Test that the global logger is used when no logger is specified in Options. 60 | func TestNoDbLog(t *testing.T) { 61 | l := &mockLogger{} 62 | opt := Options{} 63 | opt.Logger = l 64 | 65 | opt.Errorf("test") 66 | require.Equal(t, "ERROR: test", l.output) 67 | opt.Infof("test") 68 | require.Equal(t, "INFO: test", l.output) 69 | opt.Warningf("test") 70 | require.Equal(t, "WARNING: test", l.output) 71 | } 72 | -------------------------------------------------------------------------------- /options/options.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package options 18 | 19 | // ChecksumVerificationMode tells when should DB verify checksum for SSTable blocks. 20 | type ChecksumVerificationMode int 21 | 22 | const ( 23 | // NoVerification indicates DB should not verify checksum for SSTable blocks. 24 | NoVerification ChecksumVerificationMode = iota 25 | // OnTableRead indicates checksum should be verified while opening SSTtable. 26 | OnTableRead 27 | // OnBlockRead indicates checksum should be verified on every SSTable block read. 28 | OnBlockRead 29 | // OnTableAndBlockRead indicates checksum should be verified 30 | // on SSTable opening and on every block read. 31 | OnTableAndBlockRead 32 | ) 33 | 34 | // CompressionType specifies how a block should be compressed. 35 | type CompressionType uint32 36 | 37 | const ( 38 | // None mode indicates that a block is not compressed. 39 | None CompressionType = 0 40 | // Snappy mode indicates that a block is compressed using Snappy algorithm. 41 | Snappy CompressionType = 1 42 | // ZSTD mode indicates that a block is compressed using ZSTD algorithm. 43 | ZSTD CompressionType = 2 44 | ) 45 | -------------------------------------------------------------------------------- /options_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2021 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "reflect" 21 | "testing" 22 | 23 | "github.com/outcaste-io/badger/v4/options" 24 | ) 25 | 26 | func TestOptions(t *testing.T) { 27 | t.Run("default options", func(t *testing.T) { 28 | // copy all the default options over to a big SuperFlag string 29 | defaultSuperFlag := generateSuperFlag(DefaultOptions("")) 30 | // fill an empty Options with values from the SuperFlag 31 | generated := Options{}.FromSuperFlag(defaultSuperFlag) 32 | // make sure they're equal 33 | if !optionsEqual(DefaultOptions(""), generated) { 34 | t.Fatal("generated default SuperFlag != default Options") 35 | } 36 | // check that values are overwritten properly 37 | overwritten := DefaultOptions("").FromSuperFlag("numgoroutines=1234") 38 | if overwritten.NumGoroutines != 1234 { 39 | t.Fatal("Option value not overwritten by SuperFlag value") 40 | } 41 | }) 42 | 43 | t.Run("special flags", func(t *testing.T) { 44 | o1 := DefaultOptions("") 45 | o1.NamespaceOffset = 10 46 | o1.Compression = options.ZSTD 47 | o1.ZSTDCompressionLevel = 2 48 | o1.NumGoroutines = 20 49 | 50 | o2 := DefaultOptions("") 51 | o2.NamespaceOffset = 10 52 | o2 = o2.FromSuperFlag("compression=zstd:2; numgoroutines=20;") 53 | 54 | // make sure they're equal 55 | if !optionsEqual(o1, o2) { 56 | t.Fatal("generated superFlag != expected options") 57 | } 58 | }) 59 | } 60 | 61 | // optionsEqual just compares the values of two Options structs 62 | func optionsEqual(o1, o2 Options) bool { 63 | o1v := reflect.ValueOf(&o1).Elem() 64 | o2v := reflect.ValueOf(&o2).Elem() 65 | for i := 0; i < o1v.NumField(); i++ { 66 | if o1v.Field(i).CanInterface() { 67 | kind := o1v.Field(i).Kind() 68 | // compare values 69 | switch kind { 70 | case reflect.Bool: 71 | if o1v.Field(i).Bool() != o2v.Field(i).Bool() { 72 | return false 73 | } 74 | case reflect.Int, reflect.Int64: 75 | if o1v.Field(i).Int() != o2v.Field(i).Int() { 76 | return false 77 | } 78 | case reflect.Uint32, reflect.Uint64: 79 | if o1v.Field(i).Uint() != o2v.Field(i).Uint() { 80 | return false 81 | } 82 | case reflect.Float64: 83 | if o1v.Field(i).Float() != o2v.Field(i).Float() { 84 | return false 85 | } 86 | case reflect.String: 87 | if o1v.Field(i).String() != o2v.Field(i).String() { 88 | return false 89 | } 90 | } 91 | } 92 | } 93 | return true 94 | } 95 | -------------------------------------------------------------------------------- /pb/badgerpb3.proto: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | // Use protos/gen.sh to generate .pb.go files. 18 | syntax = "proto3"; 19 | 20 | package badgerpb3; 21 | 22 | option go_package = "github.com/outcaste-io/badger/v3/pb"; 23 | 24 | message KV { 25 | bytes key = 1; 26 | bytes value = 2; 27 | bytes user_meta = 3; 28 | uint64 version = 4; 29 | /* uint64 expires_at = 5; */ 30 | bytes meta = 6; 31 | 32 | // Stream id is used to identify which stream the KV came from. 33 | uint32 stream_id = 10; 34 | // Stream done is used to indicate end of stream. 35 | bool stream_done = 11; 36 | 37 | enum Kind { 38 | KEY = 0; 39 | DATA_KEY = 1; 40 | FILE = 2; 41 | } 42 | Kind kind = 12; 43 | } 44 | 45 | message KVList { 46 | repeated KV kv = 1; 47 | 48 | // alloc_ref used internally for memory management. 49 | uint64 alloc_ref = 10; 50 | } 51 | 52 | message ManifestChangeSet { 53 | // A set of changes that are applied atomically. 54 | repeated ManifestChange changes = 1; 55 | } 56 | 57 | enum EncryptionAlgo { 58 | aes = 0; 59 | } 60 | 61 | message ManifestChange { 62 | uint64 Id = 1; // Table ID. 63 | enum Operation { 64 | CREATE = 0; 65 | DELETE = 1; 66 | } 67 | Operation Op = 2; 68 | uint32 Level = 3; // Only used for CREATE. 69 | uint64 key_id = 4; 70 | EncryptionAlgo encryption_algo = 5; 71 | uint32 compression = 6; // Only used for CREATE Op. 72 | } 73 | 74 | message Checksum { 75 | enum Algorithm { 76 | CRC32C = 0; 77 | XXHash64 = 1; 78 | } 79 | Algorithm algo = 1; // For storing type of Checksum algorithm used 80 | uint64 sum = 2; 81 | } 82 | 83 | message DataKey { 84 | uint64 key_id = 1; 85 | bytes data = 2; 86 | bytes iv = 3; 87 | int64 created_at = 4; 88 | } 89 | 90 | message Match { 91 | bytes prefix = 1; 92 | string ignore_bytes = 2; // Comma separated with dash to represent ranges "1, 2-3, 4-7, 9" 93 | } 94 | 95 | -------------------------------------------------------------------------------- /pb/gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Run this script from its directory, so that badgerpb2.proto is where it's expected to 4 | # be. 5 | 6 | # You might need to go get -v github.com/gogo/protobuf/... 7 | go get -v github.com/gogo/protobuf/protoc-gen-gogofaster 8 | protoc --gogofaster_out=. --gogofaster_opt=paths=source_relative -I=. badgerpb3.proto 9 | -------------------------------------------------------------------------------- /pb/protos_test.go: -------------------------------------------------------------------------------- 1 | /* Copyright 2021 Dgraph Labs, Inc. and Contributors 2 | * 3 | * Licensed under the Apache License, Version 2.0 (the "License"); 4 | * you may not use this file except in compliance with the License. 5 | * You may obtain a copy of the License at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * Unless required by applicable law or agreed to in writing, software 10 | * distributed under the License is distributed on an "AS IS" BASIS, 11 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | * See the License for the specific language governing permissions and 13 | * limitations under the License. 14 | */ 15 | 16 | package pb 17 | 18 | import ( 19 | "os/exec" 20 | "testing" 21 | 22 | "github.com/stretchr/testify/require" 23 | ) 24 | 25 | func Exec(argv ...string) error { 26 | cmd := exec.Command(argv[0], argv[1:]...) 
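// Note: the command's stdout and stderr are left unset, so its output is
// discarded; only the exit status is reported back to the caller.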
27 | 28 | if err := cmd.Start(); err != nil { 29 | return err 30 | } 31 | return cmd.Wait() 32 | } 33 | 34 | func TestProtosRegenerate(t *testing.T) { 35 | err := Exec("./gen.sh") 36 | require.NoError(t, err, "Got error while regenerating protos: %v\n", err) 37 | 38 | generatedProtos := "badgerpb3.pb.go" 39 | err = Exec("git", "diff", "--quiet", "--", generatedProtos) 40 | require.NoError(t, err, "badgerpb3.pb.go changed after regenerating") 41 | } 42 | -------------------------------------------------------------------------------- /publisher.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "sync" 21 | "sync/atomic" 22 | 23 | "github.com/outcaste-io/badger/v4/pb" 24 | "github.com/outcaste-io/badger/v4/trie" 25 | "github.com/outcaste-io/badger/v4/y" 26 | "github.com/outcaste-io/ristretto/z" 27 | ) 28 | 29 | type subscriber struct { 30 | id uint64 31 | matches []pb.Match 32 | sendCh chan *pb.KVList 33 | subCloser *z.Closer 34 | // this will be atomic pointer which will be used to 35 | // track whether the subscriber is active or not 36 | active *uint64 37 | } 38 | 39 | type publisher struct { 40 | sync.Mutex 41 | pubCh chan requests 42 | subscribers map[uint64]subscriber 43 | nextID uint64 44 | indexer *trie.Trie 45 | } 46 | 47 | func newPublisher() *publisher { 48 | return &publisher{ 49 | pubCh: make(chan requests, 1000), 50 | subscribers: make(map[uint64]subscriber), 51 | nextID: 0, 52 | indexer: trie.NewTrie(), 53 | } 54 | } 55 | 56 | func (p *publisher) listenForUpdates(c *z.Closer) { 57 | defer func() { 58 | p.cleanSubscribers() 59 | c.Done() 60 | }() 61 | slurp := func(batch requests) { 62 | for { 63 | select { 64 | case reqs := <-p.pubCh: 65 | batch = append(batch, reqs...) 66 | default: 67 | p.publishUpdates(batch) 68 | return 69 | } 70 | } 71 | } 72 | for { 73 | select { 74 | case <-c.HasBeenClosed(): 75 | return 76 | case reqs := <-p.pubCh: 77 | slurp(reqs) 78 | } 79 | } 80 | } 81 | 82 | func (p *publisher) publishUpdates(reqs requests) { 83 | p.Lock() 84 | defer func() { 85 | p.Unlock() 86 | // Release all the request. 
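// Each incoming batch was IncrRef'd in sendUpdates before being pushed onto
// pubCh; dropping that reference here lets the request entries be reclaimed
// once every subscriber channel has been handed its updates.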
87 | reqs.DecrRef() 88 | }() 89 | batchedUpdates := make(map[uint64]*pb.KVList) 90 | for _, req := range reqs { 91 | if req.Skl != nil && !req.Skl.Empty() { 92 | itr := req.Skl.NewIterator() 93 | defer itr.Close() 94 | 95 | for itr.SeekToFirst(); itr.Valid(); itr.Next() { 96 | ids := p.indexer.Get(itr.Key()) 97 | if len(ids) == 0 { 98 | continue 99 | } 100 | v := itr.Value() 101 | kv := &pb.KV{ 102 | Key: y.ParseKey(itr.Key()), 103 | Version: y.ParseTs(itr.Key()), 104 | Value: y.SafeCopy(nil, v.Value), 105 | UserMeta: []byte{v.UserMeta}, 106 | } 107 | for id := range ids { 108 | if _, ok := batchedUpdates[id]; !ok { 109 | batchedUpdates[id] = &pb.KVList{} 110 | } 111 | batchedUpdates[id].Kv = append(batchedUpdates[id].Kv, kv) 112 | } 113 | } 114 | } 115 | 116 | for _, e := range req.Entries { 117 | ids := p.indexer.Get(e.Key) 118 | if len(ids) == 0 { 119 | continue 120 | } 121 | k := y.SafeCopy(nil, e.Key) 122 | kv := &pb.KV{ 123 | Key: y.ParseKey(k), 124 | Value: y.SafeCopy(nil, e.Value), 125 | UserMeta: []byte{e.UserMeta}, 126 | Version: y.ParseTs(k), 127 | } 128 | for id := range ids { 129 | if _, ok := batchedUpdates[id]; !ok { 130 | batchedUpdates[id] = &pb.KVList{} 131 | } 132 | batchedUpdates[id].Kv = append(batchedUpdates[id].Kv, kv) 133 | } 134 | } 135 | } 136 | 137 | for id, kvs := range batchedUpdates { 138 | if atomic.LoadUint64(p.subscribers[id].active) == 1 { 139 | p.subscribers[id].sendCh <- kvs 140 | } 141 | } 142 | } 143 | 144 | func (p *publisher) newSubscriber(c *z.Closer, matches []pb.Match) (subscriber, error) { 145 | p.Lock() 146 | defer p.Unlock() 147 | ch := make(chan *pb.KVList, 1000) 148 | id := p.nextID 149 | // Increment next ID. 150 | p.nextID++ 151 | active := uint64(1) 152 | s := subscriber{ 153 | active: &active, 154 | id: id, 155 | matches: matches, 156 | sendCh: ch, 157 | subCloser: c, 158 | } 159 | p.subscribers[id] = s 160 | for _, m := range matches { 161 | if err := p.indexer.AddMatch(m, id); err != nil { 162 | return s, err 163 | } 164 | } 165 | return s, nil 166 | } 167 | 168 | // cleanSubscribers stops all the subscribers. Ideally, It should be called while closing DB. 169 | func (p *publisher) cleanSubscribers() { 170 | p.Lock() 171 | defer p.Unlock() 172 | for id, s := range p.subscribers { 173 | for _, m := range s.matches { 174 | _ = p.indexer.DeleteMatch(m, id) 175 | } 176 | delete(p.subscribers, id) 177 | s.subCloser.SignalAndWait() 178 | } 179 | } 180 | 181 | func (p *publisher) deleteSubscriber(id uint64) { 182 | p.Lock() 183 | defer p.Unlock() 184 | if s, ok := p.subscribers[id]; ok { 185 | for _, m := range s.matches { 186 | _ = p.indexer.DeleteMatch(m, id) 187 | } 188 | } 189 | delete(p.subscribers, id) 190 | } 191 | 192 | func (p *publisher) sendUpdates(reqs requests) { 193 | if p.noOfSubscribers() != 0 { 194 | reqs.IncrRef() 195 | p.pubCh <- reqs 196 | } 197 | } 198 | 199 | func (p *publisher) noOfSubscribers() int { 200 | p.Lock() 201 | defer p.Unlock() 202 | return len(p.subscribers) 203 | } 204 | -------------------------------------------------------------------------------- /publisher_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package badger 17 | 18 | import ( 19 | "context" 20 | "fmt" 21 | "sync" 22 | "sync/atomic" 23 | "testing" 24 | "time" 25 | 26 | "github.com/pkg/errors" 27 | 28 | "github.com/stretchr/testify/require" 29 | 30 | "github.com/outcaste-io/badger/v4/pb" 31 | ) 32 | 33 | // This test will result in deadlock for commits before this. 34 | // Exiting this test gracefully will be the proof that the 35 | // publisher is no longer stuck in deadlock. 36 | func TestPublisherDeadlock(t *testing.T) { 37 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 38 | var subWg sync.WaitGroup 39 | subWg.Add(1) 40 | 41 | var firstUpdate sync.WaitGroup 42 | firstUpdate.Add(1) 43 | 44 | var subDone sync.WaitGroup 45 | subDone.Add(1) 46 | go func() { 47 | subWg.Done() 48 | match := pb.Match{Prefix: []byte("ke"), IgnoreBytes: ""} 49 | err := db.Subscribe(context.Background(), func(kvs *pb.KVList) error { 50 | firstUpdate.Done() 51 | time.Sleep(time.Second * 20) 52 | return errors.New("error returned") 53 | }, []pb.Match{match}) 54 | require.Error(t, err, errors.New("error returned")) 55 | subDone.Done() 56 | }() 57 | subWg.Wait() 58 | go func() { 59 | wb := db.NewWriteBatch() 60 | e := NewEntry([]byte(fmt.Sprintf("key%d", 0)), []byte(fmt.Sprintf("value%d", 0))) 61 | require.NoError(t, wb.SetEntryAt(e, 1)) 62 | require.NoError(t, wb.Flush()) 63 | }() 64 | 65 | firstUpdate.Wait() 66 | req := int64(0) 67 | for i := 1; i < 111; i++ { 68 | time.Sleep(time.Millisecond * 10) 69 | go func(i int) { 70 | wb := db.NewWriteBatch() 71 | e := NewEntry([]byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("value%d", i))) 72 | require.NoError(t, wb.SetEntryAt(e, 1)) 73 | require.NoError(t, wb.Flush()) 74 | atomic.AddInt64(&req, 1) 75 | }(i) 76 | } 77 | for { 78 | if atomic.LoadInt64(&req) == 110 { 79 | break 80 | } 81 | time.Sleep(time.Second) 82 | } 83 | subDone.Wait() 84 | }) 85 | } 86 | 87 | func TestPublisherOrdering(t *testing.T) { 88 | runBadgerTest(t, nil, func(t *testing.T, db *DB) { 89 | order := []string{} 90 | var wg sync.WaitGroup 91 | wg.Add(1) 92 | var subWg sync.WaitGroup 93 | subWg.Add(1) 94 | go func() { 95 | subWg.Done() 96 | updates := 0 97 | match := pb.Match{Prefix: []byte("ke"), IgnoreBytes: ""} 98 | err := db.Subscribe(context.Background(), func(kvs *pb.KVList) error { 99 | updates += len(kvs.GetKv()) 100 | for _, kv := range kvs.GetKv() { 101 | order = append(order, string(kv.Value)) 102 | } 103 | if updates == 5 { 104 | wg.Done() 105 | } 106 | return nil 107 | }, []pb.Match{match}) 108 | if err != nil { 109 | require.Equal(t, err.Error(), context.Canceled.Error()) 110 | } 111 | }() 112 | subWg.Wait() 113 | for i := 0; i < 5; i++ { 114 | wb := db.NewWriteBatch() 115 | e := NewEntry([]byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("value%d", i))) 116 | require.NoError(t, wb.SetEntryAt(e, 1)) 117 | require.NoError(t, wb.Flush()) 118 | } 119 | wg.Wait() 120 | for i := 0; i < 5; i++ { 121 | require.Equal(t, fmt.Sprintf("value%d", i), order[i]) 122 | } 123 | }) 124 | } 125 | 126 | func TestMultiplePrefix(t *testing.T) { 127 | runBadgerTest(t, nil, func(t 
*testing.T, db *DB) { 128 | var wg sync.WaitGroup 129 | wg.Add(1) 130 | var subWg sync.WaitGroup 131 | subWg.Add(1) 132 | go func() { 133 | subWg.Done() 134 | updates := 0 135 | match1 := pb.Match{Prefix: []byte("ke"), IgnoreBytes: ""} 136 | match2 := pb.Match{Prefix: []byte("hel"), IgnoreBytes: ""} 137 | err := db.Subscribe(context.Background(), func(kvs *pb.KVList) error { 138 | updates += len(kvs.GetKv()) 139 | for _, kv := range kvs.GetKv() { 140 | if string(kv.Key) == "key" { 141 | require.Equal(t, string(kv.Value), "value") 142 | } else { 143 | require.Equal(t, string(kv.Value), "badger") 144 | } 145 | } 146 | if updates == 2 { 147 | wg.Done() 148 | } 149 | return nil 150 | }, []pb.Match{match1, match2}) 151 | if err != nil { 152 | require.Equal(t, err.Error(), context.Canceled.Error()) 153 | } 154 | }() 155 | subWg.Wait() 156 | 157 | e := NewEntry([]byte("key"), []byte("value")) 158 | e.version = 1 159 | require.NoError(t, db.BatchSet([]*Entry{e})) 160 | 161 | e = NewEntry([]byte("hello"), []byte("badger")) 162 | e.version = 1 163 | require.NoError(t, db.BatchSet([]*Entry{e})) 164 | 165 | wg.Wait() 166 | }) 167 | } 168 | -------------------------------------------------------------------------------- /skl/README.md: -------------------------------------------------------------------------------- 1 | This is much better than `skiplist` and `slist`. 2 | 3 | ``` 4 | BenchmarkReadWrite/frac_0-8 3000000 537 ns/op 5 | BenchmarkReadWrite/frac_1-8 3000000 503 ns/op 6 | BenchmarkReadWrite/frac_2-8 3000000 492 ns/op 7 | BenchmarkReadWrite/frac_3-8 3000000 475 ns/op 8 | BenchmarkReadWrite/frac_4-8 3000000 440 ns/op 9 | BenchmarkReadWrite/frac_5-8 5000000 442 ns/op 10 | BenchmarkReadWrite/frac_6-8 5000000 380 ns/op 11 | BenchmarkReadWrite/frac_7-8 5000000 338 ns/op 12 | BenchmarkReadWrite/frac_8-8 5000000 294 ns/op 13 | BenchmarkReadWrite/frac_9-8 10000000 268 ns/op 14 | BenchmarkReadWrite/frac_10-8 100000000 26.3 ns/op 15 | ``` 16 | 17 | And even better than a simple map with read-write lock: 18 | 19 | ``` 20 | BenchmarkReadWriteMap/frac_0-8 2000000 774 ns/op 21 | BenchmarkReadWriteMap/frac_1-8 2000000 647 ns/op 22 | BenchmarkReadWriteMap/frac_2-8 3000000 605 ns/op 23 | BenchmarkReadWriteMap/frac_3-8 3000000 603 ns/op 24 | BenchmarkReadWriteMap/frac_4-8 3000000 556 ns/op 25 | BenchmarkReadWriteMap/frac_5-8 3000000 472 ns/op 26 | BenchmarkReadWriteMap/frac_6-8 3000000 476 ns/op 27 | BenchmarkReadWriteMap/frac_7-8 3000000 457 ns/op 28 | BenchmarkReadWriteMap/frac_8-8 5000000 444 ns/op 29 | BenchmarkReadWriteMap/frac_9-8 5000000 361 ns/op 30 | BenchmarkReadWriteMap/frac_10-8 10000000 212 ns/op 31 | ``` 32 | 33 | # Node Pooling 34 | 35 | Command used 36 | 37 | ``` 38 | rm -Rf tmp && /usr/bin/time -l ./populate -keys_mil 10 39 | ``` 40 | 41 | For pprof results, we run without using /usr/bin/time. There are four runs below. 42 | 43 | Results seem to vary quite a bit between runs. 
44 | 45 | ## Before node pooling 46 | 47 | ``` 48 | 1311.53MB of 1338.69MB total (97.97%) 49 | Dropped 30 nodes (cum <= 6.69MB) 50 | Showing top 10 nodes out of 37 (cum >= 12.50MB) 51 | flat flat% sum% cum cum% 52 | 523.04MB 39.07% 39.07% 523.04MB 39.07% github.com/dgraph-io/badger/skl.(*Skiplist).Put 53 | 184.51MB 13.78% 52.85% 184.51MB 13.78% runtime.stringtoslicebyte 54 | 166.01MB 12.40% 65.25% 689.04MB 51.47% github.com/dgraph-io/badger/mem.(*Table).Put 55 | 165MB 12.33% 77.58% 165MB 12.33% runtime.convT2E 56 | 116.92MB 8.73% 86.31% 116.92MB 8.73% bytes.makeSlice 57 | 62.50MB 4.67% 90.98% 62.50MB 4.67% main.newValue 58 | 34.50MB 2.58% 93.56% 34.50MB 2.58% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV 59 | 25.50MB 1.90% 95.46% 100.06MB 7.47% github.com/dgraph-io/badger/y.(*MergeIterator).Next 60 | 21.06MB 1.57% 97.04% 21.06MB 1.57% github.com/dgraph-io/badger/table.(*Table).read 61 | 12.50MB 0.93% 97.97% 12.50MB 0.93% github.com/dgraph-io/badger/table.header.Encode 62 | 63 | 128.31 real 329.37 user 17.11 sys 64 | 3355660288 maximum resident set size 65 | 0 average shared memory size 66 | 0 average unshared data size 67 | 0 average unshared stack size 68 | 2203080 page reclaims 69 | 764 page faults 70 | 0 swaps 71 | 275 block input operations 72 | 76 block output operations 73 | 0 messages sent 74 | 0 messages received 75 | 0 signals received 76 | 49173 voluntary context switches 77 | 599922 involuntary context switches 78 | ``` 79 | 80 | ## After node pooling 81 | 82 | ``` 83 | 1963.13MB of 2026.09MB total (96.89%) 84 | Dropped 29 nodes (cum <= 10.13MB) 85 | Showing top 10 nodes out of 41 (cum >= 185.62MB) 86 | flat flat% sum% cum cum% 87 | 658.05MB 32.48% 32.48% 658.05MB 32.48% github.com/dgraph-io/badger/skl.glob..func1 88 | 297.51MB 14.68% 47.16% 297.51MB 14.68% runtime.convT2E 89 | 257.51MB 12.71% 59.87% 257.51MB 12.71% runtime.stringtoslicebyte 90 | 249.01MB 12.29% 72.16% 1007.06MB 49.70% github.com/dgraph-io/badger/mem.(*Table).Put 91 | 142.43MB 7.03% 79.19% 142.43MB 7.03% bytes.makeSlice 92 | 100MB 4.94% 84.13% 758.05MB 37.41% github.com/dgraph-io/badger/skl.newNode 93 | 99.50MB 4.91% 89.04% 99.50MB 4.91% main.newValue 94 | 75MB 3.70% 92.74% 75MB 3.70% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV 95 | 44.62MB 2.20% 94.94% 44.62MB 2.20% github.com/dgraph-io/badger/table.(*Table).read 96 | 39.50MB 1.95% 96.89% 185.62MB 9.16% github.com/dgraph-io/badger/y.(*MergeIterator).Next 97 | 98 | 135.58 real 374.29 user 17.65 sys 99 | 3740614656 maximum resident set size 100 | 0 average shared memory size 101 | 0 average unshared data size 102 | 0 average unshared stack size 103 | 2276566 page reclaims 104 | 770 page faults 105 | 0 swaps 106 | 128 block input operations 107 | 90 block output operations 108 | 0 messages sent 109 | 0 messages received 110 | 0 signals received 111 | 46434 voluntary context switches 112 | 597049 involuntary context switches 113 | ``` 114 | -------------------------------------------------------------------------------- /skl/arena.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package skl 18 | 19 | import ( 20 | "sync/atomic" 21 | "unsafe" 22 | 23 | "github.com/outcaste-io/badger/v4/y" 24 | ) 25 | 26 | const ( 27 | offsetSize = int(unsafe.Sizeof(uint32(0))) 28 | 29 | // Always align nodes on 64-bit boundaries, even on 32-bit architectures, 30 | // so that the node.value field is 64-bit aligned. This is necessary because 31 | // node.getValueOffset uses atomic.LoadUint64, which expects its input 32 | // pointer to be 64-bit aligned. 33 | nodeAlign = int(unsafe.Sizeof(uint64(0))) - 1 34 | ) 35 | 36 | // Arena should be lock-free. 37 | type Arena struct { 38 | n uint32 39 | shouldGrow bool 40 | buf []byte 41 | } 42 | 43 | // newArena returns a new arena. 44 | func newArena(n int64) *Arena { 45 | // Don't store data at position 0 in order to reserve offset=0 as a kind 46 | // of nil pointer. 47 | out := &Arena{ 48 | n: 1, 49 | buf: make([]byte, n), 50 | } 51 | return out 52 | } 53 | 54 | func (s *Arena) allocate(sz uint32) uint32 { 55 | offset := atomic.AddUint32(&s.n, sz) 56 | if !s.shouldGrow { 57 | y.AssertTrue(int(offset) <= len(s.buf)) 58 | return offset - sz 59 | } 60 | 61 | // We are keeping extra bytes in the end so that the checkptr doesn't fail. We apply some 62 | // intelligence to reduce the size of the node by only keeping towers upto valid height and not 63 | // maxHeight. This reduces the node's size, but checkptr doesn't know about its reduced size. 64 | // checkptr tries to verify that the node of size MaxNodeSize resides on a single heap 65 | // allocation which causes this error: checkptr:converted pointer straddles multiple allocations 66 | if int(offset) > len(s.buf)-MaxNodeSize { 67 | growBy := uint32(len(s.buf)) 68 | if growBy > 1<<30 { 69 | growBy = 1 << 30 70 | } 71 | if growBy < sz { 72 | growBy = sz 73 | } 74 | newBuf := make([]byte, len(s.buf)+int(growBy)) 75 | y.AssertTrue(len(s.buf) == copy(newBuf, s.buf)) 76 | s.buf = newBuf 77 | } 78 | return offset - sz 79 | } 80 | 81 | func (s *Arena) size() int64 { 82 | return int64(atomic.LoadUint32(&s.n)) 83 | } 84 | 85 | // putNode allocates a node in the arena. The node is aligned on a pointer-sized 86 | // boundary. The arena offset of the node is returned. 87 | func (s *Arena) putNode(height int) uint32 { 88 | // Compute the amount of the tower that will never be used, since the height 89 | // is less than maxHeight. 90 | unusedSize := (maxHeight - height) * offsetSize 91 | 92 | // Pad the allocation with enough bytes to ensure pointer alignment. 93 | l := uint32(MaxNodeSize - unusedSize + nodeAlign) 94 | n := s.allocate(l) 95 | 96 | // Return the aligned offset. 97 | m := (n + uint32(nodeAlign)) & ^uint32(nodeAlign) 98 | return m 99 | } 100 | 101 | // Put will *copy* val into arena. To make better use of this, reuse your input 102 | // val buffer. Returns an offset into buf. User is responsible for remembering 103 | // size of val. We could also store this size inside arena but the encoding and 104 | // decoding will incur some overhead. 
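// A typical round trip looks like this (a sketch; vs.EncodedSize() is what the
// caller needs to remember in order to read the value back):
//
//	vs := y.ValueStruct{Value: []byte("v"), Meta: 1}
//	off := arena.putVal(vs)
//	got := arena.getVal(off, vs.EncodedSize())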
105 | func (s *Arena) putVal(v y.ValueStruct) uint32 { 106 | l := uint32(v.EncodedSize()) 107 | offset := s.allocate(l) 108 | v.Encode(s.buf[offset:]) 109 | return offset 110 | } 111 | 112 | func (s *Arena) putKey(key []byte) uint32 { 113 | keySz := uint32(len(key)) 114 | offset := s.allocate(keySz) 115 | buf := s.buf[offset : offset+keySz] 116 | y.AssertTrue(len(key) == copy(buf, key)) 117 | return offset 118 | } 119 | 120 | // getNode returns a pointer to the node located at offset. If the offset is 121 | // zero, then the nil node pointer is returned. 122 | func (s *Arena) getNode(offset uint32) *node { 123 | if offset == 0 { 124 | return nil 125 | } 126 | return (*node)(unsafe.Pointer(&s.buf[offset])) 127 | } 128 | 129 | // getKey returns byte slice at offset. 130 | func (s *Arena) getKey(offset uint32, size uint16) []byte { 131 | return s.buf[offset : offset+uint32(size)] 132 | } 133 | 134 | // getVal returns byte slice at offset. The given size should be just the value 135 | // size and should NOT include the meta bytes. 136 | func (s *Arena) getVal(offset uint32, size uint32) (ret y.ValueStruct) { 137 | ret.Decode(s.buf[offset : offset+size]) 138 | return 139 | } 140 | 141 | // getNodeOffset returns the offset of node in the arena. If the node pointer is 142 | // nil, then the zero offset is returned. 143 | func (s *Arena) getNodeOffset(nd *node) uint32 { 144 | if nd == nil { 145 | return 0 146 | } 147 | 148 | return uint32(uintptr(unsafe.Pointer(nd)) - uintptr(unsafe.Pointer(&s.buf[0]))) 149 | } 150 | -------------------------------------------------------------------------------- /structs.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "fmt" 21 | "sync" 22 | "sync/atomic" 23 | 24 | "github.com/outcaste-io/badger/v4/skl" 25 | ) 26 | 27 | // Entry provides Key, Value, UserMeta and ExpiresAt. This struct can be used by 28 | // the user to set data. 29 | type Entry struct { 30 | Key []byte 31 | Value []byte 32 | version uint64 33 | offset uint32 // offset is an internal field. 34 | UserMeta byte 35 | meta byte 36 | } 37 | 38 | func (e *Entry) estimateSize() int64 { 39 | k := int64(len(e.Key)) 40 | v := int64(len(e.Value)) 41 | return k + v + 2 // Meta, UserMeta 42 | } 43 | 44 | func (e Entry) String() { 45 | fmt.Printf("Key: %s Meta: %d UserMeta: %d Offset: %d len(val)=%d", 46 | e.Key, e.meta, e.UserMeta, e.offset, len(e.Value)) 47 | } 48 | 49 | // NewEntry creates a new entry with key and value passed in args. This newly created entry can be 50 | // set in a transaction by calling txn.SetEntry(). All other properties of Entry can be set by 51 | // calling WithMeta, WithDiscard, WithTTL methods on it. 52 | // This function uses key and value reference, hence users must 53 | // not modify key and value until the end of transaction. 
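// For example (a sketch; txn is assumed to be an open read-write transaction):
//
//	e := NewEntry([]byte("answer"), []byte("42")).WithMeta(0x01).WithDiscard()
//	err := txn.SetEntry(e)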
54 | func NewEntry(key, value []byte) *Entry { 55 | return &Entry{ 56 | Key: key, 57 | Value: value, 58 | } 59 | } 60 | 61 | // WithMeta adds meta data to Entry e. This byte is stored alongside the key 62 | // and can be used as an aid to interpret the value or store other contextual 63 | // bits corresponding to the key-value pair of entry. 64 | func (e *Entry) WithMeta(meta byte) *Entry { 65 | e.UserMeta = meta 66 | return e 67 | } 68 | 69 | // WithDiscard adds a marker to Entry e. This means all the previous versions of the key (of the 70 | // Entry) will be eligible for garbage collection. 71 | // This method is only useful if you have set a higher limit for options.NumVersionsToKeep. The 72 | // default setting is 1, in which case, this function doesn't add any more benefit. If however, you 73 | // have a higher setting for NumVersionsToKeep (in Dgraph, we set it to infinity), you can use this 74 | // method to indicate that all the older versions can be discarded and removed during compactions. 75 | func (e *Entry) WithDiscard() *Entry { 76 | e.meta = BitDiscardEarlierVersions 77 | return e 78 | } 79 | 80 | type request struct { 81 | // Input values 82 | Skl *skl.Skiplist 83 | Entries []*Entry 84 | ref int32 85 | } 86 | 87 | type handoverRequest struct { 88 | skl *skl.Skiplist 89 | callback func() 90 | err error 91 | wg sync.WaitGroup 92 | } 93 | 94 | func (req *request) IncrRef() { 95 | atomic.AddInt32(&req.ref, 1) 96 | } 97 | 98 | func (req *request) DecrRef() { 99 | nRef := atomic.AddInt32(&req.ref, -1) 100 | if nRef > 0 { 101 | return 102 | } 103 | req.Entries = nil 104 | } 105 | 106 | type requests []*request 107 | 108 | func (reqs requests) DecrRef() { 109 | for _, req := range reqs { 110 | req.DecrRef() 111 | } 112 | } 113 | 114 | func (reqs requests) IncrRef() { 115 | for _, req := range reqs { 116 | req.IncrRef() 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /table/README.md: -------------------------------------------------------------------------------- 1 | Size of table is 123,217,667 bytes for all benchmarks. 2 | 3 | # BenchmarkRead 4 | ``` 5 | $ go test -bench ^BenchmarkRead$ -run ^$ -count 3 6 | goos: linux 7 | goarch: amd64 8 | pkg: github.com/dgraph-io/badger/table 9 | BenchmarkRead-16 10 154074944 ns/op 10 | BenchmarkRead-16 10 154340411 ns/op 11 | BenchmarkRead-16 10 151914489 ns/op 12 | PASS 13 | ok github.com/dgraph-io/badger/table 22.467s 14 | ``` 15 | 16 | Size of table is 123,217,667 bytes, which is ~118MB. 17 | 18 | The rate is ~762MB/s using LoadToRAM (when table is in RAM). 19 | 20 | To read a 64MB table, this would take ~0.084s, which is negligible. 21 | 22 | # BenchmarkReadAndBuild 23 | ```go 24 | $ go test -bench BenchmarkReadAndBuild -run ^$ -count 3 25 | goos: linux 26 | goarch: amd64 27 | pkg: github.com/dgraph-io/badger/table 28 | BenchmarkReadAndBuild-16 1 1026755231 ns/op 29 | BenchmarkReadAndBuild-16 1 1009543316 ns/op 30 | BenchmarkReadAndBuild-16 1 1039920546 ns/op 31 | PASS 32 | ok github.com/dgraph-io/badger/table 12.081s 33 | ``` 34 | 35 | The rate is ~123MB/s. To build a 64MB table, this would take ~0.56s. Note that this 36 | does NOT include the flushing of the table to disk. All we are doing above is 37 | reading one table (which is in RAM) and write one table in memory. 38 | 39 | The table building takes 0.56-0.084s ~ 0.4823s. 40 | 41 | # BenchmarkReadMerged 42 | Below, we merge 5 tables. The total size remains unchanged at ~122M. 
43 | 44 | ```go 45 | $ go test -bench ReadMerged -run ^$ -count 3 46 | goos: linux 47 | goarch: amd64 48 | pkg: github.com/dgraph-io/badger/table 49 | BenchmarkReadMerged-16 2 977588975 ns/op 50 | BenchmarkReadMerged-16 2 982140738 ns/op 51 | BenchmarkReadMerged-16 2 962046017 ns/op 52 | PASS 53 | ok github.com/dgraph-io/badger/table 27.433s 54 | ``` 55 | 56 | The rate is ~120MB/s. To read a 64MB table using merge iterator, this would take ~0.53s. 57 | 58 | # BenchmarkRandomRead 59 | 60 | ```go 61 | go test -bench BenchmarkRandomRead$ -run ^$ -count 3 62 | goos: linux 63 | goarch: amd64 64 | pkg: github.com/dgraph-io/badger/table 65 | BenchmarkRandomRead-16 500000 2645 ns/op 66 | BenchmarkRandomRead-16 500000 2648 ns/op 67 | BenchmarkRandomRead-16 500000 2614 ns/op 68 | PASS 69 | ok github.com/dgraph-io/badger/table 50.850s 70 | ``` 71 | For random read benchmarking, we are randomly reading a key and verifying its value. 72 | 73 | # DB Open benchmark 74 | 1. Create badger DB with 2 billion key-value pairs (about 380GB of data) 75 | ``` 76 | badger fill -m 2000 --dir="/tmp/data" --sorted 77 | ``` 78 | 2. Clear buffers and swap memory 79 | ``` 80 | free -mh && sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && sudo swapoff -a && sudo swapon -a && free -mh 81 | ``` 82 | Also flush disk buffers 83 | ``` 84 | blockdev --flushbufs /dev/nvme0n1p4 85 | ``` 86 | 3. Run the benchmark 87 | ``` 88 | go test -run=^$ github.com/dgraph-io/badger -bench ^BenchmarkDBOpen$ -benchdir="/tmp/data" -v 89 | 90 | badger 2019/06/04 17:15:56 INFO: 126 tables out of 1028 opened in 3.017s 91 | badger 2019/06/04 17:15:59 INFO: 257 tables out of 1028 opened in 6.014s 92 | badger 2019/06/04 17:16:02 INFO: 387 tables out of 1028 opened in 9.017s 93 | badger 2019/06/04 17:16:05 INFO: 516 tables out of 1028 opened in 12.025s 94 | badger 2019/06/04 17:16:08 INFO: 645 tables out of 1028 opened in 15.013s 95 | badger 2019/06/04 17:16:11 INFO: 775 tables out of 1028 opened in 18.008s 96 | badger 2019/06/04 17:16:14 INFO: 906 tables out of 1028 opened in 21.003s 97 | badger 2019/06/04 17:16:17 INFO: All 1028 tables opened in 23.851s 98 | badger 2019/06/04 17:16:17 INFO: Replaying file id: 1998 at offset: 332000 99 | badger 2019/06/04 17:16:17 INFO: Replay took: 9.81µs 100 | goos: linux 101 | goarch: amd64 102 | pkg: github.com/dgraph-io/badger 103 | BenchmarkDBOpen-16 1 23930082140 ns/op 104 | PASS 105 | ok github.com/dgraph-io/badger 24.076s 106 | 107 | ``` 108 | It takes about 23.851s to open a DB with 2 billion sorted key-value entries. 109 | -------------------------------------------------------------------------------- /test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | go version 6 | 7 | # Run `go list` BEFORE setting GOFLAGS so that the output is in the right 8 | # format for grep. 9 | # export packages because the test will run in a sub process. 10 | export packages=$(go list ./... | grep "github.com/outcaste-io/badger/v3/") 11 | 12 | if [[ ! -z "$TEAMCITY_VERSION" ]]; then 13 | export GOFLAGS="-json" 14 | fi 15 | 16 | function InstallJemalloc() { 17 | pushd . 18 | if [ ! 
-f /usr/local/lib/libjemalloc.a ]; then 19 | USER_ID=`id -u` 20 | JEMALLOC_URL="https://github.com/jemalloc/jemalloc/releases/download/5.2.1/jemalloc-5.2.1.tar.bz2" 21 | 22 | mkdir -p /tmp/jemalloc-temp && cd /tmp/jemalloc-temp ; 23 | echo "Downloading jemalloc" ; 24 | curl -s -L ${JEMALLOC_URL} -o jemalloc.tar.bz2 ; 25 | tar xjf ./jemalloc.tar.bz2 ; 26 | cd jemalloc-5.2.1 ; 27 | ./configure --with-jemalloc-prefix='je_' ; 28 | make ; 29 | if [ "$USER_ID" -eq "0" ]; then 30 | make install ; 31 | else 32 | echo "==== Need sudo access to install jemalloc" ; 33 | sudo make install ; 34 | fi 35 | fi 36 | popd 37 | } 38 | 39 | tags="-tags=jemalloc" 40 | 41 | # Ensure that we can compile the binary. 42 | pushd badger 43 | go build -v $tags . 44 | popd 45 | 46 | # tags="" 47 | InstallJemalloc 48 | 49 | # Run the memory intensive tests first. 50 | manual() { 51 | timeout="-timeout 2m" 52 | echo "==> Running package tests for $packages" 53 | set -e 54 | for pkg in $packages; do 55 | echo "===> Testing $pkg" 56 | go test $tags -timeout=25m -race $pkg -parallel 16 57 | done 58 | echo "==> DONE package tests" 59 | 60 | echo "==> Running manual tests" 61 | # Run the special Truncate test. 62 | rm -rf p 63 | set -e 64 | 65 | # TODO(ibrahim): Let's make these tests have Manual prefix. 66 | # go test $tags -run='TestManual' --manual=true --parallel=2 67 | # TestWriteBatch 68 | # TestValueGCManaged 69 | # TestDropPrefix 70 | # TestDropAllManaged 71 | go test $tags $timeout -run='TestBigKeyValuePairs$' --manual=true 72 | go test $tags $timeout -run='TestPushValueLogLimit' --manual=true 73 | go test $tags $timeout -run='TestKeyCount' --manual=true 74 | go test $tags $timeout -run='TestIteratePrefix' --manual=true 75 | go test $tags $timeout -run='TestIterateParallel' --manual=true 76 | go test $tags $timeout -run='TestBigStream' --manual=true 77 | go test $tags $timeout -run='TestGoroutineLeak' --manual=true 78 | go test $tags $timeout -run='TestGetMore' --manual=true 79 | 80 | echo "==> DONE manual tests" 81 | } 82 | 83 | root() { 84 | # Run the normal tests. 85 | # go test -timeout=25m -v -race github.com/dgraph-io/badger/v3/... 86 | 87 | echo "==> Running root level tests." 88 | set -e 89 | go test --failfast $tags -timeout=25m . -v -race -parallel 16 90 | echo "==> DONE root level tests" 91 | } 92 | 93 | stream() { 94 | set -eo pipefail 95 | pushd badger 96 | baseDir=$(mktemp -d -p .) 97 | ./badger benchmark write -s --dir=$baseDir/test | tee $baseDir/log.txt 98 | ./badger benchmark read --dir=$baseDir/test --full-scan | tee --append $baseDir/log.txt 99 | ./badger benchmark read --dir=$baseDir/test -d=30s | tee --append $baseDir/log.txt 100 | ./badger stream --dir=$baseDir/test -o "$baseDir/test2" | tee --append $baseDir/log.txt 101 | count=$(cat "$baseDir/log.txt" | grep "at program end: 0 B" | wc -l) 102 | rm -rf $baseDir 103 | if [ $count -ne 4 ]; then 104 | echo "LEAK detected in Badger stream." 105 | return 1 106 | fi 107 | echo "==> DONE stream test" 108 | return 0 109 | } 110 | 111 | export -f stream 112 | export -f manual 113 | export -f root 114 | 115 | parallel --halt now,fail=1 --progress --line-buffer ::: stream manual root 116 | -------------------------------------------------------------------------------- /trie/trie_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. 
and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package trie 18 | 19 | import ( 20 | "sort" 21 | "testing" 22 | 23 | "github.com/outcaste-io/badger/v4/pb" 24 | "github.com/stretchr/testify/require" 25 | ) 26 | 27 | func TestGet(t *testing.T) { 28 | trie := NewTrie() 29 | trie.Add([]byte("hello"), 1) 30 | trie.Add([]byte("hello"), 3) 31 | trie.Add([]byte("hello"), 4) 32 | trie.Add([]byte("hel"), 20) 33 | trie.Add([]byte("he"), 20) 34 | trie.Add([]byte("badger"), 30) 35 | 36 | trie.Add(nil, 10) 37 | require.Equal(t, map[uint64]struct{}{10: {}}, trie.Get([]byte("A"))) 38 | 39 | ids := trie.Get([]byte("hel")) 40 | require.Equal(t, 2, len(ids)) 41 | require.Equal(t, map[uint64]struct{}{10: {}, 20: {}}, ids) 42 | 43 | ids = trie.Get([]byte("badger")) 44 | require.Equal(t, 2, len(ids)) 45 | require.Equal(t, map[uint64]struct{}{10: {}, 30: {}}, ids) 46 | 47 | ids = trie.Get([]byte("hello")) 48 | require.Equal(t, 5, len(ids)) 49 | require.Equal(t, map[uint64]struct{}{10: {}, 1: {}, 3: {}, 4: {}, 20: {}}, ids) 50 | 51 | trie.Add([]byte{}, 11) 52 | require.Equal(t, map[uint64]struct{}{10: {}, 11: {}}, trie.Get([]byte("A"))) 53 | } 54 | 55 | func TestTrieDelete(t *testing.T) { 56 | trie := NewTrie() 57 | t.Logf("Num nodes: %d", numNodes(trie.root)) 58 | require.Equal(t, 1, numNodes(trie.root)) 59 | 60 | trie.Add([]byte("hello"), 1) 61 | trie.Add([]byte("hello"), 3) 62 | trie.Add([]byte("hello"), 4) 63 | trie.Add(nil, 5) 64 | 65 | t.Logf("Num nodes: %d", numNodes(trie.root)) 66 | 67 | trie.Delete([]byte("hello"), 4) 68 | t.Logf("Num nodes: %d", numNodes(trie.root)) 69 | 70 | require.Equal(t, map[uint64]struct{}{5: {}, 1: {}, 3: {}}, trie.Get([]byte("hello"))) 71 | 72 | trie.Delete(nil, 5) 73 | t.Logf("Num nodes: %d", numNodes(trie.root)) 74 | require.Equal(t, map[uint64]struct{}{1: {}, 3: {}}, trie.Get([]byte("hello"))) 75 | 76 | trie.Delete([]byte("hello"), 1) 77 | trie.Delete([]byte("hello"), 3) 78 | trie.Delete([]byte("hello"), 4) 79 | trie.Delete([]byte("hello"), 5) 80 | trie.Delete([]byte("hello"), 6) 81 | 82 | require.Equal(t, 1, numNodes(trie.root)) 83 | t.Logf("Num nodes: %d", numNodes(trie.root)) 84 | 85 | require.Equal(t, true, trie.root.isEmpty()) 86 | require.Equal(t, map[uint64]struct{}{}, trie.Get([]byte("hello"))) 87 | } 88 | 89 | func TestParseIgnoreBytes(t *testing.T) { 90 | out, err := parseIgnoreBytes("1") 91 | require.NoError(t, err) 92 | require.Equal(t, []bool{false, true}, out) 93 | 94 | out, err = parseIgnoreBytes("0") 95 | require.NoError(t, err) 96 | require.Equal(t, []bool{true}, out) 97 | 98 | out, err = parseIgnoreBytes("0, 3 - 5, 7") 99 | require.NoError(t, err) 100 | require.Equal(t, []bool{true, false, false, true, true, true, false, true}, out) 101 | } 102 | 103 | func TestPrefixMatchWithHoles(t *testing.T) { 104 | trie := NewTrie() 105 | 106 | add := func(prefix, ignore string, id uint64) { 107 | m := pb.Match{ 108 | Prefix: []byte(prefix), 109 | IgnoreBytes: ignore, 110 | } 111 | 
require.NoError(t, trie.AddMatch(m, id)) 112 | } 113 | 114 | add("", "", 1) 115 | add("aaaa", "", 2) 116 | add("aaaaaa", "2-10", 3) 117 | add("aaaaaaaaa", "0, 4 - 6, 8", 4) 118 | 119 | get := func(k string) []uint64 { 120 | var ids []uint64 121 | m := trie.Get([]byte(k)) 122 | for id := range m { 123 | ids = append(ids, id) 124 | } 125 | sort.Slice(ids, func(i, j int) bool { 126 | return ids[i] < ids[j] 127 | }) 128 | return ids 129 | } 130 | 131 | // Everything matches 1 132 | require.Equal(t, []uint64{1}, get("")) 133 | require.Equal(t, []uint64{1}, get("aax")) 134 | 135 | // aaaaa would match 2, but not 3 because 3's length is 6. 136 | require.Equal(t, []uint64{1, 2}, get("aaaaa")) 137 | 138 | // aa and enough length is sufficient to match 3. 139 | require.Equal(t, []uint64{1, 3}, get("aabbbbbbbb")) 140 | 141 | // has differences in the right place to match 4. 142 | require.Equal(t, []uint64{1, 4}, get("baaabbbabba")) 143 | 144 | // Even with differences matches everything. 145 | require.Equal(t, []uint64{1, 2, 3, 4}, get("aaaabbbabba")) 146 | 147 | t.Logf("Num nodes: %d", numNodes(trie.root)) 148 | 149 | del := func(prefix, ignore string, id uint64) { 150 | m := pb.Match{ 151 | Prefix: []byte(prefix), 152 | IgnoreBytes: ignore, 153 | } 154 | require.NoError(t, trie.DeleteMatch(m, id)) 155 | } 156 | 157 | del("aaaaaaaaa", "0, 4 - 6, 8", 5) 158 | t.Logf("Num nodes: %d", numNodes(trie.root)) 159 | 160 | del("aaaaaaaaa", "0, 4 - 6, 8", 4) 161 | t.Logf("Num nodes: %d", numNodes(trie.root)) 162 | 163 | del("aaaaaa", "2-10", 3) 164 | t.Logf("Num nodes: %d", numNodes(trie.root)) 165 | 166 | del("aaaa", "", 2) 167 | t.Logf("Num nodes: %d", numNodes(trie.root)) 168 | 169 | del("", "", 1) 170 | t.Logf("Num nodes: %d", numNodes(trie.root)) 171 | 172 | del("abracadabra", "", 4) 173 | t.Logf("Num nodes: %d", numNodes(trie.root)) 174 | 175 | require.Equal(t, 1, numNodes(trie.root)) 176 | } 177 | -------------------------------------------------------------------------------- /util.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "encoding/hex" 21 | "io/ioutil" 22 | "math/rand" 23 | "sync/atomic" 24 | "time" 25 | 26 | "github.com/outcaste-io/badger/v4/table" 27 | "github.com/outcaste-io/badger/v4/y" 28 | "github.com/pkg/errors" 29 | ) 30 | 31 | func (s *levelsController) validate() error { 32 | for _, l := range s.levels { 33 | if err := l.validate(); err != nil { 34 | return y.Wrap(err, "Levels Controller") 35 | } 36 | } 37 | return nil 38 | } 39 | 40 | // Check does some sanity check on one level of data or in-memory index. 
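// Concretely, for every level above 0 it verifies that tables are kept in key
// order and do not overlap: Biggest(table j-1) must be strictly smaller than
// Smallest(table j), and each table's Smallest key must not exceed its Biggest.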
41 | func (s *levelHandler) validate() error { 42 | if s.level == 0 { 43 | return nil 44 | } 45 | 46 | s.RLock() 47 | defer s.RUnlock() 48 | numTables := len(s.tables) 49 | for j := 1; j < numTables; j++ { 50 | if j >= len(s.tables) { 51 | return errors.Errorf("Level %d, j=%d numTables=%d", s.level, j, numTables) 52 | } 53 | 54 | if y.CompareKeys(s.tables[j-1].Biggest(), s.tables[j].Smallest()) >= 0 { 55 | return errors.Errorf( 56 | "Inter: Biggest(j-1)[%d] \n%s\n vs Smallest(j)[%d]: \n%s\n: "+ 57 | "level=%d j=%d numTables=%d", 58 | s.tables[j-1].ID(), hex.Dump(s.tables[j-1].Biggest()), s.tables[j].ID(), 59 | hex.Dump(s.tables[j].Smallest()), s.level, j, numTables) 60 | } 61 | 62 | if y.CompareKeys(s.tables[j].Smallest(), s.tables[j].Biggest()) > 0 { 63 | return errors.Errorf( 64 | "Intra: \n%s\n vs \n%s\n: level=%d j=%d numTables=%d", 65 | hex.Dump(s.tables[j].Smallest()), hex.Dump(s.tables[j].Biggest()), s.level, j, numTables) 66 | } 67 | } 68 | return nil 69 | } 70 | 71 | // func (s *KV) debugPrintMore() { s.lc.debugPrintMore() } 72 | 73 | // // debugPrintMore shows key ranges of each level. 74 | // func (s *levelsController) debugPrintMore() { 75 | // s.Lock() 76 | // defer s.Unlock() 77 | // for i := 0; i < s.kv.opt.MaxLevels; i++ { 78 | // s.levels[i].debugPrintMore() 79 | // } 80 | // } 81 | 82 | // func (s *levelHandler) debugPrintMore() { 83 | // s.RLock() 84 | // defer s.RUnlock() 85 | // s.elog.Printf("Level %d:", s.level) 86 | // for _, t := range s.tables { 87 | // y.Printf(" [%s, %s]", t.Smallest(), t.Biggest()) 88 | // } 89 | // y.Printf("\n") 90 | // } 91 | 92 | // reserveFileID reserves a unique file id. 93 | func (s *levelsController) reserveFileID() uint64 { 94 | id := atomic.AddUint64(&s.nextFileID, 1) 95 | return id - 1 96 | } 97 | 98 | func getIDMap(dir string) map[uint64]struct{} { 99 | fileInfos, err := ioutil.ReadDir(dir) 100 | y.Check(err) 101 | idMap := make(map[uint64]struct{}) 102 | for _, info := range fileInfos { 103 | if info.IsDir() { 104 | continue 105 | } 106 | fileID, ok := table.ParseFileID(info.Name()) 107 | if !ok { 108 | continue 109 | } 110 | idMap[fileID] = struct{}{} 111 | } 112 | return idMap 113 | } 114 | 115 | func init() { 116 | rand.Seed(time.Now().UnixNano()) 117 | } 118 | -------------------------------------------------------------------------------- /value_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package badger 18 | 19 | import ( 20 | "fmt" 21 | "io/ioutil" 22 | "testing" 23 | 24 | "github.com/stretchr/testify/require" 25 | ) 26 | 27 | func TestValueEntryChecksum(t *testing.T) { 28 | k := []byte("KEY") 29 | v := []byte(fmt.Sprintf("val%100d", 10)) 30 | t.Run("ok", func(t *testing.T) { 31 | dir, err := ioutil.TempDir("", "badger-test") 32 | require.NoError(t, err) 33 | defer removeDir(dir) 34 | 35 | opt := getTestOptions(dir) 36 | opt.VerifyValueChecksum = true 37 | db, err := Open(opt) 38 | require.NoError(t, err) 39 | 40 | txnSetSlow(t, db, k, v, 0) 41 | require.NoError(t, db.Close()) 42 | 43 | db, err = Open(opt) 44 | require.NoError(t, err) 45 | 46 | txn := db.NewReadTxn(1) 47 | entry, err := txn.Get(k) 48 | require.NoError(t, err) 49 | 50 | x, err := entry.ValueCopy(nil) 51 | require.NoError(t, err) 52 | require.Equal(t, v, x) 53 | 54 | require.NoError(t, db.Close()) 55 | }) 56 | } 57 | -------------------------------------------------------------------------------- /y/bloom.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 The LevelDB-Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package y 6 | 7 | import "math" 8 | 9 | // Filter is an encoded set of []byte keys. 10 | type Filter []byte 11 | 12 | func (f Filter) MayContainKey(k []byte) bool { 13 | return f.MayContain(Hash(k)) 14 | } 15 | 16 | // MayContain returns whether the filter may contain given key. False positives 17 | // are possible, where it returns true for keys not in the original set. 18 | func (f Filter) MayContain(h uint32) bool { 19 | if len(f) < 2 { 20 | return false 21 | } 22 | k := f[len(f)-1] 23 | if k > 30 { 24 | // This is reserved for potentially new encodings for short Bloom filters. 25 | // Consider it a match. 26 | return true 27 | } 28 | nBits := uint32(8 * (len(f) - 1)) 29 | delta := h>>17 | h<<15 30 | for j := uint8(0); j < k; j++ { 31 | bitPos := h % nBits 32 | if f[bitPos/8]&(1<<(bitPos%8)) == 0 { 33 | return false 34 | } 35 | h += delta 36 | } 37 | return true 38 | } 39 | 40 | // NewFilter returns a new Bloom filter that encodes a set of []byte keys with 41 | // the given number of bits per key, approximately. 42 | // 43 | // A good bitsPerKey value is 10, which yields a filter with ~ 1% false 44 | // positive rate. 45 | func NewFilter(keys []uint32, bitsPerKey int) Filter { 46 | return Filter(appendFilter(nil, keys, bitsPerKey)) 47 | } 48 | 49 | // BloomBitsPerKey returns the bits per key required by bloomfilter based on 50 | // the false positive rate. 51 | func BloomBitsPerKey(fp float64) int { 52 | if fp <= 0 { 53 | return 75 54 | } 55 | if fp >= 1 { 56 | return 1 57 | } 58 | return int(math.Ceil(-1.44 * math.Log2(fp))) 59 | } 60 | 61 | func appendFilter(buf []byte, keys []uint32, bitsPerKey int) []byte { 62 | if bitsPerKey < 0 { 63 | bitsPerKey = 0 64 | } 65 | // 0.69 is approximately ln(2). 66 | k := uint32(float64(bitsPerKey) * 0.69) 67 | if k < 1 { 68 | k = 1 69 | } 70 | if k > 30 { 71 | k = 30 72 | } 73 | 74 | nBits := len(keys) * int(bitsPerKey) 75 | // For small len(keys), we can see a very high false positive rate. Fix it 76 | // by enforcing a minimum bloom filter length. 
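// (With bitsPerKey=10 and a single key, the raw size would be only 10 bits;
// padding up to a 64-bit minimum keeps the k probes from saturating such a
// tiny filter.)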
77 | if nBits < 64 { 78 | nBits = 64 79 | } 80 | nBytes := (nBits + 7) / 8 81 | nBits = nBytes * 8 82 | buf, filter := extend(buf, nBytes+1) 83 | 84 | for _, h := range keys { 85 | delta := h>>17 | h<<15 86 | for j := uint32(0); j < k; j++ { 87 | bitPos := h % uint32(nBits) 88 | filter[bitPos/8] |= 1 << (bitPos % 8) 89 | h += delta 90 | } 91 | } 92 | filter[nBytes] = uint8(k) 93 | 94 | return buf 95 | } 96 | 97 | // extend appends n zero bytes to b. It returns the overall slice (of length 98 | // n+len(originalB)) and the slice of n trailing zeroes. 99 | func extend(b []byte, n int) (overall, trailer []byte) { 100 | want := n + len(b) 101 | if want <= cap(b) { 102 | overall = b[:want] 103 | trailer = overall[len(b):] 104 | for i := range trailer { 105 | trailer[i] = 0 106 | } 107 | } else { 108 | // Grow the capacity exponentially, with a 1KiB minimum. 109 | c := 1024 110 | for c < want { 111 | c += c / 4 112 | } 113 | overall = make([]byte, want, c) 114 | trailer = overall[len(b):] 115 | copy(overall, b) 116 | } 117 | return overall, trailer 118 | } 119 | 120 | // hash implements a hashing algorithm similar to the Murmur hash. 121 | func Hash(b []byte) uint32 { 122 | const ( 123 | seed = 0xbc9f1d34 124 | m = 0xc6a4a793 125 | ) 126 | h := uint32(seed) ^ uint32(len(b))*m 127 | for ; len(b) >= 4; b = b[4:] { 128 | h += uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 129 | h *= m 130 | h ^= h >> 16 131 | } 132 | switch len(b) { 133 | case 3: 134 | h += uint32(b[2]) << 16 135 | fallthrough 136 | case 2: 137 | h += uint32(b[1]) << 8 138 | fallthrough 139 | case 1: 140 | h += uint32(b[0]) 141 | h *= m 142 | h ^= h >> 24 143 | } 144 | return h 145 | } 146 | 147 | // FilterPolicy implements the db.FilterPolicy interface from the leveldb/db 148 | // package. 149 | // 150 | // The integer value is the approximate number of bits used per key. A good 151 | // value is 10, which yields a filter with ~ 1% false positive rate. 152 | // 153 | // It is valid to use the other API in this package (leveldb/bloom) without 154 | // using this type or the leveldb/db package. 155 | 156 | // type FilterPolicy int 157 | 158 | // // Name implements the db.FilterPolicy interface. 159 | // func (p FilterPolicy) Name() string { 160 | // // This string looks arbitrary, but its value is written to LevelDB .ldb 161 | // // files, and should be this exact value to be compatible with those files 162 | // // and with the C++ LevelDB code. 163 | // return "leveldb.BuiltinBloomFilter2" 164 | // } 165 | 166 | // // AppendFilter implements the db.FilterPolicy interface. 167 | // func (p FilterPolicy) AppendFilter(dst []byte, keys [][]byte) []byte { 168 | // return appendFilter(dst, keys, int(p)) 169 | // } 170 | 171 | // // MayContain implements the db.FilterPolicy interface. 172 | // func (p FilterPolicy) MayContain(filter, key []byte) bool { 173 | // return Filter(filter).MayContain(key) 174 | // } 175 | -------------------------------------------------------------------------------- /y/bloom_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 The LevelDB-Go Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style 3 | // license that can be found in the LICENSE file. 
4 | 5 | package y 6 | 7 | import ( 8 | "fmt" 9 | "math" 10 | "math/rand" 11 | "testing" 12 | ) 13 | 14 | func (f Filter) String() string { 15 | s := make([]byte, 8*len(f)) 16 | for i, x := range f { 17 | for j := 0; j < 8; j++ { 18 | if x&(1<> 0) 75 | b[1] = uint8(uint32(i) >> 8) 76 | b[2] = uint8(uint32(i) >> 16) 77 | b[3] = uint8(uint32(i) >> 24) 78 | return b 79 | } 80 | 81 | nMediocreFilters, nGoodFilters := 0, 0 82 | loop: 83 | for length := 1; length <= 10000; length = nextLength(length) { 84 | keys := make([][]byte, 0, length) 85 | for i := 0; i < length; i++ { 86 | keys = append(keys, le32(i)) 87 | } 88 | var hashes []uint32 89 | for _, key := range keys { 90 | hashes = append(hashes, Hash(key)) 91 | } 92 | f := NewFilter(hashes, 10) 93 | 94 | if len(f) > (length*10/8)+40 { 95 | t.Errorf("length=%d: len(f)=%d is too large", length, len(f)) 96 | continue 97 | } 98 | 99 | // All added keys must match. 100 | for _, key := range keys { 101 | if !f.MayContainKey(key) { 102 | t.Errorf("length=%d: did not contain key %q", length, key) 103 | continue loop 104 | } 105 | } 106 | 107 | // Check false positive rate. 108 | nFalsePositive := 0 109 | for i := 0; i < 10000; i++ { 110 | if f.MayContainKey(le32(1e9 + i)) { 111 | nFalsePositive++ 112 | } 113 | } 114 | if nFalsePositive > 0.02*10000 { 115 | t.Errorf("length=%d: %d false positives in 10000", length, nFalsePositive) 116 | continue 117 | } 118 | if nFalsePositive > 0.0125*10000 { 119 | nMediocreFilters++ 120 | } else { 121 | nGoodFilters++ 122 | } 123 | } 124 | 125 | if nMediocreFilters > nGoodFilters/5 { 126 | t.Errorf("%d mediocre filters but only %d good filters", nMediocreFilters, nGoodFilters) 127 | } 128 | } 129 | 130 | func BenchmarkHash(b *testing.B) { 131 | for _, sz := range []int64{1, 3, 4, 7, 8, 16, 32, 256, 1024, 4096} { 132 | d := make([]byte, sz) 133 | _, err := rand.Read(d) 134 | if err != nil { 135 | b.Errorf("failed to read %d random bytes %s", sz, err) 136 | } 137 | b.Run(fmt.Sprintf("hasher-%d", sz), func(b *testing.B) { 138 | b.SetBytes(sz) 139 | b.ResetTimer() 140 | for i := 0; i < b.N; i++ { 141 | _ = Hash(d) 142 | } 143 | }) 144 | } 145 | } 146 | 147 | func TestHash(t *testing.T) { 148 | // The magic want numbers come from running the C++ leveldb code in hash.cc. 149 | testCases := []struct { 150 | s string 151 | want uint32 152 | }{ 153 | {"", 0xbc9f1d34}, 154 | {"g", 0xd04a8bda}, 155 | {"go", 0x3e0b0745}, 156 | {"gop", 0x0c326610}, 157 | {"goph", 0x8c9d6390}, 158 | {"gophe", 0x9bfd4b0a}, 159 | {"gopher", 0xa78edc7c}, 160 | {"I had a dream it would end this way.", 0xe14a9db9}, 161 | } 162 | for _, tc := range testCases { 163 | if got := Hash([]byte(tc.s)); got != tc.want { 164 | t.Errorf("s=%q: got 0x%08x, want 0x%08x", tc.s, got, tc.want) 165 | } 166 | } 167 | } 168 | 169 | func TestBloomBitsPerKey(t *testing.T) { 170 | testCases := []struct { 171 | fp float64 172 | want int 173 | }{ 174 | // epsilon (minimum possible float) 175 | {math.Nextafter(1, 2) - 1, 75}, 176 | {0, 75}, 177 | {0.1, 5}, 178 | {0.01, 10}, 179 | {0.001, 15}, 180 | {0.999, 1}, 181 | {1, 1}, 182 | {math.MaxFloat64, 1}, 183 | } 184 | for _, tc := range testCases { 185 | if got := BloomBitsPerKey(tc.fp); got != tc.want { 186 | t.Errorf("fp=%f: got %d, want %d", tc.fp, got, tc.want) 187 | } 188 | } 189 | } 190 | -------------------------------------------------------------------------------- /y/checksum.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. 
and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package y 18 | 19 | import ( 20 | "hash/crc32" 21 | 22 | "github.com/outcaste-io/badger/v4/pb" 23 | 24 | "github.com/cespare/xxhash/v2" 25 | "github.com/pkg/errors" 26 | ) 27 | 28 | // ErrChecksumMismatch is returned at checksum mismatch. 29 | var ErrChecksumMismatch = errors.New("checksum mismatch") 30 | 31 | // CalculateChecksum calculates checksum for data using ct checksum type. 32 | func CalculateChecksum(data []byte, ct pb.Checksum_Algorithm) uint64 { 33 | switch ct { 34 | case pb.Checksum_CRC32C: 35 | return uint64(crc32.Checksum(data, CastagnoliCrcTable)) 36 | case pb.Checksum_XXHash64: 37 | return xxhash.Sum64(data) 38 | default: 39 | panic("checksum type not supported") 40 | } 41 | } 42 | 43 | // VerifyChecksum validates the checksum for the data against the given expected checksum. 44 | func VerifyChecksum(data []byte, expected *pb.Checksum) error { 45 | actual := CalculateChecksum(data, expected.Algo) 46 | if actual != expected.Sum { 47 | return Wrapf(ErrChecksumMismatch, "actual: %d, expected: %d", actual, expected.Sum) 48 | } 49 | return nil 50 | } 51 | -------------------------------------------------------------------------------- /y/encrypt.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package y 18 | 19 | import ( 20 | "bytes" 21 | "crypto/aes" 22 | "crypto/cipher" 23 | "crypto/rand" 24 | "io" 25 | ) 26 | 27 | // XORBlock encrypts the given data with AES and XOR's with IV. 28 | // Can be used for both encryption and decryption. IV is of 29 | // AES block size. 
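//
// Because AES-CTR is a symmetric keystream, calling XORBlock twice with the
// same key and IV round-trips the data. A minimal sketch (illustrative; key,
// pt and the buffer sizes are assumptions):
//
//	iv, _ := GenerateIV()
//	ct := make([]byte, len(pt))
//	_ = XORBlock(ct, pt, key, iv)  // encrypt
//	pt2 := make([]byte, len(ct))
//	_ = XORBlock(pt2, ct, key, iv) // decrypt with the same key and IV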
30 | func XORBlock(dst, src, key, iv []byte) error { 31 | block, err := aes.NewCipher(key) 32 | if err != nil { 33 | return err 34 | } 35 | stream := cipher.NewCTR(block, iv) 36 | stream.XORKeyStream(dst, src) 37 | return nil 38 | } 39 | 40 | func XORBlockAllocate(src, key, iv []byte) ([]byte, error) { 41 | block, err := aes.NewCipher(key) 42 | if err != nil { 43 | return nil, err 44 | } 45 | stream := cipher.NewCTR(block, iv) 46 | dst := make([]byte, len(src)) 47 | stream.XORKeyStream(dst, src) 48 | return dst, nil 49 | } 50 | 51 | func XORBlockStream(w io.Writer, src, key, iv []byte) error { 52 | block, err := aes.NewCipher(key) 53 | if err != nil { 54 | return err 55 | } 56 | stream := cipher.NewCTR(block, iv) 57 | sw := cipher.StreamWriter{S: stream, W: w} 58 | _, err = io.Copy(sw, bytes.NewReader(src)) 59 | return Wrapf(err, "XORBlockStream") 60 | } 61 | 62 | // GenerateIV generates IV. 63 | func GenerateIV() ([]byte, error) { 64 | iv := make([]byte, aes.BlockSize) 65 | _, err := rand.Read(iv) 66 | return iv, err 67 | } 68 | -------------------------------------------------------------------------------- /y/encrypt_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package y 18 | 19 | import ( 20 | "crypto/aes" 21 | "crypto/rand" 22 | "testing" 23 | 24 | "github.com/stretchr/testify/require" 25 | ) 26 | 27 | func TestXORBlock(t *testing.T) { 28 | key := make([]byte, 32) 29 | rand.Read(key) 30 | 31 | var iv []byte 32 | { 33 | b, err := aes.NewCipher(key) 34 | require.NoError(t, err) 35 | iv = make([]byte, b.BlockSize()) 36 | rand.Read(iv) 37 | t.Logf("Using %d size IV\n", len(iv)) 38 | } 39 | 40 | src := make([]byte, 1024) 41 | rand.Read(src) 42 | 43 | dst := make([]byte, 1024) 44 | err := XORBlock(dst, src, key, iv) 45 | require.NoError(t, err) 46 | 47 | act := make([]byte, 1024) 48 | err = XORBlock(act, dst, key, iv) 49 | require.NoError(t, err) 50 | require.Equal(t, src, act) 51 | 52 | // Now check if we can use the same byte slice as src and dst. While this is useful to know that 53 | // we can use src and dst as the same slice, this isn't applicable to Badger because we're 54 | // reading data right off mmap. We should not modify that data, so we have to use a different 55 | // slice for dst anyway. 56 | cp := append([]byte{}, src...) 57 | err = XORBlock(cp, cp, key, iv) 58 | require.NoError(t, err) 59 | require.Equal(t, dst, cp) 60 | 61 | err = XORBlock(cp, cp, key, iv) 62 | require.NoError(t, err) 63 | require.Equal(t, src, cp) 64 | } 65 | -------------------------------------------------------------------------------- /y/error.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. 
and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package y 18 | 19 | // This file contains some functions for error handling. Note that we are moving 20 | // towards using x.Trace, i.e., rpc tracing using net/tracer. But for now, these 21 | // functions are useful for simple checks logged on one machine. 22 | // Some common use cases are: 23 | // (1) You receive an error from external lib, and would like to check/log fatal. 24 | // For this, use x.Check, x.Checkf. These will check for err != nil, which is 25 | // more common in Go. If you want to check for boolean being true, use 26 | // x.Assert, x.Assertf. 27 | // (2) You receive an error from external lib, and would like to pass on with some 28 | // stack trace information. In this case, use x.Wrap or x.Wrapf. 29 | // (3) You want to generate a new error with stack trace info. Use x.Errorf. 30 | 31 | import ( 32 | "fmt" 33 | "log" 34 | 35 | "github.com/pkg/errors" 36 | ) 37 | 38 | var debugMode = false 39 | 40 | // Check logs fatal if err != nil. 41 | func Check(err error) { 42 | if err != nil { 43 | log.Fatalf("%+v", Wrap(err, "")) 44 | } 45 | } 46 | 47 | // Check2 acts as convenience wrapper around Check, using the 2nd argument as error. 48 | func Check2(_ interface{}, err error) { 49 | Check(err) 50 | } 51 | 52 | // AssertTrue asserts that b is true. Otherwise, it would log fatal. 53 | func AssertTrue(b bool) { 54 | if !b { 55 | log.Fatalf("%+v", errors.Errorf("Assert failed")) 56 | } 57 | } 58 | 59 | // AssertTruef is AssertTrue with extra info. 60 | func AssertTruef(b bool, format string, args ...interface{}) { 61 | if !b { 62 | log.Fatalf("%+v", errors.Errorf(format, args...)) 63 | } 64 | } 65 | 66 | // Wrap wraps errors from external lib. 67 | func Wrap(err error, msg string) error { 68 | if !debugMode { 69 | if err == nil { 70 | return nil 71 | } 72 | return fmt.Errorf("%s err: %w", msg, err) 73 | } 74 | return errors.Wrap(err, msg) 75 | } 76 | 77 | // Wrapf is Wrap with extra info. 78 | func Wrapf(err error, format string, args ...interface{}) error { 79 | if !debugMode { 80 | if err == nil { 81 | return nil 82 | } 83 | return fmt.Errorf(format+" error: %w", append(args, err)...) 84 | } 85 | return errors.Wrapf(err, format, args...) 86 | } 87 | -------------------------------------------------------------------------------- /y/event_log.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package y 18 | 19 | import "golang.org/x/net/trace" 20 | 21 | var ( 22 | NoEventLog trace.EventLog = nilEventLog{} 23 | ) 24 | 25 | type nilEventLog struct{} 26 | 27 | func (nel nilEventLog) Printf(format string, a ...interface{}) {} 28 | 29 | func (nel nilEventLog) Errorf(format string, a ...interface{}) {} 30 | 31 | func (nel nilEventLog) Finish() {} 32 | -------------------------------------------------------------------------------- /y/file_dsync.go: -------------------------------------------------------------------------------- 1 | //go:build !dragonfly && !freebsd && !windows && !plan9 2 | // +build !dragonfly,!freebsd,!windows,!plan9 3 | 4 | /* 5 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 6 | * 7 | * Licensed under the Apache License, Version 2.0 (the "License"); 8 | * you may not use this file except in compliance with the License. 9 | * You may obtain a copy of the License at 10 | * 11 | * http://www.apache.org/licenses/LICENSE-2.0 12 | * 13 | * Unless required by applicable law or agreed to in writing, software 14 | * distributed under the License is distributed on an "AS IS" BASIS, 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | * See the License for the specific language governing permissions and 17 | * limitations under the License. 18 | */ 19 | 20 | package y 21 | 22 | import "golang.org/x/sys/unix" 23 | 24 | func init() { 25 | datasyncFileFlag = unix.O_DSYNC 26 | } 27 | -------------------------------------------------------------------------------- /y/file_nodsync.go: -------------------------------------------------------------------------------- 1 | //go:build dragonfly || freebsd || windows || plan9 2 | // +build dragonfly freebsd windows plan9 3 | 4 | /* 5 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 6 | * 7 | * Licensed under the Apache License, Version 2.0 (the "License"); 8 | * you may not use this file except in compliance with the License. 9 | * You may obtain a copy of the License at 10 | * 11 | * http://www.apache.org/licenses/LICENSE-2.0 12 | * 13 | * Unless required by applicable law or agreed to in writing, software 14 | * distributed under the License is distributed on an "AS IS" BASIS, 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | * See the License for the specific language governing permissions and 17 | * limitations under the License. 18 | */ 19 | 20 | package y 21 | 22 | import "syscall" 23 | 24 | func init() { 25 | datasyncFileFlag = syscall.O_SYNC 26 | } 27 | -------------------------------------------------------------------------------- /y/iterator.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | package y 18 | 19 | import ( 20 | "bytes" 21 | ) 22 | 23 | // ValueStruct represents the value info that can be associated with a key, but also the internal 24 | // Meta field. 25 | type ValueStruct struct { 26 | Meta byte 27 | UserMeta byte 28 | Value []byte 29 | 30 | Version uint64 // This field is not serialized. Only for internal usage. 31 | } 32 | 33 | func sizeVarint(x uint64) (n int) { 34 | for { 35 | n++ 36 | x >>= 7 37 | if x == 0 { 38 | break 39 | } 40 | } 41 | return n 42 | } 43 | 44 | // EncodedSize is the size of the ValueStruct when encoded 45 | func (v *ValueStruct) EncodedSize() uint32 { 46 | return uint32(len(v.Value) + 2) // meta, usermeta. 47 | } 48 | 49 | // Decode uses the length of the slice to infer the length of the Value field. 50 | func (v *ValueStruct) Decode(b []byte) { 51 | v.Meta = b[0] 52 | v.UserMeta = b[1] 53 | v.Value = b[2:] 54 | } 55 | 56 | // Encode expects a slice of length at least v.EncodedSize(). 57 | func (v *ValueStruct) Encode(b []byte) uint32 { 58 | b[0] = v.Meta 59 | b[1] = v.UserMeta 60 | n := copy(b[2:], v.Value) 61 | return uint32(2 + n) 62 | } 63 | 64 | // EncodeTo should be kept in sync with the Encode function above. The reason 65 | // this function exists is to avoid creating byte arrays per key-value pair in 66 | // table/builder.go. 67 | func (v *ValueStruct) EncodeTo(buf *bytes.Buffer) { 68 | buf.WriteByte(v.Meta) 69 | buf.WriteByte(v.UserMeta) 70 | buf.Write(v.Value) 71 | } 72 | 73 | // Iterator is an interface for a basic iterator. 74 | type Iterator interface { 75 | Next() 76 | Rewind() 77 | Seek(key []byte) 78 | Key() []byte 79 | Value() ValueStruct 80 | Valid() bool 81 | 82 | // All iterators should be closed so that file garbage collection works. 83 | Close() error 84 | } 85 | -------------------------------------------------------------------------------- /y/metrics.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package y 18 | 19 | import ( 20 | "expvar" 21 | ) 22 | 23 | var ( 24 | // lsmSize has size of the LSM in bytes 25 | lsmSize *expvar.Map 26 | // pendingWrites tracks the number of pending writes. 
27 | pendingWrites *expvar.Map 28 | 29 | // These are cumulative 30 | 31 | // numReads has cumulative number of reads 32 | numReads *expvar.Int 33 | // numWrites has cumulative number of writes 34 | numWrites *expvar.Int 35 | // numBytesRead has cumulative number of bytes read 36 | numBytesRead *expvar.Int 37 | // numBytesWritten has cumulative number of bytes written 38 | numBytesWritten *expvar.Int 39 | // numLSMGets is number of LSM gets 40 | numLSMGets *expvar.Map 41 | // numLSMBloomHits is number of LSM bloom hits 42 | numLSMBloomHits *expvar.Map 43 | // numGets is number of gets 44 | numGets *expvar.Int 45 | // numPuts is number of puts 46 | numPuts *expvar.Int 47 | // numBlockedPuts is number of blocked puts 48 | numBlockedPuts *expvar.Int 49 | // numMemtableGets is number of memtable gets 50 | numMemtableGets *expvar.Int 51 | // numCompactionTables is the number of tables being compacted 52 | numCompactionTables *expvar.Int 53 | ) 54 | 55 | // These variables are global and have cumulative values for all kv stores. 56 | func init() { 57 | numReads = expvar.NewInt("badger_v3_disk_reads_total") 58 | numWrites = expvar.NewInt("badger_v3_disk_writes_total") 59 | numBytesRead = expvar.NewInt("badger_v3_read_bytes") 60 | numBytesWritten = expvar.NewInt("badger_v3_written_bytes") 61 | numLSMGets = expvar.NewMap("badger_v3_lsm_level_gets_total") 62 | numLSMBloomHits = expvar.NewMap("badger_v3_lsm_bloom_hits_total") 63 | numGets = expvar.NewInt("badger_v3_gets_total") 64 | numPuts = expvar.NewInt("badger_v3_puts_total") 65 | numBlockedPuts = expvar.NewInt("badger_v3_blocked_puts_total") 66 | numMemtableGets = expvar.NewInt("badger_v3_memtable_gets_total") 67 | lsmSize = expvar.NewMap("badger_v3_lsm_size_bytes") 68 | pendingWrites = expvar.NewMap("badger_v3_pending_writes_total") 69 | numCompactionTables = expvar.NewInt("badger_v3_compactions_current") 70 | } 71 | 72 | func NumReadsAdd(enabled bool, val int64) { 73 | addInt(enabled, numReads, val) 74 | } 75 | 76 | func NumWritesAdd(enabled bool, val int64) { 77 | addInt(enabled, numWrites, val) 78 | } 79 | 80 | func NumBytesReadAdd(enabled bool, val int64) { 81 | addInt(enabled, numBytesRead, val) 82 | } 83 | 84 | func NumBytesWrittenAdd(enabled bool, val int64) { 85 | addInt(enabled, numBytesWritten, val) 86 | } 87 | 88 | func NumGetsAdd(enabled bool, val int64) { 89 | addInt(enabled, numGets, val) 90 | } 91 | 92 | func NumPutsAdd(enabled bool, val int64) { 93 | addInt(enabled, numPuts, val) 94 | } 95 | 96 | func NumBlockedPutsAdd(enabled bool, val int64) { 97 | addInt(enabled, numBlockedPuts, val) 98 | } 99 | 100 | func NumMemtableGetsAdd(enabled bool, val int64) { 101 | addInt(enabled, numMemtableGets, val) 102 | } 103 | 104 | func NumCompactionTablesAdd(enabled bool, val int64) { 105 | addInt(enabled, numCompactionTables, val) 106 | } 107 | 108 | func LSMSizeSet(enabled bool, key string, val expvar.Var) { 109 | storeToMap(enabled, lsmSize, key, val) 110 | } 111 | 112 | func PendingWritesSet(enabled bool, key string, val expvar.Var) { 113 | storeToMap(enabled, pendingWrites, key, val) 114 | } 115 | 116 | func NumLSMBloomHitsAdd(enabled bool, key string, val int64) { 117 | addToMap(enabled, numLSMBloomHits, key, val) 118 | } 119 | 120 | func NumLSMGetsAdd(enabled bool, key string, val int64) { 121 | addToMap(enabled, numLSMGets, key, val) 122 | } 123 | 124 | func LSMSizeGet(enabled bool, key string) expvar.Var { 125 | return getFromMap(enabled, lsmSize, key) 126 | } 127 | 128 | func addInt(enabled bool, metric *expvar.Int, val int64) { 129
| if !enabled { 130 | return 131 | } 132 | 133 | metric.Add(val) 134 | } 135 | 136 | func addToMap(enabled bool, metric *expvar.Map, key string, val int64) { 137 | if !enabled { 138 | return 139 | } 140 | 141 | metric.Add(key, val) 142 | } 143 | 144 | func storeToMap(enabled bool, metric *expvar.Map, key string, val expvar.Var) { 145 | if !enabled { 146 | return 147 | } 148 | 149 | metric.Set(key, val) 150 | } 151 | 152 | func getFromMap(enabled bool, metric *expvar.Map, key string) expvar.Var { 153 | if !enabled { 154 | return nil 155 | } 156 | 157 | return metric.Get(key) 158 | } 159 | -------------------------------------------------------------------------------- /y/mutex_test.go: -------------------------------------------------------------------------------- 1 | package y 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "testing" 7 | ) 8 | 9 | func BenchmarkRWMutex(b *testing.B) { 10 | for ng := 1; ng <= 256; ng <<= 2 { 11 | b.Run(fmt.Sprint(ng), func(b *testing.B) { 12 | var mu sync.RWMutex 13 | mu.Lock() 14 | 15 | var wg sync.WaitGroup 16 | wg.Add(ng) 17 | 18 | n := b.N 19 | quota := n / ng 20 | 21 | for g := ng; g > 0; g-- { 22 | if g == 1 { 23 | quota = n 24 | } 25 | 26 | go func(quota int) { 27 | for i := 0; i < quota; i++ { 28 | mu.RLock() 29 | mu.RUnlock() 30 | } 31 | wg.Done() 32 | }(quota) 33 | 34 | n -= quota 35 | } 36 | 37 | if n != 0 { 38 | b.Fatalf("Incorrect quota assignments: %v remaining", n) 39 | } 40 | 41 | b.StartTimer() 42 | mu.Unlock() 43 | wg.Wait() 44 | b.StopTimer() 45 | }) 46 | } 47 | } 48 | 49 | func BenchmarkMutex(b *testing.B) { 50 | for ng := 1; ng <= 256; ng <<= 2 { 51 | b.Run(fmt.Sprint(ng), func(b *testing.B) { 52 | var mu sync.Mutex 53 | 54 | var wg sync.WaitGroup 55 | wg.Add(ng) 56 | 57 | n := b.N 58 | quota := n / ng 59 | 60 | for g := ng; g > 0; g-- { 61 | if g == 1 { 62 | quota = n 63 | } 64 | 65 | go func(quota int) { 66 | for i := 0; i < quota; i++ { 67 | mu.Lock() 68 | mu.Unlock() 69 | } 70 | wg.Done() 71 | }(quota) 72 | 73 | n -= quota 74 | } 75 | 76 | if n != 0 { 77 | b.Fatalf("Incorrect quota assignments: %v remaining", n) 78 | } 79 | 80 | b.StartTimer() 81 | wg.Wait() 82 | b.StopTimer() 83 | }) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /y/zstd.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2019 Dgraph Labs, Inc. and Contributors 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package y 18 | 19 | import ( 20 | "sync" 21 | 22 | "github.com/klauspost/compress/zstd" 23 | ) 24 | 25 | var ( 26 | decoder *zstd.Decoder 27 | encoder *zstd.Encoder 28 | 29 | encOnce, decOnce sync.Once 30 | ) 31 | 32 | // ZSTDDecompress decompresses a block using ZSTD algorithm. 
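//
// The output is appended to dst[:0], so a caller that reuses a scratch buffer
// with enough capacity avoids an extra allocation; otherwise DecodeAll grows it.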
33 | func ZSTDDecompress(dst, src []byte) ([]byte, error) { 34 | decOnce.Do(func() { 35 | var err error 36 | decoder, err = zstd.NewReader(nil) 37 | Check(err) 38 | }) 39 | return decoder.DecodeAll(src, dst[:0]) 40 | } 41 | 42 | // ZSTDCompress compresses a block using ZSTD algorithm. 43 | func ZSTDCompress(dst, src []byte, compressionLevel int) ([]byte, error) { 44 | encOnce.Do(func() { 45 | var err error 46 | level := zstd.EncoderLevelFromZstd(compressionLevel) 47 | encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(level)) 48 | Check(err) 49 | }) 50 | return encoder.EncodeAll(src, dst[:0]), nil 51 | } 52 | 53 | // ZSTDCompressBound returns the worst case size needed for a destination buffer. 54 | // Klauspost ZSTD library does not provide any API for Compression Bound. This 55 | // calculation is based on the DataDog ZSTD library. 56 | // See https://pkg.go.dev/github.com/DataDog/zstd#CompressBound 57 | func ZSTDCompressBound(srcSize int) int { 58 | lowLimit := 128 << 10 // 128 kB 59 | var margin int 60 | if srcSize < lowLimit { 61 | margin = (lowLimit - srcSize) >> 11 62 | } 63 | return srcSize + (srcSize >> 8) + margin 64 | } 65 | --------------------------------------------------------------------------------
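A minimal end-to-end sketch of the zstd helpers above (illustrative only; the module path follows the import seen in y/checksum.go, and the payload and compression level are assumptions):

package main

import (
	"bytes"
	"fmt"

	"github.com/outcaste-io/badger/v4/y"
)

func main() {
	src := bytes.Repeat([]byte("badger "), 512)

	// Size dst with the worst-case bound so the encoder can append without growing.
	dst := make([]byte, 0, y.ZSTDCompressBound(len(src)))
	compressed, err := y.ZSTDCompress(dst, src, 3) // level 3, a middle-of-the-road setting
	y.Check(err)

	decompressed, err := y.ZSTDDecompress(make([]byte, 0, len(src)), compressed)
	y.Check(err)

	fmt.Printf("in=%d compressed=%d roundtrip_ok=%v\n",
		len(src), len(compressed), bytes.Equal(src, decompressed))
}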