├── .github ├── ISSUE_TEMPLATE │ ├── bug-report.md │ └── feature-request.md ├── dependabot.yml ├── pull_request_template.md └── workflows │ ├── ci.yml │ ├── codeql-analysis.yml │ ├── coverage.yml │ └── safer-golangci-lint.yml ├── .gitignore ├── .golangci.yml ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── NOTICE ├── README.md ├── SECURITY.md ├── array.go ├── array_bench_test.go ├── array_benchmark_test.go ├── array_data_slab.go ├── array_data_slab_decode.go ├── array_data_slab_encode.go ├── array_dump.go ├── array_extradata.go ├── array_iterator.go ├── array_metadata_slab.go ├── array_metadata_slab_decode.go ├── array_metadata_slab_encode.go ├── array_serialization_verify.go ├── array_size_consts.go ├── array_slab.go ├── array_slab_stats.go ├── array_test.go ├── array_verify.go ├── array_wrappervalue_test.go ├── blake3_regression_test.go ├── buffer.go ├── cbor_tag_nums.go ├── check-headers.sh ├── circlehash64_regression_test.go ├── cmd └── smoke │ ├── array.go │ ├── main.go │ ├── map.go │ ├── storable.go │ ├── typeinfo.go │ └── utils.go ├── codecov.yml ├── compactmap_extradata.go ├── decode.go ├── doc.go ├── encode.go ├── errors.go ├── export_test.go ├── extradata.go ├── files ├── example.jpg └── logo.png ├── flag.go ├── flag_test.go ├── go.mod ├── go.sum ├── hash.go ├── inline_utils.go ├── logs ├── 2021-07-07 │ └── README.md └── 2021-07-08 │ ├── README.md │ └── atree_short_10x.tar.gz ├── map.go ├── map_data_slab.go ├── map_data_slab_decode.go ├── map_data_slab_encode.go ├── map_dump.go ├── map_element.go ├── map_element_decode.go ├── map_element_encode.go ├── map_elements.go ├── map_elements_decode.go ├── map_elements_encode.go ├── map_elements_hashkey.go ├── map_elements_nokey.go ├── map_extradata.go ├── map_iterator.go ├── map_metadata_slab.go ├── map_metadata_slab_decode.go ├── map_metadata_slab_encode.go ├── map_serialization_verify.go ├── map_size_consts.go ├── map_slab.go ├── map_slab_stats.go ├── map_test.go ├── map_verify.go ├── 
map_wrappervalue_test.go ├── mapcollision_bench_test.go ├── settings.go ├── slab.go ├── slab_id.go ├── slab_id_storable.go ├── slab_test.go ├── slice_utils.go ├── slice_utils_test.go ├── storable.go ├── storable_slab.go ├── storable_test.go ├── storage.go ├── storage_bench_test.go ├── storage_health_check.go ├── storage_test.go ├── test_utils ├── expected_value_utils.go ├── storable_utils.go ├── storage_utils.go ├── typeinfo_utils.go └── value_utils.go ├── typeinfo.go ├── utils_test.go ├── value.go └── value_id.go /.github/ISSUE_TEMPLATE/bug-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Reporting a Problem/Bug 3 | about: Reporting a Problem/Bug 4 | title: '' 5 | labels: bug 6 | assignees: fxamacker 7 | 8 | --- 9 | 10 | 17 | 18 | ### Problem 19 | 20 | 21 | 22 | ### Steps to Reproduce 23 | 24 | 25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Requesting a Feature or Improvement 3 | about: "For feature requests. Please search for existing issues first. 
Also see CONTRIBUTING.md" 4 | title: '' 5 | labels: feedback, feature 6 | assignees: fxamacker 7 | 8 | --- 9 | 10 | 17 | 18 | ### Issue To Be Solved 19 | 20 | 28 | 29 | ### Suggested Solution 30 | 31 | 44 | 45 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "daily" 8 | 9 | - package-ecosystem: "gomod" 10 | directory: "/" 11 | schedule: 12 | interval: "daily" 13 | ignore: 14 | - dependency-name: "github.com/fxamacker/cbor/v2" 15 | - dependency-name: "github.com/fxamacker/circlehash" 16 | 17 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | Closes #??? 2 | 3 | ## Description 4 | 5 | 9 | 10 | ______ 11 | 12 | 13 | 14 | - [ ] Targeted PR against `main` branch 15 | - [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work 16 | - [ ] Code follows the [standards mentioned here](https://github.com/onflow/atree/blob/master/CONTRIBUTING.md#styleguides) 17 | - [ ] Updated relevant documentation 18 | - [ ] Re-reviewed `Files changed` in the Github PR explorer 19 | - [ ] Added appropriate labels 20 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | # GitHub Actions - CI for Go to build & test. 2 | # Based on copy of https://github.com/fxamacker/cbor/workflows/ci.yml 3 | name: ci 4 | 5 | # Remove default permissions at top level and grant in jobs. 
6 | permissions: {} 7 | 8 | on: 9 | workflow_dispatch: 10 | push: 11 | branches: 12 | - main 13 | - 'feature/**' 14 | - 'release-**' 15 | pull_request: 16 | branches: 17 | - main 18 | - 'feature/**' 19 | - 'release-**' 20 | 21 | concurrency: 22 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} 23 | cancel-in-progress: true 24 | 25 | jobs: 26 | 27 | # Test on various OS with default Go version. 28 | tests: 29 | name: Test on ${{matrix.os}} 30 | runs-on: ${{ matrix.os }} 31 | 32 | permissions: 33 | contents: read 34 | 35 | strategy: 36 | matrix: 37 | os: [ubuntu-latest] 38 | go-version: ['1.23', '1.24'] 39 | 40 | steps: 41 | - name: Install Go 42 | uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 43 | with: 44 | go-version: ${{ matrix.go-version }} 45 | check-latest: true 46 | 47 | - name: Checkout code 48 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 49 | with: 50 | fetch-depth: 1 51 | 52 | - name: Get dependencies 53 | run: go get -v -t -d ./... 54 | 55 | - name: Build project 56 | run: go build ./... 57 | 58 | - name: Run tests 59 | run: | 60 | go version 61 | go test -timeout 180m -race -v ./... 62 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL" 2 | 3 | # Remove default permissions at top level and grant in jobs. 
4 | permissions: {} 5 | 6 | on: 7 | push: 8 | branches: 9 | - main 10 | - 'feature/**' 11 | - 'release-**' 12 | pull_request: 13 | branches: 14 | - main 15 | - 'feature/**' 16 | - 'release-**' 17 | schedule: 18 | - cron: '45 12 * * *' # Daily at 12:45 UTC / 7:45 AM Central 19 | 20 | concurrency: 21 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} 22 | cancel-in-progress: true 23 | 24 | jobs: 25 | analyze: 26 | name: Analyze 27 | runs-on: ubuntu-latest 28 | permissions: 29 | actions: read 30 | contents: read 31 | security-events: write 32 | 33 | strategy: 34 | fail-fast: false 35 | matrix: 36 | language: [ 'go' ] 37 | 38 | steps: 39 | - name: Checkout repository 40 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 41 | 42 | # Initializes the CodeQL tools for scanning. 43 | - name: Initialize CodeQL 44 | uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 45 | with: 46 | languages: ${{ matrix.language }} 47 | 48 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 49 | # If this step fails, then you should remove it and run the build manually (see below) 50 | - name: Autobuild 51 | uses: github/codeql-action/autobuild@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 52 | 53 | - name: Perform CodeQL Analysis 54 | uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 55 | -------------------------------------------------------------------------------- /.github/workflows/coverage.yml: -------------------------------------------------------------------------------- 1 | # coverage.yml Generate and upload Go code coverage report to Codecov. 2 | # https://github.com/onflow/atree/blob/main/.github/workflows/coverage.yml 3 | 4 | name: coverage 5 | 6 | # Remove permissions from top level and grant in jobs. 
7 | permissions: {} 8 | 9 | on: 10 | push: 11 | branches: 12 | - main 13 | - 'feature/**' 14 | - 'release-**' 15 | pull_request: 16 | branches: 17 | - main 18 | - 'feature/**' 19 | - 'release-**' 20 | 21 | concurrency: 22 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} 23 | cancel-in-progress: true 24 | 25 | jobs: 26 | build: 27 | runs-on: ubuntu-latest 28 | 29 | permissions: 30 | contents: read 31 | 32 | steps: 33 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 34 | with: 35 | fetch-depth: 2 36 | 37 | - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 38 | with: 39 | check-latest: true 40 | 41 | - name: Get dependencies 42 | run: go get -v -t -d ./... 43 | 44 | - name: Build project 45 | run: go build ./... 46 | 47 | - name: Generate coverage report 48 | run: go test -timeout 180m -race -coverprofile=coverage.txt -covermode=atomic 49 | 50 | - name: Upload coverage report to Codecov 51 | uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3 52 | with: 53 | files: ./coverage.txt 54 | fail_ci_if_error: false # we can set this to true after "Could not find a repository" upload error is fixed 55 | verbose: true # optional (default = false) 56 | -------------------------------------------------------------------------------- /.github/workflows/safer-golangci-lint.yml: -------------------------------------------------------------------------------- 1 | # Copyright © 2021-2023 Montgomery Edwards⁴⁴⁸ (github.com/x448). 2 | # This file is licensed under MIT License. 3 | # 4 | # Safer GitHub Actions Workflow for golangci-lint. 5 | # https://github.com/x448/safer-golangci-lint 6 | # 7 | name: linters 8 | 9 | # Remove default permissions and grant only what is required in each job. 
10 | permissions: {} 11 | 12 | on: 13 | workflow_dispatch: 14 | push: 15 | branches: 16 | - main 17 | - 'feature/**' 18 | - 'release-**' 19 | pull_request: 20 | branches: 21 | - main 22 | - 'feature/**' 23 | - 'release-**' 24 | 25 | env: 26 | GO_VERSION: '1.23' 27 | GOLINTERS_VERSION: 1.63.4 28 | GOLINTERS_ARCH: linux-amd64 29 | GOLINTERS_TGZ_DGST: 01abb14a4df47b5ca585eff3c34b105023cba92ec34ff17212dbb83855581690 30 | GOLINTERS_TIMEOUT: 15m 31 | OPENSSL_DGST_CMD: openssl dgst -sha256 -r 32 | CURL_CMD: curl --proto =https --tlsv1.2 --location --silent --show-error --fail 33 | 34 | concurrency: 35 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} 36 | cancel-in-progress: true 37 | 38 | jobs: 39 | main: 40 | name: Lint 41 | runs-on: ubuntu-latest 42 | permissions: 43 | contents: read 44 | steps: 45 | - name: Checkout source 46 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 47 | with: 48 | fetch-depth: 1 49 | 50 | - name: Setup Go 51 | uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 52 | with: 53 | go-version: ${{ env.GO_VERSION }} 54 | check-latest: true 55 | 56 | - name: Install golangci-lint 57 | run: | 58 | GOLINTERS_URL_PREFIX="https://github.com/golangci/golangci-lint/releases/download/v${GOLINTERS_VERSION}/" 59 | GOLINTERS_TGZ="golangci-lint-${GOLINTERS_VERSION}-${GOLINTERS_ARCH}.tar.gz" 60 | GOLINTERS_EXPECTED_DGST="${GOLINTERS_TGZ_DGST} *${GOLINTERS_TGZ}" 61 | DGST_CMD="${OPENSSL_DGST_CMD} ${GOLINTERS_TGZ}" 62 | 63 | cd $(mktemp -d /tmp/golinters.XXXXX) 64 | ${CURL_CMD} "${GOLINTERS_URL_PREFIX}${GOLINTERS_TGZ}" --output ${GOLINTERS_TGZ} 65 | 66 | GOLINTERS_GOT_DGST=$(${DGST_CMD}) 67 | if [ "${GOLINTERS_GOT_DGST}" != "${GOLINTERS_EXPECTED_DGST}" ] 68 | then 69 | echo "Digest of tarball is not equal to expected digest." 
70 | echo "Expected digest: " "${GOLINTERS_EXPECTED_DGST}" 71 | echo "Got digest: " "${GOLINTERS_GOT_DGST}" 72 | exit 1 73 | fi 74 | 75 | tar --no-same-owner -xzf "${GOLINTERS_TGZ}" --strip-components 1 76 | install golangci-lint $(go env GOPATH)/bin 77 | shell: bash 78 | 79 | # Run required linters enabled in .golangci.yml (or default linters if yml doesn't exist) 80 | - name: Run golangci-lint 81 | run: $(go env GOPATH)/bin/golangci-lint run --timeout="${GOLINTERS_TIMEOUT}" 82 | shell: bash 83 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | # .golangci.yml for github.com/onflow/atree 2 | 3 | linters: 4 | disable-all: true 5 | enable: 6 | - asciicheck 7 | - bidichk 8 | - copyloopvar 9 | - errcheck 10 | # - forbidigo # TODO: enable forbidigo after adding non-default settings 11 | - gocritic 12 | - gofmt 13 | - goimports 14 | - gosimple 15 | - govet 16 | - ineffassign 17 | - misspell 18 | - nilerr 19 | - staticcheck 20 | - typecheck 21 | - unconvert 22 | - unused 23 | 24 | issues: 25 | max-issues-per-linter: 0 26 | max-same-issues: 0 27 | 28 | linters-settings: 29 | gocritic: 30 | disabled-checks: 31 | - ifElseChain # style 32 | - singleCaseSwitch # style 33 | - unslice # false positives 34 | - commentFormatting # does not detect commented out code 35 | - exitAfterDefer 36 | 37 | goimports: 38 | local-prefixes: 
github.com/onflow/atree 39 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @fxamacker @ramtinms @turbolent 2 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | 2 | # Contributor Covenant Code of Conduct 3 | 4 | ## Our Pledge 5 | 6 | We as members, contributors, and leaders pledge to make participation in our 7 | community a harassment-free experience for everyone, regardless of age, body 8 | size, visible or invisible disability, ethnicity, sex characteristics, gender 9 | identity and expression, level of experience, education, socio-economic status, 10 | nationality, personal appearance, race, religion, or sexual identity 11 | and orientation. 12 | 13 | We pledge to act and interact in ways that contribute to an open, welcoming, 14 | diverse, inclusive, and healthy community. 
15 | 16 | ## Our Standards 17 | 18 | Examples of behavior that contributes to a positive environment for our 19 | community include: 20 | 21 | * Demonstrating empathy and kindness toward other people 22 | * Being respectful of differing opinions, viewpoints, and experiences 23 | * Giving and gracefully accepting constructive feedback 24 | * Accepting responsibility and apologizing to those affected by our mistakes, 25 | and learning from the experience 26 | * Focusing on what is best not just for us as individuals, but for the 27 | overall community 28 | 29 | Examples of unacceptable behavior include: 30 | 31 | * The use of sexualized language or imagery, and sexual attention or 32 | advances of any kind 33 | * Trolling, insulting or derogatory comments, and personal or political attacks 34 | * Public or private harassment 35 | * Publishing others' private information, such as a physical or email 36 | address, without their explicit permission 37 | * Other conduct which could reasonably be considered inappropriate in a 38 | professional setting 39 | 40 | ## Enforcement Responsibilities 41 | 42 | Community leaders are responsible for clarifying and enforcing our standards of 43 | acceptable behavior and will take appropriate and fair corrective action in 44 | response to any behavior that they deem inappropriate, threatening, offensive, 45 | or harmful. 46 | 47 | Community leaders have the right and responsibility to remove, edit, or reject 48 | comments, commits, code, wiki edits, issues, and other contributions that are 49 | not aligned to this Code of Conduct, and will communicate reasons for moderation 50 | decisions when appropriate. 51 | 52 | ## Scope 53 | 54 | This Code of Conduct applies within all community spaces, and also applies when 55 | an individual is officially representing the community in public spaces. 
56 | Examples of representing our community include using an official e-mail address, 57 | posting via an official social media account, or acting as an appointed 58 | representative at an online or offline event. 59 | 60 | ## Enforcement 61 | 62 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 63 | reported to the community leaders responsible for enforcement at . 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 
99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Atree 2 | 3 | The following is a set of guidelines for contributing to Atree. 4 | These are mostly guidelines, not rules. 5 | Use your best judgment, and feel free to propose changes to this document in a pull request. 
6 | 7 | ## Table Of Contents 8 | 9 | [Getting Started](#project-overview) 10 | 11 | [How Can I Contribute?](#how-can-i-contribute) 12 | 13 | - [Reporting Bugs](#reporting-bugs) 14 | - [Suggesting Enhancements](#suggesting-enhancements) 15 | - [Your First Code Contribution](#your-first-code-contribution) 16 | - [Pull Requests](#pull-requests) 17 | 18 | [Styleguides](#styleguides) 19 | 20 | - [Git Commit Messages](#git-commit-messages) 21 | - [Go Styleguide](#go-styleguide) 22 | 23 | [Additional Notes](#additional-notes) 24 | 25 | ## How Can I Contribute? 26 | 27 | ### Reporting Bugs 28 | 29 | #### Before Submitting A Bug Report 30 | 31 | - **Search existing issues** to see if the problem has already been reported. 32 | If it has **and the issue is still open**, add a comment to the existing issue instead of opening a new one. 33 | 34 | #### How Do I Submit A (Good) Bug Report? 35 | 36 | Explain the problem and include additional details to help maintainers reproduce the problem: 37 | 38 | - **Use a clear and descriptive title** for the issue to identify the problem. 39 | - **Describe the exact steps which reproduce the problem** in as many details as possible. 40 | When listing steps, **don't just say what you did, but explain how you did it**. 41 | - **Provide specific examples to demonstrate the steps**. 42 | Include links to files or GitHub projects, or copy/pasteable snippets, which you use in those examples. 43 | If you're providing snippets in the issue, 44 | use [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines). 45 | - **Describe the behavior you observed after following the steps** and point out what exactly is the problem with that behavior. 46 | - **Explain which behavior you expected to see instead and why.** 47 | - **Include error messages and stack traces** which show the output / crash and clearly demonstrate the problem. 
48 | 49 | Provide more context by answering these questions: 50 | 51 | - **Can you reliably reproduce the issue?** If not, provide details about how often the problem happens 52 | and under which conditions it normally happens. 53 | 54 | Include details about your configuration and environment: 55 | 56 | - **What is the version of the Atree you're using**? 57 | - **What's the name and version of the Operating System you're using**? 58 | 59 | ### Suggesting Enhancements 60 | 61 | #### Before Submitting An Enhancement Suggestion 62 | 63 | - **Perform a cursory search** to see if the enhancement has already been suggested. 64 | If it has, add a comment to the existing issue instead of opening a new one. 65 | 66 | #### How Do I Submit A (Good) Enhancement Suggestion? 67 | 68 | Enhancement suggestions are tracked as [GitHub issues](https://guides.github.com/features/issues/). 69 | Create an issue and provide the following information: 70 | 71 | - **Use a clear and descriptive title** for the issue to identify the suggestion. 72 | - **Provide a step-by-step description of the suggested enhancement** in as many details as possible. 73 | - **Provide specific examples to demonstrate the steps**. 74 | Include copy/pasteable snippets which you use in those examples, 75 | as [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines). 76 | - **Describe the current behavior** and **explain which behavior you expected to see instead** and why. 77 | - **Explain why this enhancement would be useful** to Atree users. 78 | 79 | ### Your First Code Contribution 80 | 81 | Unsure where to begin contributing to Atree? 82 | You can start by looking through these "Good first issue" and "Help wanted" issues: 83 | 84 | - [Good first issues](https://github.com/onflow/atree/labels/good%20first%20issue): 85 | issues which should only require a few lines of code, and a test or two. 
86 | - [Help wanted issues](https://github.com/onflow/atree/labels/help%20wanted): 87 | issues which should be a bit more involved than "Good first issue" issues. 88 | 89 | Both issue lists are sorted by total number of comments. 90 | While not perfect, number of comments is a reasonable proxy for impact a given change will have. 91 | 92 | ### Pull Requests 93 | 94 | The process described here has several goals: 95 | 96 | - Maintain code quality 97 | - Fix problems that are important to users 98 | - Engage the community in working toward the best possible Atree UX 99 | - Enable a sustainable system for the Atree's maintainers to review contributions 100 | 101 | Please follow the [styleguides](#styleguides) to have your contribution considered by the maintainers. 102 | Reviewer(s) may ask you to complete additional design work, tests, 103 | or other changes before your pull request can be ultimately accepted. 104 | 105 | ## Styleguides 106 | 107 | Before contributing, make sure to examine the project to get familiar with the patterns and style already being used. 108 | 109 | ### Git Commit Messages 110 | 111 | - Use the present tense ("Add feature" not "Added feature") 112 | - Use the imperative mood ("Move cursor to..." not "Moves cursor to...") 113 | - Limit the first line to 72 characters or less 114 | - Reference issues and pull requests liberally after the first line 115 | 116 | ### Go Styleguide 117 | 118 | The majority of this project is written Go. 119 | 120 | We try to follow the coding guidelines from the Go community. 
121 | 122 | - Code should be formatted using `gofmt` 123 | - Code should pass the linter: `make lint` 124 | - Code should follow the guidelines covered in 125 | [Effective Go](http://golang.org/doc/effective_go.html) 126 | and [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) 127 | - Code should be commented 128 | - Code should pass all tests: `make test` 129 | 130 | ## Additional Notes 131 | 132 | Thank you for your interest in contributing to Atree! 133 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Atree 2 | Copyright 2021-2024 Flow Foundation 3 | 4 | This product includes software developed at the Flow Foundation (https://flow.com/flow-foundation). -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 |

3 | 4 |

5 | 6 |

7 | 8 | 9 | 10 |

11 | 12 | # Atree 13 | 14 | __Atree__ provides scalable arrays and scalable ordered maps. It segments, encodes, and stores data into relatively small, relatively fixed-size segments of bytes (aka payloads, registers, or slabs). This enables blockchains to only hash and transmit modified segments instead of the entire array, map, or large element. 15 | 16 | Atree is used by [Cadence](https://github.com/onflow/cadence) in the [Flow](https://github.com/onflow/flow-go) blockchain. 17 | 18 | Inspired by patterns used in modern variants of B+ Trees, Atree provides two types of data structures: Scalable Array Type (SAT) and Ordered Map Type (OMT). 19 | 20 | - __Scalable Array Type (SAT)__ is a heterogeneous variable-size array, storing any type of values into a smaller ordered list of values and provides efficient functionality to lookup, insert and remove elements anywhere in the array. 21 | 22 | - __Ordered Map Type (OMT)__ is an ordered map of key-value pairs; keys can be any hashable type and values can be any serializable value type. It supports heterogeneous key or value types (e.g. first key storing a boolean and second key storing a string). OMT keeps values in specific sorted order and operations are deterministic so the state of the segments after a sequence of operations are always unique. OMT uses [CircleHash64f with Deferred+Segmented BLAKE3 Digests](#omt-uses-circlehash64-with-deferredsegmented-blake3-digests). 23 | 24 | ## Under the Hood 25 | 26 | Atree uses new types of high-fanout B+ tree and some heuristics to balance the trade-off between latency of operations and the number of reads and writes. 27 | 28 | Each data structure holds the data as several relatively fixed-size segments of bytes (aka payloads, registers, or slabs) forming a tree and as the size of data structures grows or shrinks, it adjusts the number of segments used. 
After each operation, Atree tries to keep segment size within an acceptable size range by merging segments when needed (lower than min threshold) and splitting large-size slabs (above max threshold) or moving some values to neighbouring segments (rebalancing). For ordered maps and arrays with a small number of elements, Atree is designed to have very minimal overhead compared to less scalable standard arrays and ordered maps (using a single data segment at start). 29 | 30 | In order to minimize the number of bytes touched after each operation, Atree uses a deterministic greedy approach ("Optimistic Encasing Algorithm") to postpone merging, splitting, and rebalancing the tree as much as possible. In other words, it tolerates the tree becoming unbalanced, at the cost of keeping some space for future insertions or growing a segment a bit larger than it should be, which minimizes the number of segments (and bytes) that are touched at each operation. 31 | 32 | ## Example 33 | 34 |

35 | 36 |

37 | 38 | **1** - An ordered map metadata slab keeps the very first key hash of any children to navigate the path. It uses a combination of linear scan and binary search to find the next slab. 39 | 40 | **2** - Similarly, the array metadata slab keeps the count of each child and uses that to navigate the path. 41 | 42 | **3** - Nested structures (e.g. a map holding an array under a key) are handled by storing the nested map or array as a separate object and using a one-way reference from the parent to the nested object. 43 | 44 | **4** - Extremely large objects are handled by storing them as an external data slab and using a pointer to the external data slab. This way we maintain the size requirements of slabs and preserve the performance of Atree. In future work, external data slabs can be broken into a sequence of smaller slabs. 45 | 46 | **5** - Atree Ordered Map uses a collision handling design that is performant and resilient against hash-flooding attacks. It uses multi-level hashing that combines a fast 64-bit non-cryptographic hash with a 256-bit cryptographic hash. For speed, the cryptographic hash is only computed if there's a collision. For smaller storage size, the digests are divided into 64-bit segments with only the minimum required being stored. Collisions that cannot be resolved by hashes will eventually use linear lookup, but that is very unlikely as it would require collisions on two different hashes (CircleHash64f + BLAKE3) from the same input. 47 | 48 | **6** - Forwarding data slab pointers are used to make sequential iterations more efficient. 49 | 50 | ## OMT uses CircleHash64f with Deferred+Segmented BLAKE3 Digests 51 | 52 | Inputs hashed by OMT are typically short inputs (usually smaller than 128 bytes). OMT uses state-of-the-art hash algorithms and a novel collision-handling design to balance speed, security, and storage space. 53 | 54 | | | CircleHash64f 🏅
(seeded) | SipHash
(seeded) | BLAKE3 🏅
(crypto) | SHA3-256
(crypto) | 55 | |:-------------|:---:|:---:|:---:|:---:| 56 | | 4 bytes | 1.34 GB/s | 0.361 GB/s | 0.027 GB/s | 0.00491 GB/s | 57 | | 8 bytes | 2.70 GB/s | 0.642 GB/s | 0.106 GB/s | 0.00984 GB/s | 58 | | 16 bytes | 5.48 GB/s | 1.03 GB/s | 0.217 GB/s | 0.0197 GB/s | 59 | | 32 bytes | 8.01 GB/s | 1.46 GB/s | 0.462 GB/s | 0.0399 GB/s | 60 | | 64 bytes | 10.3 GB/s | 1.83 GB/s | 0.911 GB/s | 0.0812 GB/s | 61 | | 128 bytes | 12.8 GB/s | 2.09 GB/s | 1.03 GB/s | 0.172 GB/s | 62 | | 192 bytes | 14.2 GB/s | 2.17 GB/s | 1.04 GB/s | 0.158 GB/s | 63 | | 256 bytes | 15.0 GB/s | 2.22 GB/s | 1.06 GB/s | 0.219 GB/s | 64 | 65 | - Using Go 1.17.7, darwin_amd64, i7-1068NG7 CPU. 66 | - Results are from `go test -bench=. -count=20` and `benchstat`. 67 | - Some hash libraries have slowdowns at some larger sizes. 68 | 69 | ## API Reference 70 | 71 | Atree's API is [documented](https://pkg.go.dev/github.com/onflow/atree#section-documentation) with godoc at pkg.go.dev and will be updated when new versions of Atree are tagged. 72 | 73 | ## Contributing 74 | 75 | If you would like to contribute to Atree, have a look at the [contributing guide](https://github.com/onflow/atree/blob/main/CONTRIBUTING.md). 76 | 77 | Additionally, all non-error code paths must be covered by tests. And pull requests should not lower the code coverage percent. 78 | 79 | ## License 80 | 81 | The Atree library is licensed under the terms of the Apache license. See [LICENSE](LICENSE) for more information. 82 | 83 | Logo is based on the artwork of Raisul Hadi licensed under Creative Commons. 84 | 85 | Copyright © 2021-2024 Flow Foundation 86 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Responsible Disclosure Policy 2 | 3 | Flow was built from the ground up with security in mind. Our code, infrastructure, and development methodology helps us keep our users safe. 
4 | 5 | We really appreciate the community's help. Responsible disclosure of vulnerabilities helps to maintain the security and privacy of everyone. 6 | 7 | If you care about making a difference, please follow the guidelines below. 8 | 9 | # **Guidelines For Responsible Disclosure** 10 | 11 | We ask that all researchers adhere to these guidelines [here](https://flow.com/flow-responsible-disclosure) 12 | -------------------------------------------------------------------------------- /array_data_slab_encode.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import ( 22 | "encoding/binary" 23 | "fmt" 24 | ) 25 | 26 | // encodeAsInlined encodes inlined array data slab. 
Encoding is 27 | // version 1 with CBOR tag having tag number CBORTagInlinedArray, 28 | // and tag content as 3-element array: 29 | // 30 | // +------------------+----------------+----------+ 31 | // | extra data index | value ID index | elements | 32 | // +------------------+----------------+----------+ 33 | func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder) error { 34 | if a.extraData == nil { 35 | return NewEncodingError( 36 | fmt.Errorf("failed to encode non-root array data slab as inlined")) 37 | } 38 | 39 | if !a.inlined { 40 | return NewEncodingError( 41 | fmt.Errorf("failed to encode standalone array data slab as inlined")) 42 | } 43 | 44 | extraDataIndex, err := enc.inlinedExtraData().addArrayExtraData(a.extraData) 45 | if err != nil { 46 | // err is already categorized by InlinedExtraData.addArrayExtraData(). 47 | return err 48 | } 49 | 50 | if extraDataIndex > maxInlinedExtraDataIndex { 51 | return NewEncodingError( 52 | fmt.Errorf("failed to encode inlined array data slab: extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex)) 53 | } 54 | 55 | // Encode tag number and array head of 3 elements 56 | err = enc.CBOR.EncodeRawBytes([]byte{ 57 | // tag number 58 | 0xd8, CBORTagInlinedArray, 59 | // array head of 3 elements 60 | 0x83, 61 | }) 62 | if err != nil { 63 | return NewEncodingError(err) 64 | } 65 | 66 | // element 0: extra data index 67 | // NOTE: encoded extra data index is fixed sized CBOR uint 68 | err = enc.CBOR.EncodeRawBytes([]byte{ 69 | 0x18, 70 | byte(extraDataIndex), 71 | }) 72 | if err != nil { 73 | return NewEncodingError(err) 74 | } 75 | 76 | // element 1: slab index 77 | err = enc.CBOR.EncodeBytes(a.header.slabID.index[:]) 78 | if err != nil { 79 | return NewEncodingError(err) 80 | } 81 | 82 | // element 2: array elements 83 | err = a.encodeElements(enc) 84 | if err != nil { 85 | // err is already categorized by ArrayDataSlab.encodeElements(). 
86 | return err 87 | } 88 | 89 | err = enc.CBOR.Flush() 90 | if err != nil { 91 | return NewEncodingError(err) 92 | } 93 | 94 | return nil 95 | } 96 | 97 | // Encode encodes this array data slab to the given encoder. 98 | // 99 | // DataSlab Header: 100 | // 101 | // +-------------------------------+----------------------+---------------------------------+-----------------------------+ 102 | // | slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) | 103 | // +-------------------------------+----------------------+---------------------------------+-----------------------------+ 104 | // 105 | // Content: 106 | // 107 | // CBOR encoded array of elements 108 | // 109 | // See ArrayExtraData.Encode() for extra data section format. 110 | // See InlinedExtraData.Encode() for inlined extra data section format. 111 | func (a *ArrayDataSlab) Encode(enc *Encoder) error { 112 | 113 | if a.inlined { 114 | return a.encodeAsInlined(enc) 115 | } 116 | 117 | // Encoding is done in two steps: 118 | // 119 | // 1. Encode array elements using a new buffer while collecting inlined extra data from inlined elements. 120 | // 2. Encode slab with deduplicated inlined extra data and copy encoded elements from previous buffer. 121 | 122 | // Get a buffer from a pool to encode elements. 123 | elementBuf := getBuffer() 124 | defer putBuffer(elementBuf) 125 | 126 | elementEnc := NewEncoder(elementBuf, enc.encMode) 127 | 128 | err := a.encodeElements(elementEnc) 129 | if err != nil { 130 | // err is already categorized by ArrayDataSlab.encodeElements(). 
131 | return err 132 | } 133 | 134 | err = elementEnc.CBOR.Flush() 135 | if err != nil { 136 | return NewEncodingError(err) 137 | } 138 | 139 | const version = 1 140 | 141 | h, err := newArraySlabHead(version, slabArrayData) 142 | if err != nil { 143 | return NewEncodingError(err) 144 | } 145 | 146 | if a.HasPointer() { 147 | h.setHasPointers() 148 | } 149 | 150 | if a.next != SlabIDUndefined { 151 | h.setHasNextSlabID() 152 | } 153 | 154 | if a.extraData != nil { 155 | h.setRoot() 156 | } 157 | 158 | if elementEnc.hasInlinedExtraData() { 159 | h.setHasInlinedSlabs() 160 | } 161 | 162 | // Encode head (version + flag) 163 | _, err = enc.Write(h[:]) 164 | if err != nil { 165 | return NewEncodingError(err) 166 | } 167 | 168 | // Encode extra data 169 | if a.extraData != nil { 170 | // Use defaultEncodeTypeInfo to encode root level TypeInfo as is. 171 | err = a.extraData.Encode(enc, defaultEncodeTypeInfo) 172 | if err != nil { 173 | // err is already categorized by ArrayExtraData.Encode(). 174 | return err 175 | } 176 | } 177 | 178 | // Encode inlined extra data 179 | if elementEnc.hasInlinedExtraData() { 180 | err = elementEnc.inlinedExtraData().Encode(enc) 181 | if err != nil { 182 | // err is already categorized by inlinedExtraData.Encode(). 183 | return err 184 | } 185 | } 186 | 187 | // Encode next slab ID 188 | if a.next != SlabIDUndefined { 189 | n, err := a.next.ToRawBytes(enc.Scratch[:]) 190 | if err != nil { 191 | // Don't need to wrap because err is already categorized by SlabID.ToRawBytes(). 
192 | return err 193 | } 194 | 195 | _, err = enc.Write(enc.Scratch[:n]) 196 | if err != nil { 197 | return NewEncodingError(err) 198 | } 199 | } 200 | 201 | // Encode elements by copying raw bytes from previous buffer 202 | err = enc.CBOR.EncodeRawBytes(elementBuf.Bytes()) 203 | if err != nil { 204 | return NewEncodingError(err) 205 | } 206 | 207 | err = enc.CBOR.Flush() 208 | if err != nil { 209 | return NewEncodingError(err) 210 | } 211 | 212 | return nil 213 | } 214 | 215 | func (a *ArrayDataSlab) encodeElements(enc *Encoder) error { 216 | // Encode CBOR array size manually for fix-sized encoding 217 | 218 | enc.Scratch[0] = 0x80 | 25 219 | 220 | countOffset := 1 221 | const countSize = 2 222 | binary.BigEndian.PutUint16( 223 | enc.Scratch[countOffset:], 224 | uint16(len(a.elements)), 225 | ) 226 | 227 | // Write scratch content to encoder 228 | totalSize := countOffset + countSize 229 | err := enc.CBOR.EncodeRawBytes(enc.Scratch[:totalSize]) 230 | if err != nil { 231 | return NewEncodingError(err) 232 | } 233 | 234 | // Encode data slab content (array of elements) 235 | for _, e := range a.elements { 236 | err = e.Encode(enc) 237 | if err != nil { 238 | // Wrap err as external error (if needed) because err is returned by Storable interface. 239 | return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode array element") 240 | } 241 | } 242 | 243 | err = enc.CBOR.Flush() 244 | if err != nil { 245 | return NewEncodingError(err) 246 | } 247 | 248 | return nil 249 | } 250 | -------------------------------------------------------------------------------- /array_dump.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 
8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import ( 22 | "errors" 23 | "fmt" 24 | "strings" 25 | ) 26 | 27 | // PrintArray prints array slab data to stdout. 28 | func PrintArray(a *Array) { 29 | dumps, err := DumpArraySlabs(a) 30 | if err != nil { 31 | fmt.Println(err) 32 | return 33 | } 34 | fmt.Println(strings.Join(dumps, "\n")) 35 | } 36 | 37 | func DumpArraySlabs(a *Array) ([]string, error) { 38 | var dumps []string 39 | 40 | nextLevelIDs := []SlabID{a.SlabID()} 41 | 42 | var overflowIDs []SlabID 43 | 44 | level := 0 45 | for len(nextLevelIDs) > 0 { 46 | 47 | ids := nextLevelIDs 48 | 49 | nextLevelIDs = []SlabID(nil) 50 | 51 | for _, id := range ids { 52 | 53 | slab, err := getArraySlab(a.Storage, id) 54 | if err != nil { 55 | // Don't need to wrap error as external error because err is already categorized by getArraySlab(). 
56 | return nil, err 57 | } 58 | 59 | switch slab := slab.(type) { 60 | case *ArrayDataSlab: 61 | dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) 62 | 63 | overflowIDs = getSlabIDFromStorable(slab, overflowIDs) 64 | 65 | case *ArrayMetaDataSlab: 66 | dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) 67 | 68 | for _, storable := range slab.ChildStorables() { 69 | id, ok := storable.(SlabIDStorable) 70 | if !ok { 71 | return nil, NewFatalError(errors.New("metadata slab's child storables are not of type SlabIDStorable")) 72 | } 73 | nextLevelIDs = append(nextLevelIDs, SlabID(id)) 74 | } 75 | } 76 | } 77 | 78 | level++ 79 | } 80 | 81 | for _, id := range overflowIDs { 82 | slab, found, err := a.Storage.Retrieve(id) 83 | if err != nil { 84 | // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 85 | return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", id)) 86 | } 87 | if !found { 88 | return nil, NewSlabNotFoundErrorf(id, "slab not found during array slab dump") 89 | } 90 | dumps = append(dumps, slab.String()) 91 | } 92 | 93 | return dumps, nil 94 | } 95 | -------------------------------------------------------------------------------- /array_extradata.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import ( 22 | "fmt" 23 | 24 | "github.com/fxamacker/cbor/v2" 25 | ) 26 | 27 | type ArrayExtraData struct { 28 | TypeInfo TypeInfo // array type 29 | } 30 | 31 | var _ ExtraData = &ArrayExtraData{} 32 | 33 | const arrayExtraDataLength = 1 34 | 35 | func newArrayExtraDataFromData( 36 | data []byte, 37 | decMode cbor.DecMode, 38 | decodeTypeInfo TypeInfoDecoder, 39 | ) ( 40 | *ArrayExtraData, 41 | []byte, 42 | error, 43 | ) { 44 | dec := decMode.NewByteStreamDecoder(data) 45 | 46 | extraData, err := newArrayExtraData(dec, decodeTypeInfo) 47 | if err != nil { 48 | return nil, data, err 49 | } 50 | 51 | return extraData, data[dec.NumBytesDecoded():], nil 52 | } 53 | 54 | // newArrayExtraData decodes CBOR array to extra data: 55 | // 56 | // cborArray{type info} 57 | func newArrayExtraData(dec *cbor.StreamDecoder, decodeTypeInfo TypeInfoDecoder) (*ArrayExtraData, error) { 58 | length, err := dec.DecodeArrayHead() 59 | if err != nil { 60 | return nil, NewDecodingError(err) 61 | } 62 | 63 | if length != arrayExtraDataLength { 64 | return nil, NewDecodingError( 65 | fmt.Errorf( 66 | "array extra data has invalid length %d, want %d", 67 | length, 68 | arrayExtraDataLength, 69 | )) 70 | } 71 | 72 | typeInfo, err := decodeTypeInfo(dec) 73 | if err != nil { 74 | // Wrap err as external error (if needed) because err is returned by TypeInfoDecoder callback. 
75 | return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode type info") 76 | } 77 | 78 | return &ArrayExtraData{TypeInfo: typeInfo}, nil 79 | } 80 | 81 | // Encode encodes extra data as CBOR array: 82 | // 83 | // [type info] 84 | func (a *ArrayExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error { 85 | err := enc.CBOR.EncodeArrayHead(arrayExtraDataLength) 86 | if err != nil { 87 | return NewEncodingError(err) 88 | } 89 | 90 | err = encodeTypeInfo(enc, a.TypeInfo) 91 | if err != nil { 92 | // Wrap err as external error (if needed) because err is returned by TypeInfo interface. 93 | return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode type info") 94 | } 95 | 96 | err = enc.CBOR.Flush() 97 | if err != nil { 98 | return NewEncodingError(err) 99 | } 100 | 101 | return nil 102 | } 103 | 104 | func (a *ArrayExtraData) isExtraData() bool { 105 | return true 106 | } 107 | 108 | func (a *ArrayExtraData) Type() TypeInfo { 109 | return a.TypeInfo 110 | } 111 | -------------------------------------------------------------------------------- /array_metadata_slab_encode.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package atree 20 | 21 | import ( 22 | "encoding/binary" 23 | ) 24 | 25 | // Encode encodes this array meta-data slab to the given encoder. 26 | // 27 | // Root MetaDataSlab Header: 28 | // 29 | // +------------------------------+------------+--------------------------------+------------------------------+ 30 | // | slab version + flag (2 byte) | extra data | child shared address (8 bytes) | child header count (2 bytes) | 31 | // +------------------------------+------------+--------------------------------+------------------------------+ 32 | // 33 | // Non-root MetaDataSlab Header (12 bytes): 34 | // 35 | // +------------------------------+--------------------------------+------------------------------+ 36 | // | slab version + flag (2 byte) | child shared address (8 bytes) | child header count (2 bytes) | 37 | // +------------------------------+--------------------------------+------------------------------+ 38 | // 39 | // Content (n * 14 bytes): 40 | // 41 | // [[slab index (8 bytes), count (4 bytes), size (2 bytes)], ...] 42 | // 43 | // See ArrayExtraData.Encode() for extra data section format. 44 | func (a *ArrayMetaDataSlab) Encode(enc *Encoder) error { 45 | 46 | const version = 1 47 | 48 | h, err := newArraySlabHead(version, slabArrayMeta) 49 | if err != nil { 50 | return NewEncodingError(err) 51 | } 52 | 53 | if a.extraData != nil { 54 | h.setRoot() 55 | } 56 | 57 | // Write head (version + flag) 58 | _, err = enc.Write(h[:]) 59 | if err != nil { 60 | return NewEncodingError(err) 61 | } 62 | 63 | // Encode extra data if present 64 | if a.extraData != nil { 65 | // Use defaultEncodeTypeInfo to encode root level TypeInfo as is. 66 | err = a.extraData.Encode(enc, defaultEncodeTypeInfo) 67 | if err != nil { 68 | // Don't need to wrap because err is already categorized by ArrayExtraData.Encode(). 
69 | return err 70 | } 71 | } 72 | 73 | // Encode shared address to scratch 74 | copy(enc.Scratch[:], a.header.slabID.address[:]) 75 | 76 | // Encode child header count to scratch 77 | const childHeaderCountOffset = SlabAddressLength 78 | binary.BigEndian.PutUint16( 79 | enc.Scratch[childHeaderCountOffset:], 80 | uint16(len(a.childrenHeaders)), 81 | ) 82 | 83 | // Write scratch content to encoder 84 | const totalSize = childHeaderCountOffset + 2 85 | _, err = enc.Write(enc.Scratch[:totalSize]) 86 | if err != nil { 87 | return NewEncodingError(err) 88 | } 89 | 90 | // Encode children headers 91 | for _, h := range a.childrenHeaders { 92 | // Encode slab index to scratch 93 | copy(enc.Scratch[:], h.slabID.index[:]) 94 | 95 | // Encode count 96 | const countOffset = SlabIndexLength 97 | binary.BigEndian.PutUint32(enc.Scratch[countOffset:], h.count) 98 | 99 | // Encode size 100 | const sizeOffset = countOffset + 4 101 | binary.BigEndian.PutUint16(enc.Scratch[sizeOffset:], uint16(h.size)) 102 | 103 | const totalSize = sizeOffset + 2 104 | _, err = enc.Write(enc.Scratch[:totalSize]) 105 | if err != nil { 106 | return NewEncodingError(err) 107 | } 108 | } 109 | 110 | return nil 111 | } 112 | -------------------------------------------------------------------------------- /array_size_consts.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | // NOTE: we use encoding size (in bytes) instead of Go type size for slab operations, 22 | // such as merge and split, so size constants here are related to encoding size. 23 | const ( 24 | 25 | // version and flag size: version (1 byte) + flag (1 byte) 26 | versionAndFlagSize = 2 27 | 28 | // slab header size: slab index (8 bytes) + count (4 bytes) + size (2 bytes) 29 | // Support up to 4,294,967,295 elements in each array. 30 | // Support up to 65,535 bytes for slab size limit (default limit is 1536 max bytes). 31 | arraySlabHeaderSize = SlabIndexLength + 4 + 2 32 | 33 | // meta data slab prefix size: version (1 byte) + flag (1 byte) + address (8 bytes) + child header count (2 bytes) 34 | // Support up to 65,535 children per metadata slab. 35 | arrayMetaDataSlabPrefixSize = versionAndFlagSize + SlabAddressLength + 2 36 | 37 | // Encoded element head in array data slab (fixed-size for easy computation). 38 | arrayDataSlabElementHeadSize = 3 39 | 40 | // non-root data slab prefix size: version (1 byte) + flag (1 byte) + next id (16 bytes) + element array head (3 bytes) 41 | // Support up to 65,535 elements in the array per data slab. 42 | arrayDataSlabPrefixSize = versionAndFlagSize + SlabIDLength + arrayDataSlabElementHeadSize 43 | 44 | // root data slab prefix size: version (1 byte) + flag (1 byte) + element array head (3 bytes) 45 | // Support up to 65,535 elements in the array per data slab. 
46 | arrayRootDataSlabPrefixSize = versionAndFlagSize + arrayDataSlabElementHeadSize 47 | 48 | // inlined tag number size: CBOR tag number CBORTagInlinedArray or CBORTagInlinedMap 49 | inlinedTagNumSize = 2 50 | 51 | // inlined CBOR array head size: CBOR array head of 3 elements (extra data index, value id, elements) 52 | inlinedCBORArrayHeadSize = 1 53 | 54 | // inlined extra data index size: CBOR positive number encoded in 2 bytes [0, 255] (fixed-size for easy computation) 55 | inlinedExtraDataIndexSize = 2 56 | 57 | // inlined CBOR byte string head size for value ID: CBOR byte string head for byte string of 8 bytes 58 | inlinedCBORValueIDHeadSize = 1 59 | 60 | // inlined value id size: encoded in 8 bytes 61 | inlinedValueIDSize = 8 62 | 63 | // inlined array data slab prefix size: 64 | // tag number (2 bytes) + 65 | // 3-element array head (1 byte) + 66 | // extra data index (2 bytes) [0, 255] + 67 | // value ID index head (1 byte) + 68 | // value ID index (8 bytes) + 69 | // element array head (3 bytes) 70 | inlinedArrayDataSlabPrefixSize = inlinedTagNumSize + 71 | inlinedCBORArrayHeadSize + 72 | inlinedExtraDataIndexSize + 73 | inlinedCBORValueIDHeadSize + 74 | inlinedValueIDSize + 75 | arrayDataSlabElementHeadSize 76 | 77 | maxInlinedExtraDataIndex = 255 78 | ) 79 | -------------------------------------------------------------------------------- /array_slab.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 
8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | type ArraySlabHeader struct { 22 | slabID SlabID // id is used to retrieve slab from storage 23 | size uint32 // size is used to split and merge; leaf: size of all element; internal: size of all headers 24 | count uint32 // count is used to lookup element; leaf: number of elements; internal: number of elements in all its headers 25 | } 26 | 27 | type ArraySlab interface { 28 | Slab 29 | 30 | Get(storage SlabStorage, index uint64) (Storable, error) 31 | Set(storage SlabStorage, address Address, index uint64, value Value) (Storable, error) 32 | Insert(storage SlabStorage, address Address, index uint64, value Value) error 33 | Remove(storage SlabStorage, index uint64) (Storable, error) 34 | 35 | IsData() bool 36 | 37 | IsFull() bool 38 | IsUnderflow() (uint32, bool) 39 | CanLendToLeft(size uint32) bool 40 | CanLendToRight(size uint32) bool 41 | 42 | SetSlabID(SlabID) 43 | 44 | Header() ArraySlabHeader 45 | 46 | ExtraData() *ArrayExtraData 47 | RemoveExtraData() *ArrayExtraData 48 | SetExtraData(*ArrayExtraData) 49 | 50 | PopIterate(SlabStorage, ArrayPopIterationFunc) error 51 | 52 | Inlined() bool 53 | Inlinable(maxInlineSize uint64) bool 54 | Inline(SlabStorage) error 55 | Uninline(SlabStorage) error 56 | } 57 | 58 | func getArraySlab(storage SlabStorage, id SlabID) (ArraySlab, error) { 59 | slab, found, err := storage.Retrieve(id) 60 | if err != nil { 61 | // err can be an external error because storage is an interface. 
62 | return nil, wrapErrorAsExternalErrorIfNeeded(err) 63 | } 64 | if !found { 65 | return nil, NewSlabNotFoundErrorf(id, "array slab not found") 66 | } 67 | arraySlab, ok := slab.(ArraySlab) 68 | if !ok { 69 | return nil, NewSlabDataErrorf("slab %s isn't ArraySlab", id) 70 | } 71 | return arraySlab, nil 72 | } 73 | 74 | func firstArrayDataSlab(storage SlabStorage, slab ArraySlab) (*ArrayDataSlab, error) { 75 | switch slab := slab.(type) { 76 | case *ArrayDataSlab: 77 | return slab, nil 78 | 79 | case *ArrayMetaDataSlab: 80 | firstChildID := slab.childrenHeaders[0].slabID 81 | firstChild, err := getArraySlab(storage, firstChildID) 82 | if err != nil { 83 | // Don't need to wrap error as external error because err is already categorized by getArraySlab(). 84 | return nil, err 85 | } 86 | // Don't need to wrap error as external error because err is already categorized by firstArrayDataSlab(). 87 | return firstArrayDataSlab(storage, firstChild) 88 | 89 | default: 90 | return nil, NewUnreachableError() 91 | } 92 | } 93 | 94 | // getArrayDataSlabWithIndex returns data slab containing element at specified index 95 | func getArrayDataSlabWithIndex(storage SlabStorage, slab ArraySlab, index uint64) (*ArrayDataSlab, uint64, error) { 96 | if slab.IsData() { 97 | dataSlab := slab.(*ArrayDataSlab) 98 | if index >= uint64(len(dataSlab.elements)) { 99 | return nil, 0, NewIndexOutOfBoundsError(index, 0, uint64(len(dataSlab.elements))) 100 | } 101 | return dataSlab, index, nil 102 | } 103 | 104 | metaSlab := slab.(*ArrayMetaDataSlab) 105 | _, adjustedIndex, childID, err := metaSlab.childSlabIndexInfo(index) 106 | if err != nil { 107 | // Don't need to wrap error as external error because err is already categorized by ArrayMetadataSlab.childSlabIndexInfo(). 108 | return nil, 0, err 109 | } 110 | 111 | child, err := getArraySlab(storage, childID) 112 | if err != nil { 113 | // Don't need to wrap error as external error because err is already categorized by getArraySlab(). 
114 | return nil, 0, err 115 | } 116 | 117 | // Don't need to wrap error as external error because err is already categorized by getArrayDataSlabWithIndex(). 118 | return getArrayDataSlabWithIndex(storage, child, adjustedIndex) 119 | } 120 | -------------------------------------------------------------------------------- /array_slab_stats.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import "fmt" 22 | 23 | type arrayStats struct { 24 | Levels uint64 25 | ElementCount uint64 26 | MetaDataSlabCount uint64 27 | DataSlabCount uint64 28 | StorableSlabCount uint64 29 | } 30 | 31 | func (s *arrayStats) SlabCount() uint64 { 32 | return s.DataSlabCount + s.MetaDataSlabCount + s.StorableSlabCount 33 | } 34 | 35 | // GetArrayStats returns stats about array slabs. 
36 | func GetArrayStats(a *Array) (arrayStats, error) { 37 | level := uint64(0) 38 | metaDataSlabCount := uint64(0) 39 | dataSlabCount := uint64(0) 40 | storableSlabCount := uint64(0) 41 | 42 | nextLevelIDs := []SlabID{a.SlabID()} 43 | 44 | for len(nextLevelIDs) > 0 { 45 | 46 | ids := nextLevelIDs 47 | 48 | nextLevelIDs = []SlabID(nil) 49 | 50 | for _, id := range ids { 51 | 52 | slab, err := getArraySlab(a.Storage, id) 53 | if err != nil { 54 | // Don't need to wrap error as external error because err is already categorized by getArraySlab(). 55 | return arrayStats{}, err 56 | } 57 | 58 | switch slab.(type) { 59 | case *ArrayDataSlab: 60 | dataSlabCount++ 61 | 62 | ids := getSlabIDFromStorable(slab, nil) 63 | storableSlabCount += uint64(len(ids)) 64 | 65 | case *ArrayMetaDataSlab: 66 | metaDataSlabCount++ 67 | 68 | for _, storable := range slab.ChildStorables() { 69 | id, ok := storable.(SlabIDStorable) 70 | if !ok { 71 | return arrayStats{}, NewFatalError(fmt.Errorf("metadata slab's child storables are not of type SlabIDStorable")) 72 | } 73 | nextLevelIDs = append(nextLevelIDs, SlabID(id)) 74 | } 75 | } 76 | } 77 | 78 | level++ 79 | 80 | } 81 | 82 | return arrayStats{ 83 | Levels: level, 84 | ElementCount: a.Count(), 85 | MetaDataSlabCount: metaDataSlabCount, 86 | DataSlabCount: dataSlabCount, 87 | StorableSlabCount: storableSlabCount, 88 | }, nil 89 | } 90 | -------------------------------------------------------------------------------- /buffer.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 
8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import ( 22 | "bytes" 23 | "sync" 24 | ) 25 | 26 | var bufferPool = sync.Pool{ 27 | New: func() any { 28 | e := new(bytes.Buffer) 29 | e.Grow(int(maxThreshold)) 30 | return e 31 | }, 32 | } 33 | 34 | func getBuffer() *bytes.Buffer { 35 | return bufferPool.Get().(*bytes.Buffer) 36 | } 37 | 38 | func putBuffer(e *bytes.Buffer) { 39 | e.Reset() 40 | bufferPool.Put(e) 41 | } 42 | -------------------------------------------------------------------------------- /cbor_tag_nums.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import "fmt" 22 | 23 | const ( 24 | // WARNING: tag numbers defined in here in github.com/onflow/atree 25 | // MUST not overlap with tag numbers used by Cadence internal value encoding. 26 | // As of Aug. 
14, 2024, Cadence uses tag numbers from 128 to 230. 27 | // See runtime/interpreter/encode.go at github.com/onflow/cadence. 28 | 29 | // Atree reserves CBOR tag numbers [240, 255] for internal use. 30 | // Applications must use non-overlapping CBOR tag numbers to encode 31 | // elements managed by atree containers. 32 | minInternalCBORTagNumber = 240 33 | maxInternalCBORTagNumber = 255 34 | 35 | // Reserved CBOR tag numbers for atree internal use. 36 | 37 | // Replace _ when new tag number is needed (use higher tag numbers first). 38 | // Atree will use higher tag numbers first because Cadence will use lower tag numbers first. 39 | // This approach allows more flexibility in case we need to revisit ranges used by Atree and Cadence. 40 | 41 | _ = 240 42 | _ = 241 43 | _ = 242 44 | _ = 243 45 | _ = 244 46 | _ = 245 47 | 48 | CBORTagTypeInfoRef = 246 49 | 50 | CBORTagInlinedArrayExtraData = 247 51 | CBORTagInlinedMapExtraData = 248 52 | CBORTagInlinedCompactMapExtraData = 249 53 | 54 | CBORTagInlinedArray = 250 55 | CBORTagInlinedMap = 251 56 | CBORTagInlinedCompactMap = 252 57 | 58 | CBORTagInlineCollisionGroup = 253 59 | CBORTagExternalCollisionGroup = 254 60 | 61 | CBORTagSlabID = 255 62 | ) 63 | 64 | // IsCBORTagNumberRangeAvailable returns true if the specified range is not reserved for internal use by atree. 65 | // Applications must only use available (unreserved) CBOR tag numbers to encode elements in atree managed containers. 66 | func IsCBORTagNumberRangeAvailable(minTagNum, maxTagNum uint64) (bool, error) { 67 | if minTagNum > maxTagNum { 68 | return false, NewUserError(fmt.Errorf("min CBOR tag number %d must be <= max CBOR tag number %d", minTagNum, maxTagNum)) 69 | } 70 | 71 | return maxTagNum < minInternalCBORTagNumber || minTagNum > maxInternalCBORTagNumber, nil 72 | } 73 | 74 | // ReservedCBORTagNumberRange returns minTagNum and maxTagNum of the range of CBOR tag numbers 75 | // reserved for internal use by atree. 
76 | func ReservedCBORTagNumberRange() (minTagNum, maxTagNum uint64) { 77 | return minInternalCBORTagNumber, maxInternalCBORTagNumber 78 | } 79 | -------------------------------------------------------------------------------- /check-headers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | files=$(find . -name \*.go -type f -print0 | xargs -0 grep -L -E '(Licensed under the Apache License)|(Code generated (from|by))') 4 | if [ -n "$files" ]; then 5 | echo "Missing license header in:" 6 | echo "$files" 7 | exit 1 8 | fi 9 | -------------------------------------------------------------------------------- /cmd/smoke/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package main 20 | 21 | import ( 22 | "flag" 23 | "fmt" 24 | "io" 25 | "os" 26 | "os/signal" 27 | "strconv" 28 | "strings" 29 | "syscall" 30 | "time" 31 | 32 | "github.com/onflow/atree" 33 | "github.com/onflow/atree/test_utils" 34 | 35 | "github.com/fxamacker/cbor/v2" 36 | ) 37 | 38 | const maxStatusLength = 128 39 | 40 | type Status interface { 41 | Write() 42 | } 43 | 44 | func writeStatus(status string) { 45 | // Clear old status 46 | s := fmt.Sprintf("\r%s\r", strings.Repeat(" ", maxStatusLength)) 47 | _, _ = io.WriteString(os.Stdout, s) 48 | 49 | // Write new status 50 | _, _ = io.WriteString(os.Stdout, status) 51 | } 52 | 53 | func updateStatus(sigc <-chan os.Signal, status Status) { 54 | 55 | status.Write() 56 | 57 | ticker := time.NewTicker(3 * time.Second) 58 | 59 | for { 60 | select { 61 | case <-ticker.C: 62 | status.Write() 63 | 64 | case <-sigc: 65 | status.Write() 66 | fmt.Fprintf(os.Stdout, "\n") 67 | 68 | ticker.Stop() 69 | os.Exit(1) 70 | } 71 | } 72 | } 73 | 74 | var cborEncMode = func() cbor.EncMode { 75 | encMode, err := cbor.EncOptions{}.EncMode() 76 | if err != nil { 77 | panic(fmt.Sprintf("Failed to create CBOR encoding mode: %s", err)) 78 | } 79 | return encMode 80 | }() 81 | 82 | var cborDecMode = func() cbor.DecMode { 83 | decMode, err := cbor.DecOptions{}.DecMode() 84 | if err != nil { 85 | panic(fmt.Sprintf("Failed to create CBOR decoding mode: %s\n", err)) 86 | } 87 | return decMode 88 | }() 89 | 90 | var ( 91 | flagType string 92 | flagCheckSlabEnabled bool 93 | flagMaxLength uint64 94 | flagSeedHex string 95 | flagMinHeapAllocMiB, flagMaxHeapAllocMiB uint64 96 | flagMinOpsForStorageHealthCheck uint64 97 | flagAlwaysUseWrapperValue bool 98 | flagIterationCount uint64 99 | ) 100 | 101 | func main() { 102 | 103 | flag.StringVar(&flagType, "type", "array", "array or map") 104 | flag.BoolVar(&flagCheckSlabEnabled, "slabcheck", false, "in memory and serialized slab check") 105 | 
flag.Uint64Var(&flagMinOpsForStorageHealthCheck, "minOpsForStorageHealthCheck", 100, "number of operations for storage health check") 106 | flag.Uint64Var(&flagMaxLength, "maxlen", 10_000, "max number of elements") 107 | flag.StringVar(&flagSeedHex, "seed", "", "seed for prng in hex (default is Unix time)") 108 | flag.Uint64Var(&flagMinHeapAllocMiB, "minheap", 1000, "min HeapAlloc in MiB to stop extra removal of elements") 109 | flag.Uint64Var(&flagMaxHeapAllocMiB, "maxheap", 2000, "max HeapAlloc in MiB to trigger extra removal of elements") 110 | flag.BoolVar(&flagAlwaysUseWrapperValue, "wrappervalue", false, "always use wrapper value") 111 | flag.Uint64Var(&flagIterationCount, "count", 0, "(testing) number of ops") 112 | 113 | flag.Parse() 114 | 115 | var seed int64 116 | if len(flagSeedHex) != 0 { 117 | var err error 118 | seed, err = strconv.ParseInt(strings.ReplaceAll(flagSeedHex, "0x", ""), 16, 64) 119 | if err != nil { 120 | fmt.Fprintf(os.Stderr, "failed to parse seed flag (hex string) %s: %s", flagSeedHex, err) 121 | return 122 | } 123 | } 124 | 125 | r = newRand(seed) 126 | 127 | flagType = strings.ToLower(flagType) 128 | 129 | if flagType != "array" && flagType != "map" { 130 | fmt.Fprintf(os.Stderr, "Please specify type as either \"array\" or \"map\"") 131 | return 132 | } 133 | 134 | sigc := make(chan os.Signal, 1) 135 | signal.Notify(sigc, os.Interrupt, syscall.SIGTERM) 136 | 137 | baseStorage := test_utils.NewInMemBaseStorage() 138 | 139 | storage := atree.NewPersistentSlabStorage( 140 | baseStorage, 141 | cborEncMode, 142 | cborDecMode, 143 | test_utils.DecodeStorable, 144 | decodeTypeInfo, 145 | ) 146 | 147 | clearStorage := func() { 148 | // Clear storage deltas and cache 149 | storage.DropDeltas() 150 | storage.DropCache() 151 | 152 | // Clear base storage reports 153 | baseStorage.ResetReporter() 154 | } 155 | 156 | address := atree.Address{1, 2, 3, 4, 5, 6, 7, 8} 157 | 158 | switch flagType { 159 | 160 | case "array": 161 | var msg string 162 | 
if flagCheckSlabEnabled { 163 | msg = fmt.Sprintf("Starting array stress test with slab check, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) 164 | } else { 165 | msg = fmt.Sprintf("Starting array stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) 166 | } 167 | fmt.Println(msg) 168 | 169 | status := newArrayStatus() 170 | 171 | go updateStatus(sigc, status) 172 | 173 | testArray(storage, address, status, clearStorage) 174 | 175 | case "map": 176 | var msg string 177 | if flagCheckSlabEnabled { 178 | msg = fmt.Sprintf("Starting map stress test with slab check, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) 179 | } else { 180 | msg = fmt.Sprintf("Starting map stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) 181 | } 182 | fmt.Println(msg) 183 | 184 | status := newMapStatus() 185 | 186 | go updateStatus(sigc, status) 187 | 188 | testMap(storage, address, status, clearStorage) 189 | } 190 | 191 | } 192 | -------------------------------------------------------------------------------- /cmd/smoke/storable.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package main 20 | 21 | import ( 22 | "fmt" 23 | 24 | "github.com/onflow/atree" 25 | ) 26 | 27 | const ( 28 | reservedMinTagNum = 161 29 | reservedMinTagNumForContainerType = 230 30 | reservedMaxTagNum = 239 31 | ) 32 | 33 | const ( 34 | // CBOR tag numbers used to encode container types. 35 | // Replace _ when new tag number is needed (use lower tag numbers first). 36 | 37 | arrayTypeTagNum = reservedMinTagNumForContainerType + iota 38 | compositeTypeTagNum 39 | mapTypeTagNum 40 | _ 41 | _ 42 | _ 43 | _ 44 | _ 45 | _ 46 | _ 47 | ) 48 | 49 | func init() { 50 | // Check if the CBOR tag number range is reserved for internal use by atree. 51 | // Smoke tests must only use available (unreserved by atree) CBOR tag numbers 52 | // to encode elements in atree managed containers. 53 | 54 | // As of Aug 15, 2024: 55 | // - Atree reserves CBOR tag numbers [240, 255] for atree internal use. 56 | // - Smoke tests reserve CBOR tag numbers [161, 239] to encode elements. 57 | 58 | tagNumOK, err := atree.IsCBORTagNumberRangeAvailable(reservedMinTagNum, reservedMaxTagNum) 59 | if err != nil { 60 | panic(err) 61 | } 62 | 63 | if !tagNumOK { 64 | atreeMinTagNum, atreeMaxTagNum := atree.ReservedCBORTagNumberRange() 65 | panic(fmt.Errorf( 66 | "smoke test tag numbers [%d, %d] overlaps with atree internal tag numbers [%d, %d]", 67 | reservedMinTagNum, 68 | reservedMaxTagNum, 69 | atreeMinTagNum, 70 | atreeMaxTagNum)) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | ignore: 2 | - "cmd/**/*" 3 | - "logs/" 4 | -------------------------------------------------------------------------------- /compactmap_extradata.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache 
License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import ( 22 | "encoding/binary" 23 | "fmt" 24 | "sort" 25 | "strings" 26 | 27 | "github.com/fxamacker/cbor/v2" 28 | ) 29 | 30 | // compactMapExtraData is used for inlining compact values. 31 | // compactMapExtraData includes hkeys and keys with map extra data 32 | // because hkeys and keys are the same in order and content for 33 | // all values with the same compact type and map seed. 34 | type compactMapExtraData struct { 35 | mapExtraData *MapExtraData 36 | hkeys []Digest // hkeys is ordered by mapExtraData.Seed 37 | keys []ComparableStorable // keys is ordered by mapExtraData.Seed 38 | } 39 | 40 | var _ ExtraData = &compactMapExtraData{} 41 | 42 | const compactMapExtraDataLength = 3 43 | 44 | func newCompactMapExtraData( 45 | dec *cbor.StreamDecoder, 46 | decodeTypeInfo TypeInfoDecoder, 47 | decodeStorable StorableDecoder, 48 | ) (*compactMapExtraData, error) { 49 | 50 | length, err := dec.DecodeArrayHead() 51 | if err != nil { 52 | return nil, NewDecodingError(err) 53 | } 54 | 55 | if length != compactMapExtraDataLength { 56 | return nil, NewDecodingError( 57 | fmt.Errorf( 58 | "compact extra data has invalid length %d, want %d", 59 | length, 60 | arrayExtraDataLength, 61 | )) 62 | } 63 | 64 | // element 0: map extra data 65 | mapExtraData, err := newMapExtraData(dec, decodeTypeInfo) 66 | if err != nil { 67 | // err is already categorized by newMapExtraData(). 
68 | return nil, err 69 | } 70 | 71 | // element 1: digests 72 | digestBytes, err := dec.DecodeBytes() 73 | if err != nil { 74 | return nil, NewDecodingError(err) 75 | } 76 | 77 | if len(digestBytes)%digestSize != 0 { 78 | return nil, NewDecodingError( 79 | fmt.Errorf( 80 | "decoding digests failed: number of bytes %d is not multiple of %d", 81 | len(digestBytes), 82 | digestSize)) 83 | } 84 | 85 | digestCount := len(digestBytes) / digestSize 86 | 87 | // element 2: keys 88 | keyCount, err := dec.DecodeArrayHead() 89 | if err != nil { 90 | return nil, NewDecodingError(err) 91 | } 92 | 93 | if keyCount != uint64(digestCount) { 94 | return nil, NewDecodingError( 95 | fmt.Errorf( 96 | "decoding compact map key failed: number of keys %d is different from number of digests %d", 97 | keyCount, 98 | digestCount)) 99 | } 100 | 101 | hkeys := make([]Digest, digestCount) 102 | for i := range hkeys { 103 | hkeys[i] = Digest(binary.BigEndian.Uint64(digestBytes[i*digestSize:])) 104 | } 105 | 106 | keys := make([]ComparableStorable, keyCount) 107 | for i := range keys { 108 | // Decode compact map key 109 | key, err := decodeStorable(dec, SlabIDUndefined, nil) 110 | if err != nil { 111 | // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
112 | return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode key's storable") 113 | } 114 | compactMapKey, ok := key.(ComparableStorable) 115 | if !ok { 116 | return nil, NewDecodingError(fmt.Errorf("failed to decode key's storable: got %T, expect ComparableStorable", key)) 117 | } 118 | keys[i] = compactMapKey 119 | } 120 | 121 | return &compactMapExtraData{mapExtraData: mapExtraData, hkeys: hkeys, keys: keys}, nil 122 | } 123 | 124 | func (c *compactMapExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error { 125 | err := enc.CBOR.EncodeArrayHead(compactMapExtraDataLength) 126 | if err != nil { 127 | return NewEncodingError(err) 128 | } 129 | 130 | // element 0: map extra data 131 | err = c.mapExtraData.Encode(enc, encodeTypeInfo) 132 | if err != nil { 133 | // err is already categorized by MapExtraData.Encode(). 134 | return err 135 | } 136 | 137 | // element 1: digests 138 | totalDigestSize := len(c.hkeys) * digestSize 139 | 140 | var digests []byte 141 | if totalDigestSize <= len(enc.Scratch) { 142 | digests = enc.Scratch[:totalDigestSize] 143 | } else { 144 | digests = make([]byte, totalDigestSize) 145 | } 146 | 147 | for i := range c.hkeys { 148 | binary.BigEndian.PutUint64(digests[i*digestSize:], uint64(c.hkeys[i])) 149 | } 150 | 151 | err = enc.CBOR.EncodeBytes(digests) 152 | if err != nil { 153 | return NewEncodingError(err) 154 | } 155 | 156 | // element 2: field names 157 | err = enc.CBOR.EncodeArrayHead(uint64(len(c.keys))) 158 | if err != nil { 159 | return NewEncodingError(err) 160 | } 161 | 162 | for _, key := range c.keys { 163 | err = key.Encode(enc) 164 | if err != nil { 165 | // Wrap err as external error (if needed) because err is returned by ComparableStorable.Encode(). 
166 | return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode key's storable") 167 | } 168 | } 169 | 170 | err = enc.CBOR.Flush() 171 | if err != nil { 172 | return NewEncodingError(err) 173 | } 174 | 175 | return nil 176 | } 177 | 178 | func (c *compactMapExtraData) isExtraData() bool { 179 | return true 180 | } 181 | 182 | func (c *compactMapExtraData) Type() TypeInfo { 183 | return c.mapExtraData.TypeInfo 184 | } 185 | 186 | // makeCompactMapTypeID returns id of concatenated t.ID() with sorted names with "," as separator. 187 | func makeCompactMapTypeID(encodedTypeInfo string, names []ComparableStorable) string { 188 | const separator = "," 189 | 190 | if len(names) == 0 { 191 | return encodedTypeInfo 192 | } 193 | 194 | if len(names) == 1 { 195 | return encodedTypeInfo + separator + names[0].ID() 196 | } 197 | 198 | sorter := newFieldNameSorter(names) 199 | 200 | sort.Sort(sorter) 201 | 202 | return encodedTypeInfo + separator + sorter.join(separator) 203 | } 204 | 205 | // fieldNameSorter sorts names by index (not in place sort). 
type fieldNameSorter struct {
	names []ComparableStorable
	index []int // permutation over names; sorting moves indexes so names stays untouched
}

// newFieldNameSorter returns a sorter over names that sorts an index
// permutation instead of mutating names itself.
func newFieldNameSorter(names []ComparableStorable) *fieldNameSorter {
	index := make([]int, len(names))
	for i := range index {
		index[i] = i
	}
	return &fieldNameSorter{
		names: names,
		index: index,
	}
}

func (fn *fieldNameSorter) Len() int {
	return len(fn.names)
}

func (fn *fieldNameSorter) Less(i, j int) bool {
	// Compare through the permutation, since Swap only moves indexes.
	i = fn.index[i]
	j = fn.index[j]
	return fn.names[i].Less(fn.names[j])
}

func (fn *fieldNameSorter) Swap(i, j int) {
	fn.index[i], fn.index[j] = fn.index[j], fn.index[i]
}

// join concatenates names in sorted (permutation) order, separated by sep.
func (fn *fieldNameSorter) join(sep string) string {
	var sb strings.Builder
	for i, index := range fn.index {
		if i > 0 {
			sb.WriteString(sep)
		}
		sb.WriteString(fn.names[index].ID())
	}
	return sb.String()
}
-------------------------------------------------------------------------------- /decode.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import "github.com/fxamacker/cbor/v2"

// StorableDecoder decodes one storable from a CBOR stream.
type StorableDecoder func(
	decoder *cbor.StreamDecoder,
	storableSlabID SlabID,
	inlinedExtraData []ExtraData,
) (
	Storable,
	error,
)

// DecodeSlab decodes a serialized slab. The slab kind (array data/meta,
// map data/meta/collision group, or storable) is read from the head bytes
// and dispatched to the matching decoder.
func DecodeSlab(
	id SlabID,
	data []byte,
	decMode cbor.DecMode,
	decodeStorable StorableDecoder,
	decodeTypeInfo TypeInfoDecoder,
) (
	Slab,
	error,
) {
	if len(data) < versionAndFlagSize {
		return nil, NewDecodingErrorf("data is too short")
	}

	h, err := newHeadFromData(data[:versionAndFlagSize])
	if err != nil {
		return nil, NewDecodingError(err)
	}

	switch h.getSlabType() {

	case slabArray:

		arrayDataType := h.getSlabArrayType()

		switch arrayDataType {
		case slabArrayData:
			return newArrayDataSlabFromData(id, data, decMode, decodeStorable, decodeTypeInfo)
		case slabArrayMeta:
			return newArrayMetaDataSlabFromData(id, data, decMode, decodeTypeInfo)
		default:
			return nil, NewDecodingErrorf("data has invalid head 0x%x", h[:])
		}

	case slabMap:

		mapDataType := h.getSlabMapType()

		switch mapDataType {
		case slabMapData:
			return newMapDataSlabFromData(id, data, decMode, decodeStorable, decodeTypeInfo)
		case slabMapMeta:
			return newMapMetaDataSlabFromData(id, data, decMode, decodeTypeInfo)
		case slabMapCollisionGroup:
			// Collision groups share the map data slab encoding.
			return newMapDataSlabFromData(id, data, decMode, decodeStorable, decodeTypeInfo)
		default:
			return nil, NewDecodingErrorf("data has invalid head 0x%x", h[:])
		}

	case slabStorable:
		cborDec := decMode.NewByteStreamDecoder(data[versionAndFlagSize:])
		storable, err := decodeStorable(cborDec, id, nil)
		if err != nil {
			// Wrap err as external error (if needed) because err is returned by StorableDecoder callback.
			return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode slab storable")
		}
		return &StorableSlab{
			slabID:   id,
			storable: storable,
		}, nil

	default:
		return nil, NewDecodingErrorf("data has invalid head 0x%x", h[:])
	}
}
-------------------------------------------------------------------------------- /doc.go: --------------------------------------------------------------------------------
// Atree - Scalable Arrays and Ordered Maps
//
// Copyright Flow Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package atree provides scalable arrays and scalable ordered maps. It is
used by Cadence in the Flow blockchain.

Atree is maintained at https://github.com/onflow/atree
*/
package atree
-------------------------------------------------------------------------------- /encode.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import (
	"bytes"
	"io"
	"math"

	"github.com/fxamacker/cbor/v2"
)

// Encoder writes atree slabs to io.Writer.
type Encoder struct {
	io.Writer
	CBOR              *cbor.StreamEncoder
	Scratch           [64]byte // reusable scratch space for encoding fixed-size data
	encMode           cbor.EncMode
	_inlinedExtraData *InlinedExtraData // lazily created; access via inlinedExtraData()
}

// NewEncoder returns an Encoder that writes CBOR-encoded slabs to w.
func NewEncoder(w io.Writer, encMode cbor.EncMode) *Encoder {
	streamEncoder := encMode.NewStreamEncoder(w)
	return &Encoder{
		Writer:  w,
		CBOR:    streamEncoder,
		encMode: encMode,
	}
}

// inlinedExtraData returns the encoder's inlined extra data,
// creating it on first use.
func (enc *Encoder) inlinedExtraData() *InlinedExtraData {
	if enc._inlinedExtraData == nil {
		enc._inlinedExtraData = newInlinedExtraData()
	}
	return enc._inlinedExtraData
}

// hasInlinedExtraData reports whether any inlined extra data has been
// collected, without triggering its lazy creation.
func (enc *Encoder) hasInlinedExtraData() bool {
	if enc._inlinedExtraData == nil {
		return false
	}
	return !enc._inlinedExtraData.empty()
}

// EncodeSlab encodes slab and returns its serialized bytes.
func EncodeSlab(slab Slab, encMode cbor.EncMode) ([]byte, error) {
	var buf bytes.Buffer
	enc := NewEncoder(&buf, encMode)

	err := slab.Encode(enc)
	if err != nil {
		// Wrap err as external error (if needed) because err is returned by Storable interface.
		return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode storable")
	}

	err = enc.CBOR.Flush()
	if err != nil {
		return nil, NewEncodingError(err)
	}

	return buf.Bytes(), nil
}

// GetUintCBORSize returns the number of bytes CBOR uses to encode the
// unsigned integer n (1 byte for values <= 23, otherwise head byte plus
// 1, 2, 4, or 8 bytes of payload).
func GetUintCBORSize(n uint64) uint32 {
	if n <= 23 {
		return 1
	}
	if n <= math.MaxUint8 {
		return 2
	}
	if n <= math.MaxUint16 {
		return 3
	}
	if n <= math.MaxUint32 {
		return 5
	}
	return 9
}
-------------------------------------------------------------------------------- /export_test.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import (
	"fmt"
)

// Exported functions of PersistentSlabStorage for testing.
var (
	GetBaseStorage = (*PersistentSlabStorage).getBaseStorage
	GetCache       = (*PersistentSlabStorage).getCache
	GetDeltas      = (*PersistentSlabStorage).getDeltas
	GetCBOREncMode = (*PersistentSlabStorage).getCBOREncMode
	GetCBORDecMode = (*PersistentSlabStorage).getCBORDecMode
)

// Exported function of slab size settings for testing.
35 | var ( 36 | TargetSlabSize = targetSlabSize 37 | MaxInlineMapValueSize = maxInlineMapValueSize 38 | ) 39 | 40 | // Exported function of Array for testing. 41 | var ( 42 | GetArrayRootSlab = (*Array).rootSlab 43 | ArrayHasParentUpdater = (*Array).hasParentUpdater 44 | GetArrayMutableElementIndexCount = (*Array).getMutableElementIndexCount 45 | GetArrayMutableElementIndex = (*Array).getMutableElementIndex 46 | ) 47 | 48 | // Exported function of OrderedMap for testing. 49 | var ( 50 | GetMapRootSlab = (*OrderedMap).rootSlab 51 | GetMapDigesterBuilder = (*OrderedMap).getDigesterBuilder 52 | ) 53 | 54 | // Exported function of MapDataSlab for testing. 55 | var ( 56 | IsMapDataSlabCollisionGroup = (*MapDataSlab).isCollisionGroup 57 | GetMapDataSlabElementCount = (*MapDataSlab).elementCount 58 | ) 59 | 60 | // Exported function for testing 61 | var ( 62 | UnwrapValue = unwrapValue 63 | UnwrapStorable = unwrapStorable 64 | ) 65 | 66 | func NewArrayRootDataSlab(id SlabID, storables []Storable) ArraySlab { 67 | size := uint32(arrayRootDataSlabPrefixSize) 68 | 69 | for _, storable := range storables { 70 | size += storable.ByteSize() 71 | } 72 | 73 | return &ArrayDataSlab{ 74 | header: ArraySlabHeader{ 75 | slabID: id, 76 | size: size, 77 | count: uint32(len(storables)), 78 | }, 79 | elements: storables, 80 | } 81 | } 82 | 83 | func GetArrayRootSlabStorables(array *Array) []Storable { 84 | return array.rootSlab().ChildStorables() 85 | } 86 | 87 | func GetMapRootSlabStorables(m *OrderedMap) []Storable { 88 | return m.rootSlab().ChildStorables() 89 | } 90 | 91 | func GetMapSlabStorables(m MapSlab) []Storable { 92 | return m.ChildStorables() 93 | } 94 | 95 | func GetArrayMetaDataSlabChildInfo(metaDataSlab *ArrayMetaDataSlab) (childSlabIDs []SlabID, childCounts []uint32) { 96 | childSlabIDs = make([]SlabID, len(metaDataSlab.childrenHeaders)) 97 | childCounts = make([]uint32, len(metaDataSlab.childrenHeaders)) 98 | 99 | for i, childHeader := range metaDataSlab.childrenHeaders 
{ 100 | childSlabIDs[i] = childHeader.slabID 101 | childCounts[i] = childHeader.count 102 | } 103 | 104 | return childSlabIDs, childCounts 105 | } 106 | 107 | func GetMapMetaDataSlabChildInfo(metaDataSlab *MapMetaDataSlab) (childSlabIDs []SlabID, childSizes []uint32, childFirstKeys []Digest) { 108 | childSlabIDs = make([]SlabID, len(metaDataSlab.childrenHeaders)) 109 | childSizes = make([]uint32, len(metaDataSlab.childrenHeaders)) 110 | childFirstKeys = make([]Digest, len(metaDataSlab.childrenHeaders)) 111 | 112 | for i, childHeader := range metaDataSlab.childrenHeaders { 113 | childSlabIDs[i] = childHeader.slabID 114 | childSizes[i] = childHeader.size 115 | childFirstKeys[i] = childHeader.firstKey 116 | } 117 | 118 | return childSlabIDs, childSizes, childFirstKeys 119 | } 120 | 121 | func GetMutableValueNotifierValueID(v Value) (ValueID, error) { 122 | m, ok := v.(mutableValueNotifier) 123 | if !ok { 124 | return ValueID{}, fmt.Errorf("v (%T) isn't mutableValueNotifier", v) 125 | } 126 | return m.ValueID(), nil 127 | } 128 | 129 | func ComputeArrayRootDataSlabByteSizeWithFixSizedElement(storableByteSize uint32, count uint64) uint32 { 130 | storableByteSizes := make([]uint32, count) 131 | for i := range storableByteSizes { 132 | storableByteSizes[i] = storableByteSize 133 | } 134 | return ComputeArrayRootDataSlabByteSize(storableByteSizes) 135 | } 136 | 137 | func ComputeArrayRootDataSlabByteSize(storableByteSizes []uint32) uint32 { 138 | slabSize := uint32(arrayRootDataSlabPrefixSize) 139 | for _, storableByteSize := range storableByteSizes { 140 | slabSize += storableByteSize 141 | } 142 | return slabSize 143 | } 144 | 145 | func ComputeInlinedArraySlabByteSizeWithFixSizedElement(storableByteSize uint32, count uint64) uint32 { 146 | storableByteSizes := make([]uint32, count) 147 | for i := range storableByteSizes { 148 | storableByteSizes[i] = storableByteSize 149 | } 150 | return ComputeInlinedArraySlabByteSize(storableByteSizes) 151 | } 152 | 153 | func 
ComputeInlinedArraySlabByteSize(storableByteSizes []uint32) uint32 { 154 | slabSize := uint32(inlinedArrayDataSlabPrefixSize) 155 | for _, storableByteSize := range storableByteSizes { 156 | slabSize += storableByteSize 157 | } 158 | return slabSize 159 | } 160 | 161 | func ComputeMapRootDataSlabByteSizeWithFixSizedElement(keyStorableByteSize, valueStorableByteSize uint32, count int) uint32 { 162 | elementStorableByteSizes := make([][2]uint32, count) 163 | for i := range elementStorableByteSizes { 164 | elementStorableByteSizes[i] = [2]uint32{keyStorableByteSize, valueStorableByteSize} 165 | } 166 | return ComputeMapRootDataSlabByteSize(elementStorableByteSizes) 167 | } 168 | 169 | func ComputeMapRootDataSlabByteSize(elementStorableByteSizes [][2]uint32) uint32 { 170 | slabSize := uint32(mapRootDataSlabPrefixSize + hkeyElementsPrefixSize) 171 | for _, elementStorableByteSize := range elementStorableByteSizes { 172 | keyStorableByteSize := elementStorableByteSize[0] 173 | valueStorableByteSize := elementStorableByteSize[1] 174 | 175 | elementSize := singleElementPrefixSize + 176 | digestSize + 177 | keyStorableByteSize + 178 | valueStorableByteSize 179 | 180 | slabSize += elementSize 181 | } 182 | 183 | return slabSize 184 | } 185 | 186 | func ComputeInlinedMapSlabByteSizeWithFixSizedElement(keyStorableByteSize, valueStorableByteSize uint32, count int) uint32 { 187 | elementStorableByteSizes := make([][2]uint32, count) 188 | for i := range elementStorableByteSizes { 189 | elementStorableByteSizes[i] = [2]uint32{keyStorableByteSize, valueStorableByteSize} 190 | } 191 | return ComputeInlinedMapSlabByteSize(elementStorableByteSizes) 192 | } 193 | 194 | func ComputeInlinedMapSlabByteSize(elementStorableByteSizes [][2]uint32) uint32 { 195 | slabSize := uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) 196 | for _, elementStorableByteSize := range elementStorableByteSizes { 197 | keyStorableByteSize := elementStorableByteSize[0] 198 | valueStorableByteSize := 
elementStorableByteSize[1] 199 | 200 | elementSize := singleElementPrefixSize + 201 | digestSize + 202 | keyStorableByteSize + 203 | valueStorableByteSize 204 | 205 | slabSize += elementSize 206 | } 207 | 208 | return slabSize 209 | } 210 | -------------------------------------------------------------------------------- /files/example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onflow/atree/51657de142f26465cc9167cb3cfeba5c5e9f5c2e/files/example.jpg -------------------------------------------------------------------------------- /files/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onflow/atree/51657de142f26465cc9167cb3cfeba5c5e9f5c2e/files/logo.png -------------------------------------------------------------------------------- /flag.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package atree 20 | 21 | import ( 22 | "fmt" 23 | ) 24 | 25 | type slabType int 26 | 27 | const ( 28 | slabTypeUndefined slabType = iota 29 | slabArray 30 | slabMap 31 | slabStorable 32 | ) 33 | 34 | type slabArrayType int 35 | 36 | const ( 37 | slabArrayUndefined slabArrayType = iota 38 | slabArrayData 39 | slabArrayMeta 40 | slabLargeImmutableArray 41 | ) 42 | 43 | type slabMapType int 44 | 45 | const ( 46 | slabMapUndefined slabMapType = iota 47 | slabMapData 48 | slabMapMeta 49 | slabMapLargeEntry 50 | slabMapCollisionGroup 51 | ) 52 | 53 | // Version and flag masks for the 1st byte of encoded slab. 54 | // Flags in this group are only for v1 and above. 55 | const ( 56 | maskVersion byte = 0b1111_0000 57 | maskHasNextSlabID byte = 0b0000_0010 // This flag is only relevant for data slab. 58 | maskHasInlinedSlabs byte = 0b0000_0001 59 | ) 60 | 61 | // Flag masks for the 2nd byte of encoded slab. 62 | // Flags in this group are available for all versions. 63 | const ( 64 | // Slab flags: 3 high bits 65 | maskSlabRoot byte = 0b100_00000 66 | maskSlabHasPointers byte = 0b010_00000 67 | maskSlabAnySize byte = 0b001_00000 68 | 69 | // Array flags: 3 low bits (4th and 5th bits are 0) 70 | maskArrayData byte = 0b000_00000 71 | maskArrayMeta byte = 0b000_00001 72 | // maskLargeImmutableArray byte = 0b000_00010 // not used for now 73 | 74 | // Map flags: 3 low bits (4th bit is 0, 5th bit is 1) 75 | maskMapData byte = 0b000_01000 76 | maskMapMeta byte = 0b000_01001 77 | // maskLargeMapEntry byte = 0b000_01010 // not used for now 78 | maskCollisionGroup byte = 0b000_01011 79 | 80 | // Storable flags: 3 low bits (4th bit is 1, 5th bit is 1) 81 | maskStorable byte = 0b000_11111 82 | ) 83 | 84 | const ( 85 | maxVersion = 0b0000_1111 86 | ) 87 | 88 | type head [2]byte 89 | 90 | // newArraySlabHead returns an array slab head of given version and slab type. 
91 | func newArraySlabHead(version byte, t slabArrayType) (*head, error) { 92 | if version > maxVersion { 93 | return nil, fmt.Errorf("encoding version must be less than %d, got %d", maxVersion+1, version) 94 | } 95 | 96 | var h head 97 | 98 | h[0] = version << 4 99 | 100 | switch t { 101 | case slabArrayData: 102 | h[1] = maskArrayData 103 | 104 | case slabArrayMeta: 105 | h[1] = maskArrayMeta 106 | 107 | default: 108 | return nil, fmt.Errorf("unsupported array slab type %d", t) 109 | } 110 | 111 | return &h, nil 112 | } 113 | 114 | // newMapSlabHead returns a map slab head of given version and slab type. 115 | func newMapSlabHead(version byte, t slabMapType) (*head, error) { 116 | if version > maxVersion { 117 | return nil, fmt.Errorf("encoding version must be less than %d, got %d", maxVersion+1, version) 118 | } 119 | 120 | var h head 121 | 122 | h[0] = version << 4 123 | 124 | switch t { 125 | case slabMapData: 126 | h[1] = maskMapData 127 | 128 | case slabMapMeta: 129 | h[1] = maskMapMeta 130 | 131 | case slabMapCollisionGroup: 132 | h[1] = maskCollisionGroup 133 | 134 | default: 135 | return nil, fmt.Errorf("unsupported map slab type %d", t) 136 | } 137 | 138 | return &h, nil 139 | } 140 | 141 | // newStorableSlabHead returns a storable slab head of given version. 142 | func newStorableSlabHead(version byte) (*head, error) { 143 | if version > maxVersion { 144 | return nil, fmt.Errorf("encoding version must be less than %d, got %d", maxVersion+1, version) 145 | } 146 | 147 | var h head 148 | h[0] = version << 4 149 | h[1] = maskStorable 150 | return &h, nil 151 | } 152 | 153 | // newHeadFromData returns a head with given data. 
154 | func newHeadFromData(data []byte) (head, error) { 155 | if len(data) != 2 { 156 | return head{}, fmt.Errorf("head must be 2 bytes, got %d bytes", len(data)) 157 | } 158 | 159 | return head{data[0], data[1]}, nil 160 | } 161 | 162 | func (h *head) version() byte { 163 | return (h[0] & maskVersion) >> 4 164 | } 165 | 166 | func (h *head) isRoot() bool { 167 | return h[1]&maskSlabRoot > 0 168 | } 169 | 170 | func (h *head) setRoot() { 171 | h[1] |= maskSlabRoot 172 | } 173 | 174 | func (h *head) hasPointers() bool { 175 | return h[1]&maskSlabHasPointers > 0 176 | } 177 | 178 | func (h *head) setHasPointers() { 179 | h[1] |= maskSlabHasPointers 180 | } 181 | 182 | func (h *head) hasSizeLimit() bool { 183 | return h[1]&maskSlabAnySize == 0 184 | } 185 | 186 | func (h *head) setNoSizeLimit() { 187 | h[1] |= maskSlabAnySize 188 | } 189 | 190 | func (h *head) hasInlinedSlabs() bool { 191 | return h[0]&maskHasInlinedSlabs > 0 192 | } 193 | 194 | func (h *head) setHasInlinedSlabs() { 195 | h[0] |= maskHasInlinedSlabs 196 | } 197 | 198 | func (h *head) hasNextSlabID() bool { 199 | if h.version() == 0 { 200 | return !h.isRoot() 201 | } 202 | return h[0]&maskHasNextSlabID > 0 203 | } 204 | 205 | func (h *head) setHasNextSlabID() { 206 | h[0] |= maskHasNextSlabID 207 | } 208 | 209 | func (h head) getSlabType() slabType { 210 | f := h[1] 211 | // Extract 4th and 5th bits for slab type. 212 | dataType := (f & byte(0b000_11000)) >> 3 213 | switch dataType { 214 | case 0: 215 | // 4th and 5th bits are 0. 216 | return slabArray 217 | case 1: 218 | // 4th bit is 0 and 5th bit is 1. 219 | return slabMap 220 | case 3: 221 | // 4th and 5th bit are 1. 222 | return slabStorable 223 | default: 224 | return slabTypeUndefined 225 | } 226 | } 227 | 228 | func (h head) getSlabArrayType() slabArrayType { 229 | if h.getSlabType() != slabArray { 230 | return slabArrayUndefined 231 | } 232 | 233 | f := h[1] 234 | 235 | // Extract 3 low bits for slab array type. 
236 | dataType := (f & byte(0b000_00111)) 237 | switch dataType { 238 | case 0: 239 | return slabArrayData 240 | case 1: 241 | return slabArrayMeta 242 | case 2: 243 | return slabLargeImmutableArray 244 | default: 245 | return slabArrayUndefined 246 | } 247 | } 248 | 249 | func (h head) getSlabMapType() slabMapType { 250 | if h.getSlabType() != slabMap { 251 | return slabMapUndefined 252 | } 253 | 254 | f := h[1] 255 | 256 | // Extract 3 low bits for slab map type. 257 | dataType := (f & byte(0b000_00111)) 258 | switch dataType { 259 | case 0: 260 | return slabMapData 261 | case 1: 262 | return slabMapMeta 263 | case 2: 264 | return slabMapLargeEntry 265 | case 3: 266 | return slabMapCollisionGroup 267 | default: 268 | return slabMapUndefined 269 | } 270 | } 271 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/onflow/atree 2 | 3 | go 1.23 4 | 5 | require ( 6 | github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829 7 | github.com/fxamacker/circlehash v0.3.0 8 | github.com/stretchr/testify v1.10.0 9 | github.com/zeebo/blake3 v0.2.4 10 | lukechampine.com/blake3 v1.4.1 11 | ) 12 | 13 | require ( 14 | github.com/davecgh/go-spew v1.1.1 // indirect 15 | github.com/klauspost/cpuid/v2 v2.0.12 // indirect 16 | github.com/pmezard/go-difflib v1.0.0 // indirect 17 | github.com/stretchr/objx v0.5.2 // indirect 18 | github.com/x448/float16 v0.8.4 // indirect 19 | gopkg.in/yaml.v3 v3.0.1 // indirect 20 | ) 21 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829 
h1:qOglMkJ5YBwog/GU/NXhP9gFqxUGMuqnmCkbj65JMhk= 4 | github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= 5 | github.com/fxamacker/circlehash v0.3.0 h1:XKdvTtIJV9t7DDUtsf0RIpC1OcxZtPbmgIH7ekx28WA= 6 | github.com/fxamacker/circlehash v0.3.0/go.mod h1:3aq3OfVvsWtkWMb6A1owjOQFA+TLsD5FgJflnaQwtMM= 7 | github.com/klauspost/cpuid/v2 v2.0.12 h1:p9dKCg8i4gmOxtv35DvrYoWqYzQrvEVdjQ762Y0OqZE= 8 | github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= 9 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 10 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 11 | github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= 12 | github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 13 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 14 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 15 | github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= 16 | github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= 17 | github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= 18 | github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= 19 | github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= 20 | github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= 21 | github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= 22 | github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= 23 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 24 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 25 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 26 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 27 | lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= 28 | lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= 29 | -------------------------------------------------------------------------------- /hash.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright Flow Foundation 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package atree 18 | 19 | import ( 20 | "encoding/binary" 21 | "sync" 22 | 23 | "github.com/fxamacker/circlehash" 24 | "github.com/zeebo/blake3" 25 | ) 26 | 27 | type HashInputProvider func(value Value, buffer []byte) ([]byte, error) 28 | 29 | type Digest uint64 30 | 31 | type DigesterBuilder interface { 32 | SetSeed(k0 uint64, k1 uint64) 33 | Digest(HashInputProvider, Value) (Digester, error) 34 | } 35 | 36 | type Digester interface { 37 | // DigestPrefix returns digests before specified level. 38 | // If level is 0, DigestPrefix returns nil. 39 | DigestPrefix(level uint) ([]Digest, error) 40 | 41 | // Digest returns digest at specified level. 
42 | Digest(level uint) (Digest, error) 43 | 44 | // Reset data for reuse 45 | Reset() 46 | 47 | Levels() uint 48 | } 49 | 50 | type basicDigesterBuilder struct { 51 | k0 uint64 52 | k1 uint64 53 | } 54 | 55 | var _ DigesterBuilder = &basicDigesterBuilder{} 56 | 57 | type basicDigester struct { 58 | circleHash64 uint64 59 | blake3Hash [4]uint64 60 | scratch [32]byte 61 | msg []byte 62 | } 63 | 64 | // basicDigesterPool caches unused basicDigester objects for later reuse. 65 | var basicDigesterPool = sync.Pool{ 66 | New: func() any { 67 | return &basicDigester{} 68 | }, 69 | } 70 | 71 | func getBasicDigester() *basicDigester { 72 | return basicDigesterPool.Get().(*basicDigester) 73 | } 74 | 75 | func putDigester(e Digester) { 76 | if _, ok := e.(*basicDigester); !ok { 77 | return 78 | } 79 | e.Reset() 80 | basicDigesterPool.Put(e) 81 | } 82 | 83 | var ( 84 | emptyBlake3Hash [4]uint64 85 | ) 86 | 87 | func NewDefaultDigesterBuilder() DigesterBuilder { 88 | return newBasicDigesterBuilder() 89 | } 90 | 91 | func newBasicDigesterBuilder() *basicDigesterBuilder { 92 | return &basicDigesterBuilder{} 93 | } 94 | 95 | func (bdb *basicDigesterBuilder) SetSeed(k0 uint64, k1 uint64) { 96 | bdb.k0 = k0 97 | bdb.k1 = k1 98 | } 99 | 100 | func (bdb *basicDigesterBuilder) Digest(hip HashInputProvider, value Value) (Digester, error) { 101 | if bdb.k0 == 0 { 102 | return nil, NewHashSeedUninitializedError() 103 | } 104 | 105 | digester := getBasicDigester() 106 | 107 | msg, err := hip(value, digester.scratch[:]) 108 | if err != nil { 109 | putDigester(digester) 110 | // Wrap err as external error (if needed) because err is returned by HashInputProvider callback. 
111 | return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to generate hash input") 112 | } 113 | 114 | digester.msg = msg 115 | digester.circleHash64 = circlehash.Hash64(msg, bdb.k0) 116 | 117 | return digester, nil 118 | } 119 | 120 | func (bd *basicDigester) Reset() { 121 | bd.circleHash64 = 0 122 | bd.blake3Hash = emptyBlake3Hash 123 | bd.msg = nil 124 | } 125 | 126 | func (bd *basicDigester) DigestPrefix(level uint) ([]Digest, error) { 127 | if level > bd.Levels() { 128 | // level must be [0, bd.Levels()] (inclusive) for prefix 129 | return nil, NewHashLevelErrorf("cannot get digest < level %d: level must be [0, %d]", level, bd.Levels()) 130 | } 131 | var prefix []Digest 132 | for i := range level { 133 | d, err := bd.Digest(i) 134 | if err != nil { 135 | // Don't need to wrap error as external error because err is already categorized by basicDigester.Digest(). 136 | return nil, err 137 | } 138 | prefix = append(prefix, d) 139 | } 140 | return prefix, nil 141 | } 142 | 143 | func (bd *basicDigester) Digest(level uint) (Digest, error) { 144 | if level >= bd.Levels() { 145 | // level must be [0, bd.Levels()) (not inclusive) for digest 146 | return 0, NewHashLevelErrorf("cannot get digest at level %d: level must be [0, %d)", level, bd.Levels()) 147 | } 148 | 149 | switch level { 150 | case 0: 151 | return Digest(bd.circleHash64), nil 152 | 153 | case 1, 2, 3: 154 | if bd.blake3Hash == emptyBlake3Hash { 155 | sum := blake3.Sum256(bd.msg) 156 | bd.blake3Hash[0] = binary.BigEndian.Uint64(sum[:]) 157 | bd.blake3Hash[1] = binary.BigEndian.Uint64(sum[8:]) 158 | bd.blake3Hash[2] = binary.BigEndian.Uint64(sum[16:]) 159 | bd.blake3Hash[3] = binary.BigEndian.Uint64(sum[24:]) 160 | } 161 | return Digest(bd.blake3Hash[level-1]), nil 162 | 163 | default: // list mode 164 | return 0, nil 165 | } 166 | } 167 | 168 | func (bd *basicDigester) Levels() uint { 169 | return 4 170 | } 171 | -------------------------------------------------------------------------------- 
/inline_utils.go:
--------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

// uninlineStorableIfNeeded uninlines given storable if needed, and
// returns uninlined Storable and its ValueID.
// If given storable is a WrapperStorable, this function uninlines
// wrapped storable if needed and returns a new WrapperStorable
// with wrapped uninlined storable and its ValueID.
26 | func uninlineStorableIfNeeded(storage SlabStorage, storable Storable) (Storable, ValueID, bool, error) { 27 | if storable == nil { 28 | return storable, emptyValueID, false, nil 29 | } 30 | 31 | switch s := storable.(type) { 32 | case ArraySlab: // inlined array slab 33 | err := s.Uninline(storage) 34 | if err != nil { 35 | return nil, emptyValueID, false, err 36 | } 37 | 38 | slabID := s.SlabID() 39 | 40 | newStorable := SlabIDStorable(slabID) 41 | valueID := slabIDToValueID(slabID) 42 | 43 | return newStorable, valueID, true, nil 44 | 45 | case MapSlab: // inlined map slab 46 | err := s.Uninline(storage) 47 | if err != nil { 48 | return nil, emptyValueID, false, err 49 | } 50 | 51 | slabID := s.SlabID() 52 | 53 | newStorable := SlabIDStorable(slabID) 54 | valueID := slabIDToValueID(slabID) 55 | 56 | return newStorable, valueID, true, nil 57 | 58 | case SlabIDStorable: // uninlined slab 59 | valueID := slabIDToValueID(SlabID(s)) 60 | 61 | return storable, valueID, false, nil 62 | 63 | case WrapperStorable: 64 | unwrappedStorable := unwrapStorable(s) 65 | 66 | // Uninline wrapped storable if needed. 67 | uninlinedWrappedStorable, valueID, uninlined, err := uninlineStorableIfNeeded(storage, unwrappedStorable) 68 | if err != nil { 69 | return nil, emptyValueID, false, err 70 | } 71 | 72 | if !uninlined { 73 | return storable, valueID, uninlined, nil 74 | } 75 | 76 | // Create a new WrapperStorable with uninlinedWrappedStorable 77 | newStorable := s.WrapAtreeStorable(uninlinedWrappedStorable) 78 | 79 | return newStorable, valueID, uninlined, nil 80 | } 81 | 82 | return storable, emptyValueID, false, nil 83 | } 84 | -------------------------------------------------------------------------------- /logs/2021-07-07/README.md: -------------------------------------------------------------------------------- 1 | Log files from July 7, 2021. 2 | 3 | See 2021-07-08 or newer. 
4 | -------------------------------------------------------------------------------- /logs/2021-07-08/README.md: -------------------------------------------------------------------------------- 1 | Benchmark logs (10x -short runs) from July 8, 2021 benchmarks run on linode.com "dedicated CPU" server. 2 | 3 | These logs should be cleaned up and reorganized. 4 | 5 | atree_short_10x.tar.gz contains 6 | - 10 "-short" logs using same seed 7 | - 10 "-short" logs using time with ns resolution as seed 8 | -------------------------------------------------------------------------------- /logs/2021-07-08/atree_short_10x.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/onflow/atree/51657de142f26465cc9167cb3cfeba5c5e9f5c2e/logs/2021-07-08/atree_short_10x.tar.gz -------------------------------------------------------------------------------- /map_dump.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package atree 20 | 21 | import ( 22 | "errors" 23 | "fmt" 24 | "strings" 25 | ) 26 | 27 | func PrintMap(m *OrderedMap) { 28 | dumps, err := DumpMapSlabs(m) 29 | if err != nil { 30 | fmt.Println(err) 31 | return 32 | } 33 | fmt.Println(strings.Join(dumps, "\n")) 34 | } 35 | 36 | func DumpMapSlabs(m *OrderedMap) ([]string, error) { 37 | var dumps []string 38 | 39 | nextLevelIDs := []SlabID{m.SlabID()} 40 | 41 | var overflowIDs []SlabID 42 | var collisionSlabIDs []SlabID 43 | 44 | level := 0 45 | for len(nextLevelIDs) > 0 { 46 | 47 | ids := nextLevelIDs 48 | 49 | nextLevelIDs = []SlabID(nil) 50 | 51 | for _, id := range ids { 52 | 53 | slab, err := getMapSlab(m.Storage, id) 54 | if err != nil { 55 | // Don't need to wrap error as external error because err is already categorized by getMapSlab(). 56 | return nil, err 57 | } 58 | 59 | switch slab := slab.(type) { 60 | case *MapDataSlab: 61 | dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) 62 | 63 | for i := 0; i < int(slab.elements.Count()); i++ { 64 | elem, err := slab.elements.Element(i) 65 | if err != nil { 66 | // Don't need to wrap error as external error because err is already categorized by elements.Element(). 
67 | return nil, err 68 | } 69 | if group, ok := elem.(elementGroup); ok { 70 | if !group.Inline() { 71 | extSlab := group.(*externalCollisionGroup) 72 | collisionSlabIDs = append(collisionSlabIDs, extSlab.slabID) 73 | } 74 | } 75 | } 76 | 77 | overflowIDs = getSlabIDFromStorable(slab, overflowIDs) 78 | 79 | case *MapMetaDataSlab: 80 | dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) 81 | 82 | for _, storable := range slab.ChildStorables() { 83 | id, ok := storable.(SlabIDStorable) 84 | if !ok { 85 | return nil, NewFatalError(errors.New("metadata slab's child storables are not of type SlabIDStorable")) 86 | } 87 | nextLevelIDs = append(nextLevelIDs, SlabID(id)) 88 | } 89 | } 90 | } 91 | 92 | level++ 93 | } 94 | 95 | for _, id := range collisionSlabIDs { 96 | slab, err := getMapSlab(m.Storage, id) 97 | if err != nil { 98 | // Don't need to wrap error as external error because err is already categorized by getMapSlab(). 99 | return nil, err 100 | } 101 | dumps = append(dumps, fmt.Sprintf("collision: %s", slab.String())) 102 | } 103 | 104 | // overflowIDs include collisionSlabIDs 105 | for _, id := range overflowIDs { 106 | found := false 107 | for _, cid := range collisionSlabIDs { 108 | if id == cid { 109 | found = true 110 | break 111 | } 112 | } 113 | if found { 114 | continue 115 | } 116 | slab, found, err := m.Storage.Retrieve(id) 117 | if err != nil { 118 | // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
119 | return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", id)) 120 | } 121 | if !found { 122 | return nil, NewSlabNotFoundErrorf(id, "slab not found during map slab dump") 123 | } 124 | dumps = append(dumps, slab.String()) 125 | } 126 | 127 | return dumps, nil 128 | } 129 | -------------------------------------------------------------------------------- /map_element_decode.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import ( 22 | "fmt" 23 | 24 | "github.com/fxamacker/cbor/v2" 25 | ) 26 | 27 | func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (element, error) { 28 | nt, err := cborDec.NextType() 29 | if err != nil { 30 | return nil, NewDecodingError(err) 31 | } 32 | 33 | switch nt { 34 | case cbor.ArrayType: 35 | // Don't need to wrap error as external error because err is already categorized by newSingleElementFromData(). 
36 | return newSingleElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData) 37 | 38 | case cbor.TagType: 39 | tagNum, err := cborDec.DecodeTagNumber() 40 | if err != nil { 41 | return nil, NewDecodingError(err) 42 | } 43 | switch tagNum { 44 | case CBORTagInlineCollisionGroup: 45 | // Don't need to wrap error as external error because err is already categorized by newInlineCollisionGroupFromData(). 46 | return newInlineCollisionGroupFromData(cborDec, decodeStorable, slabID, inlinedExtraData) 47 | case CBORTagExternalCollisionGroup: 48 | // Don't need to wrap error as external error because err is already categorized by newExternalCollisionGroupFromData(). 49 | return newExternalCollisionGroupFromData(cborDec, decodeStorable, slabID, inlinedExtraData) 50 | default: 51 | return nil, NewDecodingError(fmt.Errorf("failed to decode element: unrecognized tag number %d", tagNum)) 52 | } 53 | 54 | default: 55 | return nil, NewDecodingError(fmt.Errorf("failed to decode element: unrecognized CBOR type %s", nt)) 56 | } 57 | } 58 | 59 | func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*singleElement, error) { 60 | elemCount, err := cborDec.DecodeArrayHead() 61 | if err != nil { 62 | return nil, NewDecodingError(err) 63 | } 64 | 65 | if elemCount != 2 { 66 | return nil, NewDecodingError(fmt.Errorf("failed to decode single element: expect array of 2 elements, got %d elements", elemCount)) 67 | } 68 | 69 | key, err := decodeStorable(cborDec, slabID, inlinedExtraData) 70 | if err != nil { 71 | // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 72 | return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode key's storable") 73 | } 74 | 75 | value, err := decodeStorable(cborDec, slabID, inlinedExtraData) 76 | if err != nil { 77 | // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
78 | return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode value's storable") 79 | } 80 | 81 | return &singleElement{ 82 | key: key, 83 | value: value, 84 | size: singleElementPrefixSize + key.ByteSize() + value.ByteSize(), 85 | }, nil 86 | } 87 | 88 | func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*inlineCollisionGroup, error) { 89 | elements, err := newElementsFromData(cborDec, decodeStorable, slabID, inlinedExtraData) 90 | if err != nil { 91 | // Don't need to wrap error as external error because err is already categorized by newElementsFromData(). 92 | return nil, err 93 | } 94 | 95 | return &inlineCollisionGroup{elements}, nil 96 | } 97 | 98 | func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*externalCollisionGroup, error) { 99 | 100 | storable, err := decodeStorable(cborDec, slabID, inlinedExtraData) 101 | if err != nil { 102 | // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
103 | return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode Storable") 104 | } 105 | 106 | idStorable, ok := storable.(SlabIDStorable) 107 | if !ok { 108 | return nil, NewDecodingError(fmt.Errorf("failed to decode external collision group: expect slab ID, got %T", storable)) 109 | } 110 | 111 | return &externalCollisionGroup{ 112 | slabID: SlabID(idStorable), 113 | size: externalCollisionGroupPrefixSize + idStorable.ByteSize(), 114 | }, nil 115 | } 116 | -------------------------------------------------------------------------------- /map_element_encode.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | // Encode encodes singleElement to the given encoder. 22 | // 23 | // CBOR encoded array of 2 elements (key, value). 24 | func (e *singleElement) Encode(enc *Encoder) error { 25 | 26 | // Encode CBOR array head for 2 elements 27 | err := enc.CBOR.EncodeRawBytes([]byte{0x82}) 28 | if err != nil { 29 | return NewEncodingError(err) 30 | } 31 | 32 | // Encode key 33 | err = e.key.Encode(enc) 34 | if err != nil { 35 | // Wrap err as external error (if needed) because err is returned by Storable interface. 
36 | return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map key storable") 37 | } 38 | 39 | // Encode value 40 | err = e.value.Encode(enc) 41 | if err != nil { 42 | // Wrap err as external error (if needed) because err is returned by Storable interface. 43 | return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value storable") 44 | } 45 | 46 | err = enc.CBOR.Flush() 47 | if err != nil { 48 | return NewEncodingError(err) 49 | } 50 | 51 | return nil 52 | } 53 | 54 | // Encode encodes inlineCollisionGroup to the given encoder. 55 | // 56 | // CBOR tag (number: CBORTagInlineCollisionGroup, content: elements) 57 | func (e *inlineCollisionGroup) Encode(enc *Encoder) error { 58 | 59 | err := enc.CBOR.EncodeRawBytes([]byte{ 60 | // tag number CBORTagInlineCollisionGroup 61 | 0xd8, CBORTagInlineCollisionGroup, 62 | }) 63 | if err != nil { 64 | return NewEncodingError(err) 65 | } 66 | 67 | err = e.elements.Encode(enc) 68 | if err != nil { 69 | // Don't need to wrap error as external error because err is already categorized by elements.Encode(). 70 | return err 71 | } 72 | 73 | // TODO: is Flush necessary? 74 | err = enc.CBOR.Flush() 75 | if err != nil { 76 | return NewEncodingError(err) 77 | } 78 | 79 | return nil 80 | } 81 | 82 | // Encode encodes externalCollisionGroup to the given encoder. 83 | // 84 | // CBOR tag (number: CBORTagExternalCollisionGroup, content: slab ID) 85 | func (e *externalCollisionGroup) Encode(enc *Encoder) error { 86 | err := enc.CBOR.EncodeRawBytes([]byte{ 87 | // tag number CBORTagExternalCollisionGroup 88 | 0xd8, CBORTagExternalCollisionGroup, 89 | }) 90 | if err != nil { 91 | return NewEncodingError(err) 92 | } 93 | 94 | err = SlabIDStorable(e.slabID).Encode(enc) 95 | if err != nil { 96 | // Don't need to wrap error as external error because err is already categorized by SlabIDStorable.Encode(). 97 | return err 98 | } 99 | 100 | // TODO: is Flush necessary? 
101 | err = enc.CBOR.Flush() 102 | if err != nil { 103 | return NewEncodingError(err) 104 | } 105 | 106 | return nil 107 | } 108 | -------------------------------------------------------------------------------- /map_elements.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import ( 22 | "fmt" 23 | ) 24 | 25 | // elements is a list of elements. 
type elements interface {
	fmt.Stringer

	// getElementAndNextKey returns the key and value stored for (hkey, key)
	// and additionally the key of the next element in iteration order.
	getElementAndNextKey(
		storage SlabStorage,
		digester Digester,
		level uint,
		hkey Digest,
		comparator ValueComparator,
		key Value,
	) (MapKey, MapValue, MapKey, error)

	// Get returns the stored key and value matching (hkey, key) at the given digest level.
	Get(
		storage SlabStorage,
		digester Digester,
		level uint,
		hkey Digest,
		comparator ValueComparator,
		key Value,
	) (MapKey, MapValue, error)

	// Set stores value for key, returning the stored key and the previous value (if any).
	Set(
		storage SlabStorage,
		address Address,
		b DigesterBuilder,
		digester Digester,
		level uint,
		hkey Digest,
		comparator ValueComparator,
		hip HashInputProvider,
		key Value,
		value Value,
	) (MapKey, MapValue, error)

	// Remove removes the element matching (hkey, key), returning the removed key and value.
	Remove(
		storage SlabStorage,
		digester Digester,
		level uint,
		hkey Digest,
		comparator ValueComparator,
		key Value,
	) (MapKey, MapValue, error)

	// Merge and Split support slab merge/split during rebalancing.
	Merge(elements) error
	Split() (elements, elements, error)

	// LendToRight/BorrowFromRight move elements between sibling element lists.
	LendToRight(elements) error
	BorrowFromRight(elements) error

	CanLendToLeft(size uint32) bool
	CanLendToRight(size uint32) bool

	// Element returns the element at the given index.
	Element(int) (element, error)

	Encode(*Encoder) error

	hasPointer() bool

	firstKey() Digest

	Count() uint32

	// Size returns the encoded byte size of this element list.
	Size() uint32

	PopIterate(SlabStorage, MapPopIterationFunc) error
}

// firstKeyInMapSlab returns the key of the first element in the leftmost
// data slab reachable from slab, or nil if that slab is empty.
func firstKeyInMapSlab(storage SlabStorage, slab MapSlab) (MapKey, error) {
	dataSlab, err := firstMapDataSlab(storage, slab)
	if err != nil {
		return nil, err
	}
	return firstKeyInElements(storage, dataSlab.elements)
}

// firstKeyInElements returns the key of the first element in elems,
// descending into collision groups as needed. Returns nil for empty lists.
func firstKeyInElements(storage SlabStorage, elems elements) (MapKey, error) {
	switch elements := elems.(type) {
	case *hkeyElements:
		if len(elements.elems) == 0 {
			return nil, nil
		}
		firstElem := elements.elems[0]
		return firstKeyInElement(storage, firstElem)

	case *singleElements:
		if len(elements.elems) == 0 {
			return nil, nil
		}
		firstElem := elements.elems[0]
		return firstElem.key, nil

	default:
		return nil, NewUnreachableError()
	}
}

// firstKeyInElement returns the key of elem if it is a single element,
// or the first key of the group's elements if elem is a collision group.
func firstKeyInElement(storage SlabStorage, elem element) (MapKey, error) {
	switch elem := elem.(type) {
	case *singleElement:
		return elem.key, nil

	case elementGroup:
		group, err := elem.Elements(storage)
		if err != nil {
			return nil, err
		}
		return firstKeyInElements(storage, group)

	default:
		return nil, NewUnreachableError()
	}
}

// elementsStorables appends the child storables of every element in elems
// to childStorables and returns the extended slice.
func elementsStorables(elems elements, childStorables []Storable) []Storable {

	switch v := elems.(type) {

	case *hkeyElements:
		for i := range v.elems {
			childStorables = elementStorables(v.elems[i], childStorables)
		}

	case *singleElements:
		for i := range v.elems {
			childStorables = elementStorables(v.elems[i], childStorables)
		}

	}

	return childStorables
}

// elementStorables appends e's child storables to childStorables:
// the slab ID for an external collision group, the nested elements'
// storables for an inline collision group, or key+value for a single element.
func elementStorables(e element, childStorables []Storable) []Storable {

	switch v := e.(type) {

	case *externalCollisionGroup:
		return append(childStorables, SlabIDStorable(v.slabID))

	case *inlineCollisionGroup:
		return elementsStorables(v.elements, childStorables)

	case *singleElement:
		return append(childStorables, v.key, v.value)
	}

	panic(NewUnreachableError())
}
-------------------------------------------------------------------------------- /map_elements_decode.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import (
	"encoding/binary"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// newElementsFromData decodes a CBOR 3-element array [level, hkeys, elements]
// into either hkeyElements (when digests are present) or singleElements
// (when the hkeys byte string is empty but elements exist).
func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (elements, error) {

	arrayCount, err := cborDec.DecodeArrayHead()
	if err != nil {
		return nil, NewDecodingError(err)
	}

	if arrayCount != 3 {
		return nil, NewDecodingError(fmt.Errorf("decoding elements failed: expect array of 3 elements, got %d elements", arrayCount))
	}

	// Element 0: digest level
	level, err := cborDec.DecodeUint64()
	if err != nil {
		return nil, NewDecodingError(err)
	}

	// Element 1: hkeys, a byte string of concatenated 8-byte big-endian digests
	digestBytes, err := cborDec.DecodeBytes()
	if err != nil {
		return nil, NewDecodingError(err)
	}

	if len(digestBytes)%digestSize != 0 {
		return nil, NewDecodingError(fmt.Errorf("decoding digests failed: number of bytes is not multiple of %d", digestSize))
	}

	digestCount := len(digestBytes) / digestSize
	hkeys := make([]Digest, digestCount)
	for i := range hkeys {
		hkeys[i] = Digest(binary.BigEndian.Uint64(digestBytes[i*digestSize:]))
	}

	// Element 2: elements array
	elemCount, err := cborDec.DecodeArrayHead()
	if err != nil {
		return nil, NewDecodingError(err)
	}

	// hkeyElements require exactly one digest per element.
	if digestCount != 0 && uint64(digestCount) != elemCount {
		return nil, NewDecodingError(fmt.Errorf("decoding elements failed: number of hkeys %d isn't the same as number of elements %d", digestCount, elemCount))
	}

	if digestCount == 0 && elemCount > 0 {
		// elements are singleElements

		// Decode elements
		// NOTE(review): elems capacity comes from the decoded elemCount; a
		// corrupt/hostile length could trigger a large preallocation before
		// decoding fails — confirm inputs are trusted or bounded upstream.
		size := uint32(singleElementsPrefixSize)
		elems := make([]*singleElement, elemCount)
		for i := range elems {
			elem, err := newSingleElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData)
			if err != nil {
				// Don't need to wrap error as external error because err is already categorized by newSingleElementFromData().
				return nil, err
			}

			elems[i] = elem
			size += elem.Size()
		}

		// Create singleElements
		elements := &singleElements{
			elems: elems,
			level: uint(level),
			size:  size,
		}

		return elements, nil
	}

	// elements are hkeyElements

	// Decode elements
	size := uint32(hkeyElementsPrefixSize)
	elems := make([]element, elemCount)
	for i := range elems {
		elem, err := newElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData)
		if err != nil {
			// Don't need to wrap error as external error because err is already categorized by newElementFromData().
			return nil, err
		}

		elems[i] = elem
		// Each element contributes its own size plus its 8-byte digest.
		size += digestSize + elem.Size()
	}

	// Create hkeyElements
	elements := &hkeyElements{
		hkeys: hkeys,
		elems: elems,
		level: uint(level),
		size:  size,
	}

	return elements, nil
}
-------------------------------------------------------------------------------- /map_elements_encode.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import (
	"encoding/binary"
	"fmt"
)

// Encode encodes hkeyElements to the given encoder.
//
// CBOR encoded array [
//
//	0: level (uint)
//	1: hkeys (byte string)
//	2: elements (array)
//
// ]
func (e *hkeyElements) Encode(enc *Encoder) error {

	if e.level > maxDigestLevel {
		return NewFatalError(fmt.Errorf("hash level %d exceeds max digest level %d", e.level, maxDigestLevel))
	}

	// Encode CBOR array head of 3 elements (level, hkeys, elements)
	const cborArrayHeadOfThreeElements = 0x83
	enc.Scratch[0] = cborArrayHeadOfThreeElements

	// Encode hash level (fits in one byte: level <= maxDigestLevel)
	enc.Scratch[1] = byte(e.level)

	// Encode hkeys as byte string

	// Encode hkeys bytes header manually for fixed-size encoding
	// TODO: maybe make this header dynamic to reduce size
	// CBOR byte string head 0x59 indicates that the number of bytes in byte string are encoded in the next 2 bytes.
	const cborByteStringHead = 0x59
	enc.Scratch[2] = cborByteStringHead

	// Byte string length = 8 bytes per digest.
	binary.BigEndian.PutUint16(enc.Scratch[3:], uint16(len(e.hkeys)*8))

	// Write scratch content to encoder
	const totalSize = 5
	err := enc.CBOR.EncodeRawBytes(enc.Scratch[:totalSize])
	if err != nil {
		return NewEncodingError(err)
	}

	// Encode hkeys, each as an 8-byte big-endian value
	for i := range e.hkeys {
		binary.BigEndian.PutUint64(enc.Scratch[:], uint64(e.hkeys[i]))
		err = enc.CBOR.EncodeRawBytes(enc.Scratch[:digestSize])
		if err != nil {
			return NewEncodingError(err)
		}
	}

	// Encode elements

	// Encode elements array header manually for fixed-size encoding
	// TODO: maybe make this header dynamic to reduce size
	// CBOR array head 0x99 indicating that the number of array elements are encoded in the next 2 bytes.
	const cborArrayHead = 0x99
	enc.Scratch[0] = cborArrayHead
	binary.BigEndian.PutUint16(enc.Scratch[1:], uint16(len(e.elems)))
	err = enc.CBOR.EncodeRawBytes(enc.Scratch[:3])
	if err != nil {
		return NewEncodingError(err)
	}

	// Encode each element
	for _, e := range e.elems {
		err = e.Encode(enc)
		if err != nil {
			// Don't need to wrap error as external error because err is already categorized by element.Encode().
			return err
		}
	}

	// TODO: is Flush necessary?
	err = enc.CBOR.Flush()
	if err != nil {
		return NewEncodingError(err)
	}

	return nil
}

// Encode encodes singleElements to the given encoder.
//
// CBOR encoded array [
//
//	0: level (uint)
//	1: hkeys (0 length byte string)
//	2: elements (array)
//
// ]
func (e *singleElements) Encode(enc *Encoder) error {

	if e.level > maxDigestLevel {
		return NewFatalError(fmt.Errorf("digest level %d exceeds max digest level %d", e.level, maxDigestLevel))
	}

	// Encode CBOR array header for 3 elements (level, hkeys, elements)
	enc.Scratch[0] = 0x83

	// Encode hash level (fits in one byte: level <= maxDigestLevel)
	enc.Scratch[1] = byte(e.level)

	// Encode hkeys (empty byte string, 0x40 = byte string of length 0)
	enc.Scratch[2] = 0x40

	// Encode elements

	// Encode elements array header manually for fixed-size encoding
	// TODO: maybe make this header dynamic to reduce size
	// 0x99 = CBOR array head with 2-byte length following.
	enc.Scratch[3] = 0x99
	binary.BigEndian.PutUint16(enc.Scratch[4:], uint16(len(e.elems)))

	// Write scratch content to encoder
	const totalSize = 6
	err := enc.CBOR.EncodeRawBytes(enc.Scratch[:totalSize])
	if err != nil {
		return NewEncodingError(err)
	}

	// Encode each element
	for _, e := range e.elems {
		err = e.Encode(enc)
		if err != nil {
			// Don't need to wrap error as external error because err is already categorized by singleElement.Encode().
			return err
		}
	}

	// TODO: is Flush necessary?
	err = enc.CBOR.Flush()
	if err != nil {
		return NewEncodingError(err)
	}

	return nil
}
-------------------------------------------------------------------------------- /map_extradata.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// MapExtraData is the extra data stored with a map's root slab:
// the map's type info, element count, and digest seed.
type MapExtraData struct {
	TypeInfo TypeInfo
	Count    uint64
	Seed     uint64
}

var _ ExtraData = &MapExtraData{}

// mapExtraDataLength is the number of fields in the encoded CBOR array.
const mapExtraDataLength = 3

// newMapExtraDataFromData decodes CBOR array to extra data:
//
//	[type info, count, seed]
//
// It returns the decoded extra data and the remaining undecoded bytes.
func newMapExtraDataFromData(
	data []byte,
	decMode cbor.DecMode,
	decodeTypeInfo TypeInfoDecoder,
) (
	*MapExtraData,
	[]byte,
	error,
) {
	dec := decMode.NewByteStreamDecoder(data)

	extraData, err := newMapExtraData(dec, decodeTypeInfo)
	if err != nil {
		return nil, data, err
	}

	return extraData, data[dec.NumBytesDecoded():], nil
}

// newMapExtraData decodes a [type info, count, seed] CBOR array from dec.
func newMapExtraData(dec *cbor.StreamDecoder, decodeTypeInfo TypeInfoDecoder) (*MapExtraData, error) {

	length, err := dec.DecodeArrayHead()
	if err != nil {
		return nil, NewDecodingError(err)
	}

	if length != mapExtraDataLength {
		return nil, NewDecodingError(
			fmt.Errorf(
				"data has invalid length %d, want %d",
				length,
				mapExtraDataLength,
			))
	}

	typeInfo, err := decodeTypeInfo(dec)
	if err != nil {
		// Wrap err as external error (if needed) because err is returned by TypeInfoDecoder callback.
		return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode type info")
	}

	count, err := dec.DecodeUint64()
	if err != nil {
		return nil, NewDecodingError(err)
	}

	seed, err := dec.DecodeUint64()
	if err != nil {
		return nil, NewDecodingError(err)
	}

	return &MapExtraData{
		TypeInfo: typeInfo,
		Count:    count,
		Seed:     seed,
	}, nil
}

// isExtraData marks MapExtraData as implementing the ExtraData interface.
func (m *MapExtraData) isExtraData() bool {
	return true
}

// Type returns the map's type info.
func (m *MapExtraData) Type() TypeInfo {
	return m.TypeInfo
}

// Encode encodes extra data as CBOR array:
//
//	[type info, count, seed]
func (m *MapExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error {

	err := enc.CBOR.EncodeArrayHead(mapExtraDataLength)
	if err != nil {
		return NewEncodingError(err)
	}

	err = encodeTypeInfo(enc, m.TypeInfo)
	if err != nil {
		// Wrap err as external error (if needed) because err is returned by TypeInfo interface.
		return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode type info")
	}

	err = enc.CBOR.EncodeUint64(m.Count)
	if err != nil {
		return NewEncodingError(err)
	}

	err = enc.CBOR.EncodeUint64(m.Seed)
	if err != nil {
		return NewEncodingError(err)
	}

	err = enc.CBOR.Flush()
	if err != nil {
		return NewEncodingError(err)
	}

	return nil
}
-------------------------------------------------------------------------------- /map_metadata_slab_encode.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import "encoding/binary" 22 | 23 | // Encode encodes map meta-data slab to the given encoder. 24 | // 25 | // Root MetaDataSlab Header: 26 | // 27 | // +------------------------------+------------+--------------------------------+------------------------------+ 28 | // | slab version + flag (2 byte) | extra data | child shared address (8 bytes) | child header count (2 bytes) | 29 | // +------------------------------+------------+--------------------------------+------------------------------+ 30 | // 31 | // Non-root MetaDataSlab Header (12 bytes): 32 | // 33 | // +------------------------------+--------------------------------+------------------------------+ 34 | // | slab version + flag (2 byte) | child shared address (8 bytes) | child header count (2 bytes) | 35 | // +------------------------------+--------------------------------+------------------------------+ 36 | // 37 | // Content (n * 18 bytes): 38 | // 39 | // [ +[slab index (8 bytes), first key (8 bytes), size (2 bytes)]] 40 | // 41 | // See MapExtraData.Encode() for extra data section format. 
func (m *MapMetaDataSlab) Encode(enc *Encoder) error {

	const version = 1

	h, err := newMapSlabHead(version, slabMapMeta)
	if err != nil {
		return NewEncodingError(err)
	}

	// A non-nil extraData means this metadata slab is the map's root slab.
	if m.extraData != nil {
		h.setRoot()
	}

	// Write head (version and flag)
	_, err = enc.Write(h[:])
	if err != nil {
		return NewEncodingError(err)
	}

	// Encode extra data if present
	if m.extraData != nil {
		// Use defaultEncodeTypeInfo to encode root level TypeInfo as is.
		err = m.extraData.Encode(enc, defaultEncodeTypeInfo)
		if err != nil {
			// Don't need to wrap error as external error because err is already categorized by MapExtraData.Encode().
			return err
		}
	}

	// Encode shared address to scratch
	// (children share this address; only their indexes are stored per header)
	copy(enc.Scratch[:], m.header.slabID.address[:])

	// Encode child header count to scratch
	const childHeaderCountOffset = SlabAddressLength
	binary.BigEndian.PutUint16(
		enc.Scratch[childHeaderCountOffset:],
		uint16(len(m.childrenHeaders)),
	)

	// Write scratch content to encoder
	const totalSize = childHeaderCountOffset + 2
	_, err = enc.Write(enc.Scratch[:totalSize])
	if err != nil {
		return NewEncodingError(err)
	}

	// Encode children headers, each as:
	// slab index (8 bytes) + first key (8 bytes) + size (2 bytes)
	for _, h := range m.childrenHeaders {
		// Encode slab index to scratch
		copy(enc.Scratch[:], h.slabID.index[:])

		const firstKeyOffset = SlabIndexLength
		binary.BigEndian.PutUint64(enc.Scratch[firstKeyOffset:], uint64(h.firstKey))

		const sizeOffset = firstKeyOffset + digestSize
		binary.BigEndian.PutUint16(enc.Scratch[sizeOffset:], uint16(h.size))

		const totalSize = sizeOffset + 2
		_, err = enc.Write(enc.Scratch[:totalSize])
		if err != nil {
			return NewEncodingError(err)
		}
	}

	return nil
}

-------------------------------------------------------------------------------- /map_size_consts.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

// NOTE: we use encoding size (in bytes) instead of Go type size for slab operations,
// such as merge and split, so size constants here are related to encoding size.
const (
	// digestSize is the encoded size of one 64-bit digest.
	digestSize = 8

	// Encoded size of single element prefix size: CBOR array header (1 byte)
	singleElementPrefixSize = 1

	// Encoded size of inline collision group prefix size: CBOR tag number (2 bytes)
	inlineCollisionGroupPrefixSize = 2

	// Encoded size of external collision group prefix size: CBOR tag number (2 bytes)
	externalCollisionGroupPrefixSize = 2

	// Encoded size of digests: CBOR byte string head (3 bytes)
	digestPrefixSize = 3

	// Encoded size of number of elements: CBOR array head (3 bytes).
	elementPrefixSize = 3

	// hkey elements prefix size:
	// CBOR array header (1 byte) + level (1 byte) + hkeys byte string header (3 bytes) + elements array header (3 bytes)
	// Support up to 8,191 elements in the map per data slab.
	hkeyElementsPrefixSize = 1 + 1 + digestPrefixSize + elementPrefixSize

	// single elements prefix size:
	// CBOR array header (1 byte) + encoded level (1 byte) + hkeys byte string header (1 bytes) + elements array header (3 bytes)
	// Support up to 65,535 elements in the map per data slab.
	singleElementsPrefixSize = 1 + 1 + 1 + elementPrefixSize

	// slab header size: slab index (8 bytes) + size (2 bytes) + first digest (8 bytes)
	// Support up to 65,535 bytes for slab size limit (default limit is 1536 max bytes).
	mapSlabHeaderSize = SlabIndexLength + 2 + digestSize

	// meta data slab prefix size: version (1 byte) + flag (1 byte) + address (8 bytes) + child header count (2 bytes)
	// Support up to 65,535 children per metadata slab.
	mapMetaDataSlabPrefixSize = versionAndFlagSize + SlabAddressLength + 2

	// version (1 byte) + flag (1 byte) + next id (16 bytes)
	mapDataSlabPrefixSize = versionAndFlagSize + SlabIDLength

	// version (1 byte) + flag (1 byte)
	mapRootDataSlabPrefixSize = versionAndFlagSize

	// maxDigestLevel is max levels of 64-bit digests allowed
	maxDigestLevel = 8

	// inlined map data slab prefix size:
	// tag number (2 bytes) +
	// 3-element array head (1 byte) +
	// extra data ref index (2 bytes) [0, 255] +
	// value index head (1 byte) +
	// value index (8 bytes)
	inlinedMapDataSlabPrefixSize = inlinedTagNumSize +
		inlinedCBORArrayHeadSize +
		inlinedExtraDataIndexSize +
		inlinedCBORValueIDHeadSize +
		inlinedValueIDSize
)
-------------------------------------------------------------------------------- /map_slab.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file
except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import "fmt"

// MapSlabHeader summarizes a map slab for use in parent metadata slabs.
type MapSlabHeader struct {
	slabID   SlabID // id is used to retrieve slab from storage
	size     uint32 // size is used to split and merge; leaf: size of all element; internal: size of all headers
	firstKey Digest // firstKey (first hashed key) is used to lookup value
}

// MapSlab is the interface shared by map data slabs and metadata slabs.
type MapSlab interface {
	Slab

	// getElementAndNextKey returns the key and value for (hkey, key)
	// plus the key of the next element in iteration order.
	getElementAndNextKey(
		storage SlabStorage,
		digester Digester,
		level uint,
		hkey Digest,
		comparator ValueComparator,
		key Value,
	) (MapKey, MapValue, error, MapKey, MapValue, MapKey, error)

	Get(
		storage SlabStorage,
		digester Digester,
		level uint,
		hkey Digest,
		comparator ValueComparator,
		key Value,
	) (MapKey, MapValue, error)

	Set(
		storage SlabStorage,
		b DigesterBuilder,
		digester Digester,
		level uint,
		hkey Digest,
		comparator ValueComparator,
		hip HashInputProvider,
		key Value,
		value Value,
	) (MapKey, MapValue, error)

	Remove(
		storage SlabStorage,
		digester Digester,
		level uint,
		hkey Digest,
		comparator ValueComparator,
		key Value,
	) (MapKey, MapValue, error)

	// IsData reports whether this slab is a data slab (vs metadata slab).
	IsData() bool

	// Size-based rebalancing predicates.
	IsFull() bool
	IsUnderflow() (uint32, bool)
	CanLendToLeft(size uint32) bool
	CanLendToRight(size uint32) bool

	SetSlabID(SlabID)

	Header() MapSlabHeader

	// ExtraData accessors; extra data is present only on the root slab.
	ExtraData() *MapExtraData
	RemoveExtraData() *MapExtraData
	SetExtraData(*MapExtraData)

	PopIterate(SlabStorage, MapPopIterationFunc) error

	// Inlining support for storing small slabs inside their parent.
	Inlined() bool
	Inlinable(maxInlineSize uint64) bool
	Inline(SlabStorage) error
	Uninline(SlabStorage) error
}

// getMapSlab retrieves the slab with the given id from storage and
// asserts that it is a MapSlab.
func getMapSlab(storage SlabStorage, id SlabID) (MapSlab, error) {
	slab, found, err := storage.Retrieve(id)
	if err != nil {
		// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
		return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", id))
	}
	if !found {
		return nil, NewSlabNotFoundErrorf(id, "map slab not found")
	}
	mapSlab, ok := slab.(MapSlab)
	if !ok {
		return nil, NewSlabDataErrorf("slab %s isn't MapSlab", id)
	}
	return mapSlab, nil
}

// firstMapDataSlab returns the leftmost data slab in the subtree rooted
// at slab, following first-child links through metadata slabs.
func firstMapDataSlab(storage SlabStorage, slab MapSlab) (*MapDataSlab, error) {
	switch slab := slab.(type) {
	case *MapDataSlab:
		return slab, nil

	case *MapMetaDataSlab:
		firstChildID := slab.childrenHeaders[0].slabID
		firstChild, err := getMapSlab(storage, firstChildID)
		if err != nil {
			// Don't need to wrap error as external error because err is already categorized by getMapSlab().
			return nil, err
		}
		// Don't need to wrap error as external error because err is already categorized by firstMapDataSlab().
		return firstMapDataSlab(storage, firstChild)

	default:
		return nil, NewUnreachableError()
	}
}
-------------------------------------------------------------------------------- /map_slab_stats.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import "fmt"

// MapStats contains per-kind slab counts gathered by traversing a map's slab tree.
type MapStats struct {
	Levels                 uint64
	ElementCount           uint64
	MetaDataSlabCount      uint64
	DataSlabCount          uint64
	CollisionDataSlabCount uint64
	StorableSlabCount      uint64
}

// SlabCount returns the total number of slabs of all kinds.
func (s *MapStats) SlabCount() uint64 {
	return s.DataSlabCount + s.MetaDataSlabCount + s.CollisionDataSlabCount + s.StorableSlabCount
}

// GetMapStats returns stats about the map slabs.
//
// It walks the slab tree breadth-first from the root, counting metadata slabs,
// data slabs, external (non-inlined) collision group slabs, and storable slabs
// referenced from element keys and values.
func GetMapStats(m *OrderedMap) (MapStats, error) {
	level := uint64(0)
	metaDataSlabCount := uint64(0)
	dataSlabCount := uint64(0)
	collisionDataSlabCount := uint64(0)
	storableDataSlabCount := uint64(0)

	// nextLevelIDs holds the slab IDs of the next tree level to visit.
	nextLevelIDs := []SlabID{m.SlabID()}

	for len(nextLevelIDs) > 0 {

		ids := nextLevelIDs

		nextLevelIDs = []SlabID(nil)

		for _, id := range ids {

			slab, err := getMapSlab(m.Storage, id)
			if err != nil {
				// Don't need to wrap error as external error because err is already categorized by getMapSlab().
				return MapStats{}, err
			}

			switch slab := slab.(type) {
			case *MapDataSlab:
				dataSlabCount++

				elementGroups := []elements{slab.elements}

				// Descend into (possibly nested) element groups, e.g. collision groups.
				for len(elementGroups) > 0 {

					var nestedElementGroups []elements

					for _, group := range elementGroups {
						for i := 0; i < int(group.Count()); i++ {
							elem, err := group.Element(i)
							if err != nil {
								// Don't need to wrap error as external error because err is already categorized by elements.Element().
								return MapStats{}, err
							}

							switch e := elem.(type) {
							case elementGroup:
								nestedGroup := e

								// Non-inlined collision groups live in their own slab.
								if !nestedGroup.Inline() {
									collisionDataSlabCount++
								}

								nested, err := nestedGroup.Elements(m.Storage)
								if err != nil {
									// Don't need to wrap error as external error because err is already categorized by elementGroup.Elements().
									return MapStats{}, err
								}

								nestedElementGroups = append(nestedElementGroups, nested)

							case *singleElement:
								if _, ok := e.key.(SlabIDStorable); ok {
									storableDataSlabCount++
								}
								if _, ok := e.value.(SlabIDStorable); ok {
									storableDataSlabCount++
								}
								// This handles use case of inlined array or map value containing SlabID
								// NOTE(review): if e.value is itself a SlabIDStorable, verify that
								// getSlabIDFromStorable does not report it again here (double count).
								// The key side is not scanned for nested slab IDs — confirm intended.
								ids := getSlabIDFromStorable(e.value, nil)
								storableDataSlabCount += uint64(len(ids))
							}
						}
					}

					elementGroups = nestedElementGroups
				}

			case *MapMetaDataSlab:
				metaDataSlabCount++

				// Metadata slab children are the slab IDs forming the next level.
				for _, storable := range slab.ChildStorables() {
					id, ok := storable.(SlabIDStorable)
					if !ok {
						return MapStats{}, NewFatalError(fmt.Errorf("metadata slab's child storables are not of type SlabIDStorable"))
					}
					nextLevelIDs = append(nextLevelIDs, SlabID(id))
				}
			}
		}

		level++
	}

	return MapStats{
		Levels:                 level,
		ElementCount:           m.Count(),
		MetaDataSlabCount:      metaDataSlabCount,
		DataSlabCount:          dataSlabCount,
		CollisionDataSlabCount: collisionDataSlabCount,
		StorableSlabCount:      storableDataSlabCount,
	}, nil
}
-------------------------------------------------------------------------------- /mapcollision_bench_test.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
17 | */ 18 | 19 | package atree_test 20 | 21 | import ( 22 | "encoding/binary" 23 | "fmt" 24 | "testing" 25 | 26 | "github.com/stretchr/testify/require" 27 | "github.com/zeebo/blake3" 28 | 29 | "github.com/onflow/atree" 30 | "github.com/onflow/atree/test_utils" 31 | ) 32 | 33 | type collisionDigesterBuilder struct { 34 | digest uint64 35 | collisionCount uint32 36 | maxCollisionCount uint32 37 | } 38 | 39 | var _ atree.DigesterBuilder = &collisionDigesterBuilder{} 40 | 41 | func NewCollisionDigesterBuilder(maxCollisionLimitPerDigest uint32) atree.DigesterBuilder { 42 | return &collisionDigesterBuilder{ 43 | maxCollisionCount: maxCollisionLimitPerDigest + 1, 44 | } 45 | } 46 | 47 | func (db *collisionDigesterBuilder) Digest(hip atree.HashInputProvider, value atree.Value) (atree.Digester, error) { 48 | 49 | if db.collisionCount < db.maxCollisionCount { 50 | db.collisionCount++ 51 | } else { 52 | db.digest++ 53 | db.collisionCount = 0 54 | } 55 | firstLevelHash := db.digest 56 | 57 | var scratch [32]byte 58 | msg, err := hip(value, scratch[:]) 59 | if err != nil { 60 | return nil, err 61 | } 62 | 63 | return &collisionDigester{ 64 | firstLevelHash: firstLevelHash, 65 | msg: msg, 66 | }, nil 67 | } 68 | 69 | func (db *collisionDigesterBuilder) SetSeed(uint64, uint64) { 70 | } 71 | 72 | type collisionDigester struct { 73 | firstLevelHash uint64 74 | blake3Hash [4]uint64 75 | msg []byte 76 | } 77 | 78 | var _ atree.Digester = &collisionDigester{} 79 | 80 | func (d *collisionDigester) Digest(level uint) (atree.Digest, error) { 81 | if level >= d.Levels() { 82 | return atree.Digest(0), fmt.Errorf("invalid digest level %d", level) 83 | } 84 | 85 | switch level { 86 | case 0: 87 | return atree.Digest(d.firstLevelHash), nil 88 | default: 89 | if d.blake3Hash == emptyBlake3Hash { 90 | sum := blake3.Sum256(d.msg) 91 | d.blake3Hash[0] = binary.BigEndian.Uint64(sum[:]) 92 | d.blake3Hash[1] = binary.BigEndian.Uint64(sum[8:]) 93 | d.blake3Hash[2] = binary.BigEndian.Uint64(sum[16:]) 
94 | d.blake3Hash[3] = binary.BigEndian.Uint64(sum[24:]) 95 | } 96 | return atree.Digest(d.blake3Hash[level-1]), nil 97 | } 98 | } 99 | 100 | func (d *collisionDigester) DigestPrefix(uint) ([]atree.Digest, error) { 101 | return nil, nil 102 | } 103 | 104 | func (d *collisionDigester) Levels() uint { 105 | return 4 106 | } 107 | 108 | func (d *collisionDigester) Reset() { 109 | } 110 | 111 | var ( 112 | emptyBlake3Hash [4]uint64 113 | ) 114 | 115 | func BenchmarkCollisionPerDigest(b *testing.B) { 116 | 117 | savedMaxCollisionLimitPerDigest := atree.MaxCollisionLimitPerDigest 118 | defer func() { 119 | atree.MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest 120 | }() 121 | 122 | const mapCount = 1_000_000 123 | 124 | collisionPerDigests := []uint32{0, 10, 255, 500, 1_000, 2_000, 5_000, 10_000} 125 | 126 | for _, collisionPerDigest := range collisionPerDigests { 127 | 128 | name := fmt.Sprintf("%d elements %d collision per digest", mapCount, collisionPerDigest) 129 | 130 | b.Run(name, func(b *testing.B) { 131 | 132 | atree.MaxCollisionLimitPerDigest = collisionPerDigest 133 | 134 | digesterBuilder := NewCollisionDigesterBuilder(collisionPerDigest) 135 | keyValues := make(map[atree.Value]atree.Value, mapCount) 136 | for i := range mapCount { 137 | k := test_utils.Uint64Value(i) 138 | v := test_utils.Uint64Value(i) 139 | keyValues[k] = v 140 | } 141 | 142 | typeInfo := test_utils.NewSimpleTypeInfo(42) 143 | address := atree.Address{1, 2, 3, 4, 5, 6, 7, 8} 144 | storage := newTestPersistentStorage(b) 145 | 146 | m, err := atree.NewMap(storage, address, digesterBuilder, typeInfo) 147 | require.NoError(b, err) 148 | 149 | b.StartTimer() 150 | 151 | for range b.N { 152 | for k, v := range keyValues { 153 | _, _ = m.Set(test_utils.CompareValue, test_utils.GetHashInput, k, v) 154 | } 155 | } 156 | }) 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /settings.go: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import "fmt" 22 | 23 | // Slab invariants: 24 | // - each element can't take up more than half of slab size (including encoding overhead and digest) 25 | // - data slab must have at least 2 elements when slab size > maxThreshold 26 | 27 | const ( 28 | defaultSlabSize = uint64(1024) 29 | minSlabSize = uint64(256) 30 | minElementCountInSlab = 2 31 | ) 32 | 33 | var ( 34 | targetThreshold uint64 35 | minThreshold uint64 36 | maxThreshold uint64 37 | maxInlineArrayElementSize uint64 38 | maxInlineMapElementSize uint64 39 | maxInlineMapKeySize uint64 40 | ) 41 | 42 | func init() { 43 | SetThreshold(defaultSlabSize) 44 | } 45 | 46 | func SetThreshold(threshold uint64) (uint64, uint64, uint64, uint64) { 47 | if threshold < minSlabSize { 48 | panic(fmt.Sprintf("Slab size %d is smaller than minSlabSize %d", threshold, minSlabSize)) 49 | } 50 | 51 | targetThreshold = threshold 52 | minThreshold = targetThreshold / 2 53 | maxThreshold = uint64(float64(targetThreshold) * 1.5) 54 | 55 | // Total slab size available for array elements, excluding slab encoding overhead 56 | availableArrayElementsSize := targetThreshold - arrayDataSlabPrefixSize 57 | maxInlineArrayElementSize = 
availableArrayElementsSize / minElementCountInSlab 58 | 59 | // Total slab size available for map elements, excluding slab encoding overhead 60 | availableMapElementsSize := targetThreshold - mapDataSlabPrefixSize - hkeyElementsPrefixSize 61 | 62 | // Total encoding overhead for one map element (key+value) 63 | mapElementOverheadSize := uint64(digestSize) 64 | 65 | // Max inline size for a map's element 66 | maxInlineMapElementSize = availableMapElementsSize/minElementCountInSlab - mapElementOverheadSize 67 | 68 | // Max inline size for a map's key, excluding element overhead 69 | maxInlineMapKeySize = (maxInlineMapElementSize - singleElementPrefixSize) / 2 70 | 71 | return minThreshold, maxThreshold, maxInlineArrayElementSize, maxInlineMapKeySize 72 | } 73 | 74 | func MaxInlineArrayElementSize() uint64 { 75 | return maxInlineArrayElementSize 76 | } 77 | 78 | func MaxInlineMapElementSize() uint64 { 79 | return maxInlineMapElementSize 80 | } 81 | 82 | func MaxInlineMapKeySize() uint64 { 83 | return maxInlineMapKeySize 84 | } 85 | 86 | func maxInlineMapValueSize(keySize uint64) uint64 { 87 | return maxInlineMapElementSize - keySize - singleElementPrefixSize 88 | } 89 | 90 | func targetSlabSize() uint64 { 91 | return targetThreshold 92 | } 93 | -------------------------------------------------------------------------------- /slab.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 
8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import "fmt" 22 | 23 | type Slab interface { 24 | Storable 25 | fmt.Stringer 26 | 27 | SlabID() SlabID 28 | Split(SlabStorage) (Slab, Slab, error) 29 | Merge(Slab) error 30 | // LendToRight rebalances slabs by moving elements from left to right 31 | LendToRight(Slab) error 32 | // BorrowFromRight rebalances slabs by moving elements from right to left 33 | BorrowFromRight(Slab) error 34 | } 35 | 36 | func IsRootOfAnObject(slabData []byte) (bool, error) { 37 | if len(slabData) < versionAndFlagSize { 38 | return false, NewDecodingErrorf("data is too short") 39 | } 40 | 41 | h, err := newHeadFromData(slabData[:versionAndFlagSize]) 42 | if err != nil { 43 | return false, NewDecodingError(err) 44 | } 45 | 46 | return h.isRoot(), nil 47 | } 48 | 49 | func HasPointers(slabData []byte) (bool, error) { 50 | if len(slabData) < versionAndFlagSize { 51 | return false, NewDecodingErrorf("data is too short") 52 | } 53 | 54 | h, err := newHeadFromData(slabData[:versionAndFlagSize]) 55 | if err != nil { 56 | return false, NewDecodingError(err) 57 | } 58 | 59 | return h.hasPointers(), nil 60 | } 61 | 62 | func HasSizeLimit(slabData []byte) (bool, error) { 63 | if len(slabData) < versionAndFlagSize { 64 | return false, NewDecodingErrorf("data is too short") 65 | } 66 | 67 | h, err := newHeadFromData(slabData[:versionAndFlagSize]) 68 | if err != nil { 69 | return false, NewDecodingError(err) 70 | } 71 | 72 | return h.hasSizeLimit(), nil 73 | } 74 | 
-------------------------------------------------------------------------------- /slab_id.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	SlabAddressLength = 8
	SlabIndexLength   = 8
	SlabIDLength      = SlabAddressLength + SlabIndexLength
)

// WARNING: Any changes to SlabID or its components (Address and SlabIndex)
// require updates to ValueID definition and functions.
type (
	Address   [SlabAddressLength]byte
	SlabIndex [SlabIndexLength]byte

	// SlabID identifies slab in storage.
	// SlabID should only be used to retrieve,
	// store, and remove slab in storage.
	SlabID struct {
		address Address
		index   SlabIndex
	}
)

var (
	AddressUndefined   = Address{}
	SlabIndexUndefined = SlabIndex{}
	SlabIDUndefined    = SlabID{}
)

// SlabIndex

// Next returns new SlabIndex with index+1 value.
// The caller is responsible for preventing overflow
// by checking if the index value is valid before
// calling this function.
func (index SlabIndex) Next() SlabIndex {
	i := binary.BigEndian.Uint64(index[:])

	var next SlabIndex
	binary.BigEndian.PutUint64(next[:], i+1)

	return next
}

// SlabIndexToLedgerKey returns the ledger key for the given slab index,
// prefixed with LedgerBaseStorageSlabPrefix.
func SlabIndexToLedgerKey(ind SlabIndex) []byte {
	return []byte(LedgerBaseStorageSlabPrefix + string(ind[:]))
}

// SlabID

// NewSlabID returns a SlabID with the given address and index.
func NewSlabID(address Address, index SlabIndex) SlabID {
	return SlabID{address, index}
}

// NewSlabIDFromRawBytes decodes a SlabID from the first SlabIDLength bytes
// of b (address first, then index); extra bytes are ignored.
// It returns an error if b is shorter than SlabIDLength.
func NewSlabIDFromRawBytes(b []byte) (SlabID, error) {
	if len(b) < SlabIDLength {
		return SlabID{}, NewSlabIDErrorf("incorrect slab ID buffer length %d", len(b))
	}

	var address Address
	copy(address[:], b)

	var index SlabIndex
	copy(index[:], b[SlabAddressLength:])

	return SlabID{address, index}, nil
}

// ToRawBytes writes the SlabID (address then index) into b and returns the
// number of bytes written (SlabIDLength).
// It returns an error if b is shorter than SlabIDLength.
func (id SlabID) ToRawBytes(b []byte) (int, error) {
	if len(b) < SlabIDLength {
		return 0, NewSlabIDErrorf("incorrect slab ID buffer length %d", len(b))
	}
	copy(b, id.address[:])
	copy(b[SlabAddressLength:], id.index[:])
	return SlabIDLength, nil
}

// String returns the SlabID as "0x<address>.<index>" with both parts
// interpreted as big-endian uint64.
func (id SlabID) String() string {
	return fmt.Sprintf(
		"0x%x.%d",
		binary.BigEndian.Uint64(id.address[:]),
		binary.BigEndian.Uint64(id.index[:]),
	)
}

// AddressAsUint64 returns the address interpreted as a big-endian uint64.
func (id SlabID) AddressAsUint64() uint64 {
	return binary.BigEndian.Uint64(id.address[:])
}

// Address returns the address of SlabID.
func (id SlabID) Address() Address {
	return id.address
}

// IndexAsUint64 returns the index interpreted as a big-endian uint64.
func (id SlabID) IndexAsUint64() uint64 {
	return binary.BigEndian.Uint64(id.index[:])
}

// HasTempAddress reports whether the SlabID has the undefined (temporary) address.
func (id SlabID) HasTempAddress() bool {
	return id.address == AddressUndefined
}

// Index returns the index of SlabID.
func (id SlabID) Index() SlabIndex {
	return id.index
}

// Valid returns an error if the SlabID is entirely undefined or its index is
// undefined. An undefined (temporary) address with a defined index is valid.
func (id SlabID) Valid() error {
	if id == SlabIDUndefined {
		return NewSlabIDError("undefined slab ID")
	}
	if id.index == SlabIndexUndefined {
		return NewSlabIDError("undefined slab index")
	}
	return nil
}

// Compare lexicographically compares the two IDs by address, then index,
// returning a negative, zero, or positive result as bytes.Compare does.
func (id SlabID) Compare(other SlabID) int {
	result := bytes.Compare(id.address[:], other.address[:])
	if result == 0 {
		return bytes.Compare(id.index[:], other.index[:])
	}
	return result
}
-------------------------------------------------------------------------------- /slab_id_storable.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// SlabIDStorable is a Storable that references another slab by its SlabID.
type SlabIDStorable SlabID

var _ ContainerStorable = SlabIDStorable{}

// HasPointer always returns true: a SlabIDStorable is itself a pointer to another slab.
func (v SlabIDStorable) HasPointer() bool {
	return true
}

// ChildStorables returns nil; the referenced slab is not loaded here.
func (v SlabIDStorable) ChildStorables() []Storable {
	return nil
}

// StoredValue retrieves the referenced slab from storage and returns its stored value.
func (v SlabIDStorable) StoredValue(storage SlabStorage) (Value, error) {
	id := SlabID(v)
	if err := id.Valid(); err != nil {
		// Don't need to wrap error as external error because err is already categorized by SlabID.Valid().
		return nil, err
	}

	slab, found, err := storage.Retrieve(id)
	if err != nil {
		// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
		return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", id))
	}
	if !found {
		return nil, NewSlabNotFoundErrorf(id, "slab not found for stored value")
	}
	value, err := slab.StoredValue(storage)
	if err != nil {
		// Wrap err as external error (if needed) because err is returned by Storable interface.
		return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value")
	}
	return value, nil
}

// Encode encodes SlabIDStorable as
//
// cbor.Tag{
// Number: cborTagSlabID,
// Content: byte(v),
// }
func (v SlabIDStorable) Encode(enc *Encoder) error {
	err := enc.CBOR.EncodeRawBytes([]byte{
		// tag number
		0xd8, CBORTagSlabID,
	})
	if err != nil {
		return NewEncodingError(err)
	}

	// Reuse the encoder's scratch buffer for the 16-byte address+index payload.
	copy(enc.Scratch[:], v.address[:])
	copy(enc.Scratch[8:], v.index[:])

	err = enc.CBOR.EncodeBytes(enc.Scratch[:SlabIDLength])
	if err != nil {
		return NewEncodingError(err)
	}

	return nil
}

// ByteSize returns the encoded size of SlabIDStorable.
func (v SlabIDStorable) ByteSize() uint32 {
	// tag number (2 bytes) + byte string header (1 byte) + slab id (16 bytes)
	return 2 + 1 + SlabIDLength
}

// String returns a debug representation of the storable.
// NOTE(review): %d on the underlying struct prints the raw byte-array fields;
// SlabID(v).String() would give the conventional "0xaddr.index" form —
// confirm the current output is intended before changing it.
func (v SlabIDStorable) String() string {
	return fmt.Sprintf("SlabIDStorable(%d)", v)
}

// DecodeSlabIDStorable decodes a CBOR byte string into a SlabIDStorable.
// The caller is expected to have consumed the CBOR tag number already.
func DecodeSlabIDStorable(dec *cbor.StreamDecoder) (Storable, error) {
	b, err := dec.DecodeBytes()
	if err != nil {
		return nil, NewDecodingError(err)
	}

	id, err := NewSlabIDFromRawBytes(b)
	if err != nil {
		// Don't need to wrap error as external error because err is already categorized by NewSlabIDFromRawBytes().
		return nil, err
	}

	return SlabIDStorable(id), nil
}
-------------------------------------------------------------------------------- /slab_test.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/atree"
)

// TestIsRootOfAnObject checks root-flag detection against the first two bytes
// of slab data, and error categorization for too-short input.
func TestIsRootOfAnObject(t *testing.T) {
	// We just need first 2 bytes of slab data to test.
	testCases := []struct {
		name   string
		isRoot bool
		data   []byte
	}{
		{name: "array data as root", isRoot: true, data: []byte{0x00, 0x80}},
		{name: "array metadata as root", isRoot: true, data: []byte{0x00, 0x81}},
		{name: "map data as root", isRoot: true, data: []byte{0x00, 0x88}},
		{name: "map metadata as root", isRoot: true, data: []byte{0x00, 0x89}},
		{name: "array data as non-root", isRoot: false, data: []byte{0x00, 0x00}},
		{name: "array metadata as non-root", isRoot: false, data: []byte{0x00, 0x01}},
		{name: "map data as non-root", isRoot: false, data: []byte{0x00, 0x08}},
		{name: "map metadata as non-root", isRoot: false, data: []byte{0x00, 0x09}},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			isRoot, err := atree.IsRootOfAnObject(tc.data)
			require.NoError(t, err)
			require.Equal(t, tc.isRoot, isRoot)
		})
	}

	t.Run("data too short", func(t *testing.T) {
		var fatalError *atree.FatalError
		var decodingError *atree.DecodingError
		var isRoot bool
		var err error

		// nil, empty, and 1-byte inputs must all fail with a fatal decoding error.
		isRoot, err = atree.IsRootOfAnObject(nil)
		require.False(t, isRoot)
		require.Equal(t, 1, errorCategorizationCount(err))
		require.ErrorAs(t, err, &fatalError)
		require.ErrorAs(t, err, &decodingError)
		require.ErrorAs(t, fatalError, &decodingError)

		isRoot, err = atree.IsRootOfAnObject([]byte{})
		require.False(t, isRoot)
		require.Equal(t, 1, errorCategorizationCount(err))
		require.ErrorAs(t, err, &fatalError)
		require.ErrorAs(t, err, &decodingError)
		require.ErrorAs(t, fatalError, &decodingError)

		isRoot, err = atree.IsRootOfAnObject([]byte{0x00})
		require.False(t, isRoot)
		require.Equal(t, 1, errorCategorizationCount(err))
		require.ErrorAs(t, err, &fatalError)
		require.ErrorAs(t, err, &decodingError)
		require.ErrorAs(t, fatalError, &decodingError)
	})
}

// TestHasPointers checks pointer-flag detection against the first two bytes
// of slab data, and error categorization for too-short input.
func TestHasPointers(t *testing.T) {
	// We just need first 2 bytes of slab data to test.
	testCases := []struct {
		name        string
		hasPointers bool
		data        []byte
	}{
		{name: "array data has pointer", hasPointers: true, data: []byte{0x00, 0x40}},
		{name: "array metadata has pointer", hasPointers: true, data: []byte{0x00, 0x41}},
		{name: "map data has pointer", hasPointers: true, data: []byte{0x00, 0x48}},
		{name: "map metadata has pointer", hasPointers: true, data: []byte{0x00, 0x49}},
		{name: "array data no pointer", hasPointers: false, data: []byte{0x00, 0x00}},
		{name: "array metadata no pointer", hasPointers: false, data: []byte{0x00, 0x01}},
		{name: "map data no pointer", hasPointers: false, data: []byte{0x00, 0x08}},
		{name: "map metadata no pointer", hasPointers: false, data: []byte{0x00, 0x09}},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			hasPointers, err := atree.HasPointers(tc.data)
			require.NoError(t, err)
			require.Equal(t, tc.hasPointers, hasPointers)
		})
	}

	t.Run("data too short", func(t *testing.T) {
		var fatalError *atree.FatalError
		var decodingError *atree.DecodingError
		var hasPointers bool
		var err error

		hasPointers, err = atree.HasPointers(nil)
		require.False(t, hasPointers)
		require.Equal(t, 1, errorCategorizationCount(err))
		require.ErrorAs(t, err, &fatalError)
		require.ErrorAs(t, err, &decodingError)
		require.ErrorAs(t, fatalError, &decodingError)

		hasPointers, err = atree.HasPointers([]byte{})
		require.False(t, hasPointers)
		require.Equal(t, 1, errorCategorizationCount(err))
		require.ErrorAs(t, err, &fatalError)
		require.ErrorAs(t, err, &decodingError)
		require.ErrorAs(t, fatalError, &decodingError)

		hasPointers, err = atree.HasPointers([]byte{0x00})
		require.False(t, hasPointers)
		require.Equal(t, 1, errorCategorizationCount(err))
		require.ErrorAs(t, err, &fatalError)
		require.ErrorAs(t, err, &decodingError)
		require.ErrorAs(t, fatalError, &decodingError)
	})
}

// TestHasSizeLimit checks size-limit-flag detection against the first two
// bytes of slab data, and error categorization for too-short input.
func TestHasSizeLimit(t *testing.T) {
	// We just need first 2 bytes of slab data to test.
	testCases := []struct {
		name         string
		hasSizeLimit bool
		data         []byte
	}{
		{name: "array data without size limit", hasSizeLimit: false, data: []byte{0x00, 0x20}},
		{name: "array metadata without size limit", hasSizeLimit: false, data: []byte{0x00, 0x21}},
		{name: "map data without size limit", hasSizeLimit: false, data: []byte{0x00, 0x28}},
		{name: "map metadata without size limit", hasSizeLimit: false, data: []byte{0x00, 0x29}},
		{name: "array data with size limit", hasSizeLimit: true, data: []byte{0x00, 0x00}},
		{name: "array metadata with size limit", hasSizeLimit: true, data: []byte{0x00, 0x01}},
		{name: "map data with size limit", hasSizeLimit: true, data: []byte{0x00, 0x08}},
		{name: "map metadata with size limit", hasSizeLimit: true, data: []byte{0x00, 0x09}},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			hasSizeLimit, err := atree.HasSizeLimit(tc.data)
			require.NoError(t, err)
			require.Equal(t, tc.hasSizeLimit, hasSizeLimit)
		})
	}

	t.Run("data too short", func(t *testing.T) {
		var fatalError *atree.FatalError
		var decodingError *atree.DecodingError
		var hasSizeLimit bool
		var err error

		hasSizeLimit, err = atree.HasSizeLimit(nil)
		require.False(t, hasSizeLimit)
		require.Equal(t, 1, errorCategorizationCount(err))
		require.ErrorAs(t, err, &fatalError)
		require.ErrorAs(t, err, &decodingError)
		require.ErrorAs(t, fatalError, &decodingError)

		hasSizeLimit, err = atree.HasSizeLimit([]byte{})
		require.False(t, hasSizeLimit)
		require.Equal(t, 1, errorCategorizationCount(err))
		require.ErrorAs(t, err, &fatalError)
		require.ErrorAs(t, err, &decodingError)
		require.ErrorAs(t, fatalError, &decodingError)

		hasSizeLimit, err = atree.HasSizeLimit([]byte{0x00})
		require.False(t, hasSizeLimit)
		require.Equal(t, 1, errorCategorizationCount(err))
		require.ErrorAs(t, err, &fatalError)
		require.ErrorAs(t, err, &decodingError)
		require.ErrorAs(t, fatalError, &decodingError)
	})
}
-------------------------------------------------------------------------------- /slice_utils.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import "slices"

// split splits s into two slices with left slice of leftCount length and right slice of remaining elements.
// Returned left is resliced s, and returned right is new slice.
// It panics if leftCount is out of range for s.
func split[S ~[]E, E any](s S, leftCount int) (left S, right S) {
	_ = s[leftCount:] // bounds check

	right = slices.Clone(s[leftCount:])
	left = slices.Delete(s, leftCount, len(s))
	return left, right
}

// merge returns concatenated left and right slices.
// If left slice has sufficient capacity, returned slice is resliced to append elements from right.
// Right slice is cleared.
func merge[S ~[]E, E any](left, right S) S {
	left = append(left, right...)
	clear(right)
	return left
}

// lendToRight moves elements from tail of left slice to head of right slice.
43 | func lendToRight[S ~[]E, E any](left, right S, count int) (S, S) { 44 | leftIndex := len(left) - count 45 | 46 | _ = left[leftIndex:] // bounds check 47 | 48 | // Prepend elements from the tail of left slice to the head of right slice. 49 | right = slices.Insert( 50 | right, 51 | 0, 52 | left[leftIndex:]..., 53 | ) 54 | 55 | // Remove moved elements from left 56 | left = slices.Delete(left, leftIndex, len(left)) 57 | 58 | return left, right 59 | } 60 | 61 | // borrowFromRight moves elements from head of right slice to tail of left slice. 62 | func borrowFromRight[S ~[]E, E any](left, right S, count int) (S, S) { 63 | _ = right[:count] // bounds check 64 | 65 | // Append moved elements to left 66 | left = append(left, right[:count]...) 67 | 68 | // Move remaining elements in the right slice to the front 69 | right = slices.Insert( 70 | right[:0], 71 | 0, 72 | right[count:]...) 73 | 74 | // Clear moved elements to prevent memory leak 75 | clear(right[len(right):cap(right)]) 76 | 77 | return left, right 78 | } 79 | -------------------------------------------------------------------------------- /storable.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
 */

package atree

// Storable is an encodable unit that can be stored inside a slab.
type Storable interface {
	Encode(*Encoder) error

	ByteSize() uint32

	StoredValue(storage SlabStorage) (Value, error)

	// ChildStorables only returns child storables in this storable
	// (not recursive). This function shouldn't load extra slabs.
	ChildStorables() []Storable
}

// ComparableStorable is an interface that supports comparison and cloning of Storable.
// This is only used for compact keys.
type ComparableStorable interface {
	Storable

	// Equal returns true if the given storable is equal to this storable.
	Equal(Storable) bool

	// Less returns true if the given storable is less than this storable.
	Less(Storable) bool

	// ID returns a unique identifier.
	ID() string

	Copy() Storable
}

// ContainerStorable is an interface that supports Storable containing other storables.
type ContainerStorable interface {
	Storable

	// HasPointer returns true if any of its child storables is SlabIDStorable
	// (references to another slab). This function is used during encoding.
	HasPointer() bool
}

// WrapperStorable is an interface that supports storable wrapping another storable.
type WrapperStorable interface {
	Storable

	// UnwrapAtreeStorable returns innermost wrapped Storable.
	UnwrapAtreeStorable() Storable

	// WrapAtreeStorable returns a new WrapperStorable with given storable as innermost wrapped storable.
	WrapAtreeStorable(Storable) Storable
}

// hasPointer reports whether storable is a ContainerStorable with pointers;
// all other storables are pointer-free by definition.
func hasPointer(storable Storable) bool {
	if cs, ok := storable.(ContainerStorable); ok {
		return cs.HasPointer()
	}
	return false
}

// unwrapStorable returns the innermost wrapped storable, or s itself when
// s is not a WrapperStorable.
func unwrapStorable(s Storable) Storable {
	switch s := s.(type) {
	case WrapperStorable:
		return s.UnwrapAtreeStorable()
	default:
		return s
	}
}

// getLoadedValue returns the value for storable only if the slab it
// references (directly or through a wrapper) is already loaded in storage.
// It returns (nil, nil) when the referenced slab is not loaded.
func getLoadedValue(storage SlabStorage, storable Storable) (Value, error) {
	switch storable := storable.(type) {
	case SlabIDStorable:
		slab := storage.RetrieveIfLoaded(SlabID(storable))
		if slab == nil {
			// Skip because it references unloaded slab.
			return nil, nil
		}

		v, err := slab.StoredValue(storage)
		if err != nil {
			// Wrap err as external error (if needed) because err is returned by Storable interface.
			return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value")
		}

		return v, nil

	case WrapperStorable:
		// Check if wrapped storable is SlabIDStorable.
		wrappedStorable := unwrapStorable(storable)

		if wrappedSlabIDStorable, isSlabIDStorable := wrappedStorable.(SlabIDStorable); isSlabIDStorable {
			slab := storage.RetrieveIfLoaded(SlabID(wrappedSlabIDStorable))
			if slab == nil {
				// Skip because it references unloaded slab.
				return nil, nil
			}
		}

		v, err := storable.StoredValue(storage)
		if err != nil {
			// Wrap err as external error (if needed) because err is returned by Storable interface.
			return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value")
		}

		return v, nil

	default:
		v, err := storable.StoredValue(storage)
		if err != nil {
			// Wrap err as external error (if needed) because err is returned by Storable interface.
			return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value")
		}

		return v, nil
	}
}
-------------------------------------------------------------------------------- /storable_slab.go: --------------------------------------------------------------------------------
/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright Flow Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package atree

import "fmt"

// StorableSlab allows storing storables (CBOR encoded data) directly in a slab.
// Eventually we will only have a dictionary at the account storage root,
// so this won't be needed, but during the refactor we have the need to store
// other non-dictionary values (e.g. strings, integers, etc.) directly in accounts
// (i.e. directly in slabs aka registers)
type StorableSlab struct {
	slabID   SlabID
	storable Storable
}

var _ Slab = &StorableSlab{}

// NewStorableSlab stores storable in a new slab under a generated ID and
// returns a SlabIDStorable referencing that slab.
func NewStorableSlab(storage SlabStorage, address Address, storable Storable) (Storable, error) {
	id, err := storage.GenerateSlabID(address)
	if err != nil {
		// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
39 | return nil, wrapErrorfAsExternalErrorIfNeeded( 40 | err, 41 | fmt.Sprintf( 42 | "failed to generate slab ID for address 0x%x", 43 | address, 44 | ), 45 | ) 46 | } 47 | 48 | slab := &StorableSlab{ 49 | slabID: id, 50 | storable: storable, 51 | } 52 | 53 | err = storeSlab(storage, slab) 54 | if err != nil { 55 | return nil, err 56 | } 57 | 58 | return SlabIDStorable(id), nil 59 | } 60 | 61 | func (s *StorableSlab) String() string { 62 | return fmt.Sprintf("StorableSlab id:%s storable:%s", s.slabID, s.storable) 63 | } 64 | 65 | func (s *StorableSlab) ChildStorables() []Storable { 66 | return []Storable{s.storable} 67 | } 68 | 69 | func (s *StorableSlab) Encode(enc *Encoder) error { 70 | 71 | const version = 1 72 | 73 | h, err := newStorableSlabHead(version) 74 | if err != nil { 75 | return NewEncodingError(err) 76 | } 77 | 78 | h.setNoSizeLimit() 79 | 80 | if hasPointer(s.storable) { 81 | h.setHasPointers() 82 | } 83 | 84 | _, err = enc.Write(h[:]) 85 | if err != nil { 86 | return NewEncodingError(err) 87 | } 88 | 89 | err = s.storable.Encode(enc) 90 | if err != nil { 91 | // Wrap err as external error (if needed) because err is returned by Storable interface. 92 | return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode storable") 93 | } 94 | 95 | if enc.hasInlinedExtraData() { 96 | return NewEncodingError(fmt.Errorf("failed to encode storable slab because storable contains inlined array/map")) 97 | } 98 | 99 | return nil 100 | } 101 | 102 | func (s *StorableSlab) ByteSize() uint32 { 103 | return versionAndFlagSize + s.storable.ByteSize() 104 | } 105 | 106 | func (s *StorableSlab) SlabID() SlabID { 107 | return s.slabID 108 | } 109 | 110 | func (s *StorableSlab) StoredValue(storage SlabStorage) (Value, error) { 111 | value, err := s.storable.StoredValue(storage) 112 | if err != nil { 113 | // Wrap err as external error (if needed) because err is returned by Storable interface. 
114 | return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") 115 | } 116 | return value, nil 117 | } 118 | 119 | func (*StorableSlab) Split(_ SlabStorage) (Slab, Slab, error) { 120 | return nil, nil, NewNotApplicableError("StorableSlab", "Slab", "Split") 121 | } 122 | 123 | func (*StorableSlab) Merge(_ Slab) error { 124 | return NewNotApplicableError("StorableSlab", "Slab", "Merge") 125 | } 126 | 127 | func (*StorableSlab) LendToRight(_ Slab) error { 128 | return NewNotApplicableError("StorableSlab", "Slab", "LendToRight") 129 | } 130 | 131 | func (*StorableSlab) BorrowFromRight(_ Slab) error { 132 | return NewNotApplicableError("StorableSlab", "Slab", "BorrowFromRight") 133 | } 134 | -------------------------------------------------------------------------------- /storable_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package atree_test 20 | 21 | import ( 22 | "testing" 23 | 24 | "github.com/stretchr/testify/require" 25 | 26 | "github.com/onflow/atree" 27 | ) 28 | 29 | func TestIsCBORTagNumberRangeAvailable(t *testing.T) { 30 | minTagNum, maxTagNum := atree.ReservedCBORTagNumberRange() 31 | 32 | t.Run("error", func(t *testing.T) { 33 | _, err := atree.IsCBORTagNumberRangeAvailable(maxTagNum, minTagNum) 34 | var userError *atree.UserError 35 | require.ErrorAs(t, err, &userError) 36 | }) 37 | 38 | t.Run("identical", func(t *testing.T) { 39 | available, err := atree.IsCBORTagNumberRangeAvailable(minTagNum, maxTagNum) 40 | require.NoError(t, err) 41 | require.False(t, available) 42 | }) 43 | 44 | t.Run("subrange", func(t *testing.T) { 45 | available, err := atree.IsCBORTagNumberRangeAvailable(minTagNum, maxTagNum-1) 46 | require.NoError(t, err) 47 | require.False(t, available) 48 | 49 | available, err = atree.IsCBORTagNumberRangeAvailable(minTagNum+1, maxTagNum) 50 | require.NoError(t, err) 51 | require.False(t, available) 52 | }) 53 | 54 | t.Run("partial overlap", func(t *testing.T) { 55 | available, err := atree.IsCBORTagNumberRangeAvailable(minTagNum-1, maxTagNum-1) 56 | require.NoError(t, err) 57 | require.False(t, available) 58 | 59 | available, err = atree.IsCBORTagNumberRangeAvailable(minTagNum+1, maxTagNum+1) 60 | require.NoError(t, err) 61 | require.False(t, available) 62 | }) 63 | 64 | t.Run("non-overlap", func(t *testing.T) { 65 | available, err := atree.IsCBORTagNumberRangeAvailable(minTagNum-10, minTagNum-1) 66 | require.NoError(t, err) 67 | require.True(t, available) 68 | 69 | available, err = atree.IsCBORTagNumberRangeAvailable(minTagNum-1, minTagNum-1) 70 | require.NoError(t, err) 71 | require.True(t, available) 72 | 73 | available, err = atree.IsCBORTagNumberRangeAvailable(maxTagNum+1, maxTagNum+10) 74 | require.NoError(t, err) 75 | require.True(t, available) 76 | 77 | available, err = atree.IsCBORTagNumberRangeAvailable(maxTagNum+10, maxTagNum+10) 
78 | require.NoError(t, err) 79 | require.True(t, available) 80 | }) 81 | } 82 | -------------------------------------------------------------------------------- /storage_health_check.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import "fmt" 22 | 23 | // CheckStorageHealth checks for the health of slab storage. 24 | // It traverses the slabs and checks these factors: 25 | // - All non-root slabs only has a single parent reference (no double referencing) 26 | // - Every child of a parent shares the same ownership (childSlabID.Address == parentSlabID.Address) 27 | // - The number of root slabs are equal to the expected number (skipped if expectedNumberOfRootSlabs is -1) 28 | // This should be used for testing purposes only, as it might be slow to process 29 | func CheckStorageHealth(storage SlabStorage, expectedNumberOfRootSlabs int) (map[SlabID]struct{}, error) { 30 | parentOf := make(map[SlabID]SlabID) 31 | leaves := make([]SlabID, 0) 32 | 33 | slabIterator, err := storage.SlabIterator() 34 | if err != nil { 35 | // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
36 | return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create slab iterator") 37 | } 38 | 39 | slabs := map[SlabID]Slab{} 40 | 41 | for { 42 | id, slab := slabIterator() 43 | if id == SlabIDUndefined { 44 | break 45 | } 46 | 47 | if _, ok := slabs[id]; ok { 48 | return nil, NewFatalError(fmt.Errorf("duplicate slab %s", id)) 49 | } 50 | slabs[id] = slab 51 | 52 | atLeastOneExternalSlab := false 53 | childStorables := slab.ChildStorables() 54 | 55 | for len(childStorables) > 0 { 56 | 57 | var next []Storable 58 | 59 | for _, s := range childStorables { 60 | 61 | if sids, ok := s.(SlabIDStorable); ok { 62 | sid := SlabID(sids) 63 | if _, found := parentOf[sid]; found { 64 | return nil, NewFatalError(fmt.Errorf("two parents are captured for the slab %s", sid)) 65 | } 66 | parentOf[sid] = id 67 | atLeastOneExternalSlab = true 68 | } 69 | 70 | // This handles inlined slab because inlined slab is a child storable (s) and 71 | // we traverse s.ChildStorables() for its inlined elements. 72 | next = append(next, s.ChildStorables()...) 73 | } 74 | 75 | childStorables = next 76 | } 77 | 78 | if !atLeastOneExternalSlab { 79 | leaves = append(leaves, id) 80 | } 81 | } 82 | 83 | rootsMap := make(map[SlabID]struct{}) 84 | visited := make(map[SlabID]struct{}) 85 | var id SlabID 86 | for _, leaf := range leaves { 87 | id = leaf 88 | if _, ok := visited[id]; ok { 89 | return nil, NewFatalError(fmt.Errorf("at least two references found to the leaf slab %s", id)) 90 | } 91 | visited[id] = struct{}{} 92 | for { 93 | parentID, found := parentOf[id] 94 | if !found { 95 | // we reach the root 96 | rootsMap[id] = struct{}{} 97 | break 98 | } 99 | visited[parentID] = struct{}{} 100 | 101 | childSlab, ok, err := storage.Retrieve(id) 102 | if !ok { 103 | return nil, NewSlabNotFoundErrorf(id, "failed to get child slab") 104 | } 105 | if err != nil { 106 | // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
107 | return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve child slab %s", id)) 108 | } 109 | 110 | parentSlab, ok, err := storage.Retrieve(parentID) 111 | if !ok { 112 | // Report the parent's ID here: it is the parent slab (parentID), not the child (id), that is missing. 112 | return nil, NewSlabNotFoundErrorf(parentID, "failed to get parent slab") 113 | } 114 | if err != nil { 115 | // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 116 | return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve parent slab %s", parentID)) 117 | } 118 | 119 | childOwner := childSlab.SlabID().address 120 | parentOwner := parentSlab.SlabID().address 121 | 122 | if childOwner != parentOwner { 123 | return nil, NewFatalError( 124 | fmt.Errorf( 125 | "parent and child are not owned by the same account: child.owner %s, parent.owner %s", 126 | childOwner, 127 | parentOwner, 128 | )) 129 | } 130 | id = parentID 131 | } 132 | } 133 | 134 | if len(visited) != len(slabs) { 135 | 136 | var unreachableID SlabID 137 | var unreachableSlab Slab 138 | 139 | for id, slab := range slabs { 140 | if _, ok := visited[id]; !ok { 141 | unreachableID = id 142 | unreachableSlab = slab 143 | break 144 | } 145 | } 146 | 147 | return nil, NewFatalError( 148 | fmt.Errorf( 149 | "slab was not reachable from leaves: %s: %s", 150 | unreachableID, 151 | unreachableSlab, 152 | )) 153 | } 154 | 155 | if (expectedNumberOfRootSlabs >= 0) && (len(rootsMap) != expectedNumberOfRootSlabs) { 156 | return nil, NewFatalError( 157 | fmt.Errorf( 158 | "number of root slabs doesn't match: expected %d, got %d", 159 | expectedNumberOfRootSlabs, 160 | len(rootsMap), 161 | )) 162 | } 163 | 164 | return rootsMap, nil 165 | } 166 | -------------------------------------------------------------------------------- /test_utils/expected_value_utils.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the
Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package test_utils 20 | 21 | import ( 22 | "fmt" 23 | "reflect" 24 | 25 | "github.com/onflow/atree" 26 | ) 27 | 28 | // ExpectedArrayValue 29 | 30 | type ExpectedArrayValue []atree.Value 31 | 32 | var _ atree.Value = &ExpectedArrayValue{} 33 | 34 | func (v ExpectedArrayValue) Storable(atree.SlabStorage, atree.Address, uint64) (atree.Storable, error) { 35 | panic(atree.NewUnreachableError()) 36 | } 37 | 38 | // ExpectedMapValue 39 | 40 | type ExpectedMapValue map[atree.Value]atree.Value 41 | 42 | var _ atree.Value = &ExpectedMapValue{} 43 | 44 | func (v ExpectedMapValue) Storable(atree.SlabStorage, atree.Address, uint64) (atree.Storable, error) { 45 | panic(atree.NewUnreachableError()) 46 | } 47 | 48 | // ExpectedWrapperValue 49 | 50 | type ExpectedWrapperValue struct { 51 | Value atree.Value 52 | } 53 | 54 | var _ atree.Value = &ExpectedWrapperValue{} 55 | 56 | func NewExpectedWrapperValue(value atree.Value) ExpectedWrapperValue { 57 | return ExpectedWrapperValue{value} 58 | } 59 | 60 | func (v ExpectedWrapperValue) Storable(atree.SlabStorage, atree.Address, uint64) (atree.Storable, error) { 61 | panic(atree.NewUnreachableError()) 62 | } 63 | 64 | func ValueEqual(expected atree.Value, actual atree.Value) (bool, error) { 65 | switch expected := expected.(type) { 66 | case ExpectedArrayValue: 67 | actual, ok := actual.(*atree.Array) 68 | if !ok { 69 | return false, nil 70 | } 71 | return 
ArrayEqual(expected, actual) 72 | 73 | case *atree.Array: 74 | return false, fmt.Errorf("ValueEqual failed: expected value shouldn't be *atree.Array") 75 | 76 | case ExpectedMapValue: 77 | actual, ok := actual.(*atree.OrderedMap) 78 | if !ok { 79 | return false, nil 80 | } 81 | return MapEqual(expected, actual) 82 | 83 | case *atree.OrderedMap: 84 | return false, fmt.Errorf("ValueEqual failed: expected value shouldn't be *atree.OrderedMap") 85 | 86 | case ExpectedWrapperValue: 87 | actual, ok := actual.(SomeValue) 88 | if !ok { 89 | return false, nil 90 | } 91 | return ValueEqual(expected.Value, actual.Value) 92 | 93 | case SomeValue: 94 | return false, fmt.Errorf("ValueEqual failed: expected value shouldn't be SomeValue") 95 | 96 | default: 97 | return reflect.DeepEqual(expected, actual), nil 98 | } 99 | } 100 | 101 | func ArrayEqual(expected ExpectedArrayValue, actual *atree.Array) (bool, error) { 102 | if uint64(len(expected)) != actual.Count() { 103 | return false, nil 104 | } 105 | 106 | iterator, err := actual.ReadOnlyIterator() 107 | if err != nil { 108 | return false, err 109 | } 110 | 111 | i := 0 112 | for { 113 | actualValue, err := iterator.Next() 114 | if err != nil { 115 | return false, err 116 | } 117 | 118 | if actualValue == nil { 119 | break 120 | } 121 | 122 | if i >= len(expected) { 123 | return false, nil 124 | } 125 | 126 | equal, err := ValueEqual(expected[i], actualValue) 127 | if !equal || err != nil { 128 | return equal, err 129 | } 130 | 131 | i++ 132 | } 133 | 134 | if len(expected) != i { 135 | return false, fmt.Errorf("ArrayEqual failed: iterated %d time, expected %d elements", i, len(expected)) 136 | } 137 | 138 | return true, nil 139 | } 140 | 141 | func MapEqual(expected ExpectedMapValue, actual *atree.OrderedMap) (bool, error) { 142 | if uint64(len(expected)) != actual.Count() { 143 | return false, nil 144 | } 145 | 146 | iterator, err := actual.ReadOnlyIterator() 147 | if err != nil { 148 | return false, err 149 | } 150 | 151 | i 
:= 0 152 | for { 153 | actualKey, actualValue, err := iterator.Next() 154 | if err != nil { 155 | return false, err 156 | } 157 | 158 | if actualKey == nil { 159 | break 160 | } 161 | 162 | expectedValue, exist := expected[actualKey] 163 | if !exist { 164 | return false, nil 165 | } 166 | 167 | equal, err := ValueEqual(expectedValue, actualValue) 168 | if !equal || err != nil { 169 | return equal, err 170 | } 171 | 172 | i++ 173 | } 174 | 175 | if len(expected) != i { 176 | return false, fmt.Errorf("MapEqual failed: iterated %d time, expected %d elements", i, len(expected)) 177 | } 178 | 179 | return true, nil 180 | } 181 | -------------------------------------------------------------------------------- /test_utils/storage_utils.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package test_utils 20 | 21 | import ( 22 | "github.com/onflow/atree" 23 | ) 24 | 25 | type InMemBaseStorage struct { 26 | segments map[atree.SlabID][]byte 27 | slabIndex map[atree.Address]atree.SlabIndex 28 | bytesRetrieved int 29 | bytesStored int 30 | segmentsReturned map[atree.SlabID]struct{} 31 | segmentsUpdated map[atree.SlabID]struct{} 32 | segmentsTouched map[atree.SlabID]struct{} 33 | } 34 | 35 | var _ atree.BaseStorage = &InMemBaseStorage{} 36 | 37 | func NewInMemBaseStorage() *InMemBaseStorage { 38 | return NewInMemBaseStorageFromMap( 39 | make(map[atree.SlabID][]byte), 40 | ) 41 | } 42 | 43 | func NewInMemBaseStorageFromMap(segments map[atree.SlabID][]byte) *InMemBaseStorage { 44 | return &InMemBaseStorage{ 45 | segments: segments, 46 | slabIndex: make(map[atree.Address]atree.SlabIndex), 47 | segmentsReturned: make(map[atree.SlabID]struct{}), 48 | segmentsUpdated: make(map[atree.SlabID]struct{}), 49 | segmentsTouched: make(map[atree.SlabID]struct{}), 50 | } 51 | } 52 | 53 | func (s *InMemBaseStorage) Retrieve(id atree.SlabID) ([]byte, bool, error) { 54 | seg, ok := s.segments[id] 55 | s.bytesRetrieved += len(seg) 56 | s.segmentsReturned[id] = struct{}{} 57 | s.segmentsTouched[id] = struct{}{} 58 | return seg, ok, nil 59 | } 60 | 61 | func (s *InMemBaseStorage) Store(id atree.SlabID, data []byte) error { 62 | s.segments[id] = data 63 | s.bytesStored += len(data) 64 | s.segmentsUpdated[id] = struct{}{} 65 | s.segmentsTouched[id] = struct{}{} 66 | return nil 67 | } 68 | 69 | func (s *InMemBaseStorage) Remove(id atree.SlabID) error { 70 | s.segmentsUpdated[id] = struct{}{} 71 | s.segmentsTouched[id] = struct{}{} 72 | delete(s.segments, id) 73 | return nil 74 | } 75 | 76 | func (s *InMemBaseStorage) GenerateSlabID(address atree.Address) (atree.SlabID, error) { 77 | index := s.slabIndex[address] 78 | nextIndex := index.Next() 79 | 80 | s.slabIndex[address] = nextIndex 81 | return atree.NewSlabID(address, nextIndex), nil 82 | } 83 | 84 | func 
(s *InMemBaseStorage) SegmentCounts() int { 85 | return len(s.segments) 86 | } 87 | 88 | func (s *InMemBaseStorage) Size() int { 89 | total := 0 90 | for _, seg := range s.segments { 91 | total += len(seg) 92 | } 93 | return total 94 | } 95 | 96 | func (s *InMemBaseStorage) BytesRetrieved() int { 97 | return s.bytesRetrieved 98 | } 99 | 100 | func (s *InMemBaseStorage) BytesStored() int { 101 | return s.bytesStored 102 | } 103 | 104 | func (s *InMemBaseStorage) SegmentsReturned() int { 105 | return len(s.segmentsReturned) 106 | } 107 | 108 | func (s *InMemBaseStorage) SegmentsUpdated() int { 109 | return len(s.segmentsUpdated) 110 | } 111 | 112 | func (s *InMemBaseStorage) SegmentsTouched() int { 113 | return len(s.segmentsTouched) 114 | } 115 | 116 | func (s *InMemBaseStorage) ResetReporter() { 117 | s.bytesStored = 0 118 | s.bytesRetrieved = 0 119 | s.segmentsReturned = make(map[atree.SlabID]struct{}) 120 | s.segmentsUpdated = make(map[atree.SlabID]struct{}) 121 | s.segmentsTouched = make(map[atree.SlabID]struct{}) 122 | } 123 | -------------------------------------------------------------------------------- /test_utils/typeinfo_utils.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package test_utils 20 | 21 | import ( 22 | "fmt" 23 | 24 | "github.com/fxamacker/cbor/v2" 25 | 26 | "github.com/onflow/atree" 27 | ) 28 | 29 | // SimpleTypeInfo 30 | 31 | type SimpleTypeInfo struct { 32 | value uint64 33 | } 34 | 35 | var _ atree.TypeInfo = SimpleTypeInfo{} 36 | 37 | func NewSimpleTypeInfo(value uint64) SimpleTypeInfo { 38 | return SimpleTypeInfo{value} 39 | } 40 | 41 | func (i SimpleTypeInfo) Value() uint64 { 42 | return i.value 43 | } 44 | 45 | func (i SimpleTypeInfo) Copy() atree.TypeInfo { 46 | return i 47 | } 48 | 49 | func (i SimpleTypeInfo) IsComposite() bool { 50 | return false 51 | } 52 | 53 | func (i SimpleTypeInfo) Identifier() string { 54 | // Format the wrapped uint64, not the struct receiver: %d on the struct would render "uint64({5})". 54 | return fmt.Sprintf("uint64(%d)", i.value) 55 | } 56 | 57 | func (i SimpleTypeInfo) Encode(enc *cbor.StreamEncoder) error { 58 | return enc.EncodeUint64(i.value) 59 | } 60 | 61 | func (i SimpleTypeInfo) Equal(other atree.TypeInfo) bool { 62 | otherTestTypeInfo, ok := other.(SimpleTypeInfo) 63 | return ok && i.value == otherTestTypeInfo.value 64 | } 65 | 66 | // CompositeTypeInfo 67 | 68 | const CompositeTypeInfoTagNum = 246 69 | 70 | type CompositeTypeInfo struct { 71 | value uint64 72 | } 73 | 74 | var _ atree.TypeInfo = CompositeTypeInfo{} 75 | 76 | func NewCompositeTypeInfo(value uint64) CompositeTypeInfo { 77 | return CompositeTypeInfo{value} 78 | } 79 | 80 | func (i CompositeTypeInfo) Copy() atree.TypeInfo { 81 | return i 82 | } 83 | 84 | func (i CompositeTypeInfo) IsComposite() bool { 85 | return true 86 | } 87 | 88 | func (i CompositeTypeInfo) Identifier() string { 89 | // Format the wrapped uint64, not the struct receiver: %d on the struct would render "composite({5})". 89 | return fmt.Sprintf("composite(%d)", i.value) 90 | } 91 | 92 | func (i CompositeTypeInfo) Encode(enc *cbor.StreamEncoder) error { 93 | err := enc.EncodeTagHead(CompositeTypeInfoTagNum) 94 | if err != nil { 95 | return err 96 | } 97 | return enc.EncodeUint64(i.value) 98 | } 99 | 100 | func (i CompositeTypeInfo) Equal(other atree.TypeInfo) bool { 101 | otherTestTypeInfo, ok := other.(CompositeTypeInfo) 102 | return ok &&
i.value == otherTestTypeInfo.value 103 | } 104 | 105 | func CompareTypeInfo(a, b atree.TypeInfo) bool { 106 | switch a := a.(type) { 107 | case SimpleTypeInfo: 108 | return a.Equal(b) 109 | 110 | case CompositeTypeInfo: 111 | return a.Equal(b) 112 | 113 | default: 114 | return false 115 | } 116 | } 117 | 118 | func DecodeTypeInfo(dec *cbor.StreamDecoder) (atree.TypeInfo, error) { 119 | t, err := dec.NextType() 120 | if err != nil { 121 | return nil, err 122 | } 123 | 124 | switch t { 125 | case cbor.UintType: 126 | value, err := dec.DecodeUint64() 127 | if err != nil { 128 | return nil, err 129 | } 130 | 131 | return SimpleTypeInfo{value: value}, nil 132 | 133 | case cbor.TagType: 134 | tagNum, err := dec.DecodeTagNumber() 135 | if err != nil { 136 | return nil, err 137 | } 138 | 139 | switch tagNum { 140 | case CompositeTypeInfoTagNum: 141 | value, err := dec.DecodeUint64() 142 | if err != nil { 143 | return nil, err 144 | } 145 | 146 | return CompositeTypeInfo{value: value}, nil 147 | 148 | default: 149 | return nil, fmt.Errorf("failed to decode type info") 150 | } 151 | 152 | default: 153 | return nil, fmt.Errorf("failed to decode type info") 154 | } 155 | 156 | } 157 | -------------------------------------------------------------------------------- /typeinfo.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import ( 22 | "bytes" 23 | "fmt" 24 | 25 | "github.com/fxamacker/cbor/v2" 26 | ) 27 | 28 | type TypeInfo interface { 29 | Encode(*cbor.StreamEncoder) error 30 | IsComposite() bool 31 | Copy() TypeInfo 32 | } 33 | 34 | type TypeInfoDecoder func( 35 | decoder *cbor.StreamDecoder, 36 | ) ( 37 | TypeInfo, 38 | error, 39 | ) 40 | 41 | // encodeTypeInfo encodes TypeInfo either: 42 | // - as is (for TypeInfo in root slab extra data section), or 43 | // - as index of inlined TypeInfos (for TypeInfo in inlined slab extra data section) 44 | type encodeTypeInfo func(*Encoder, TypeInfo) error 45 | 46 | // defaultEncodeTypeInfo encodes TypeInfo as is. 47 | func defaultEncodeTypeInfo(enc *Encoder, typeInfo TypeInfo) error { 48 | return typeInfo.Encode(enc.CBOR) 49 | } 50 | 51 | func decodeTypeInfoRefIfNeeded(inlinedTypeInfo []TypeInfo, defaultTypeInfoDecoder TypeInfoDecoder) TypeInfoDecoder { 52 | if len(inlinedTypeInfo) == 0 { 53 | return defaultTypeInfoDecoder 54 | } 55 | 56 | return func(decoder *cbor.StreamDecoder) (TypeInfo, error) { 57 | rawTypeInfo, err := decoder.DecodeRawBytes() 58 | if err != nil { 59 | return nil, NewDecodingError(fmt.Errorf("failed to decode raw type info: %w", err)) 60 | } 61 | 62 | if len(rawTypeInfo) > len(typeInfoRefTagHeadAndTagNumber) && 63 | bytes.Equal( 64 | rawTypeInfo[:len(typeInfoRefTagHeadAndTagNumber)], 65 | typeInfoRefTagHeadAndTagNumber) { 66 | 67 | // Type info is encoded as type info ref. 
68 | 69 | var index uint64 70 | 71 | err = cbor.Unmarshal(rawTypeInfo[len(typeInfoRefTagHeadAndTagNumber):], &index) 72 | if err != nil { 73 | return nil, NewDecodingError(err) 74 | } 75 | 76 | if index >= uint64(len(inlinedTypeInfo)) { 77 | return nil, NewDecodingError(fmt.Errorf("failed to decode type info ref: expect index < %d, got %d", len(inlinedTypeInfo), index)) 78 | } 79 | 80 | return inlinedTypeInfo[int(index)], nil 81 | } 82 | 83 | // Decode type info as is. 84 | 85 | dec := cbor.NewByteStreamDecoder(rawTypeInfo) 86 | 87 | return defaultTypeInfoDecoder(dec) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /value.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | type Value interface { 22 | Storable(SlabStorage, Address, uint64) (Storable, error) 23 | } 24 | 25 | // WrapperValue is an interface that supports value wrapping another value. 26 | type WrapperValue interface { 27 | Value 28 | 29 | // UnwrapAtreeValue returns innermost wrapped Value and wrapper size. 
30 | UnwrapAtreeValue() (Value, uint64) 31 | } 32 | 33 | type ValueComparator func(SlabStorage, Value, Storable) (bool, error) 34 | 35 | type StorableComparator func(Storable, Storable) bool 36 | 37 | type parentUpdater func() (found bool, err error) 38 | 39 | // mutableValueNotifier is an interface that allows mutable child value to notify and update parent. 40 | type mutableValueNotifier interface { 41 | Value 42 | ValueID() ValueID 43 | setParentUpdater(parentUpdater) 44 | Inlined() bool 45 | Inlinable(uint64) bool 46 | } 47 | 48 | func unwrapValue(v Value) (Value, uint64) { 49 | switch v := v.(type) { 50 | case WrapperValue: 51 | return v.UnwrapAtreeValue() 52 | default: 53 | return v, 0 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /value_id.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Atree - Scalable Arrays and Ordered Maps 3 | * 4 | * Copyright Flow Foundation 5 | * 6 | * Licensed under the Apache License, Version 2.0 (the "License"); 7 | * you may not use this file except in compliance with the License. 8 | * You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package atree 20 | 21 | import ( 22 | "bytes" 23 | "encoding/binary" 24 | "fmt" 25 | ) 26 | 27 | const ( 28 | ValueIDLength = SlabIDLength 29 | ) 30 | 31 | // ValueID identifies an Array or OrderedMap. ValueID is consistent 32 | // independent of inlining status, while ValueID and SlabID are used 33 | // differently despite having the same size and content under the hood. 
34 | // By contrast, SlabID is affected by inlining because it identifies 35 | // a slab in storage. Given this, ValueID should be used for 36 | // resource tracking, etc. 37 | type ValueID [ValueIDLength]byte 38 | 39 | var emptyValueID = ValueID{} 40 | 41 | func (vid ValueID) equal(sid SlabID) bool { 42 | return bytes.Equal(vid[:len(sid.address)], sid.address[:]) && 43 | bytes.Equal(vid[len(sid.address):], sid.index[:]) 44 | } 45 | 46 | func (vid ValueID) String() string { 47 | return fmt.Sprintf( 48 | "0x%x.%d", 49 | binary.BigEndian.Uint64(vid[:SlabAddressLength]), 50 | binary.BigEndian.Uint64(vid[SlabAddressLength:]), 51 | ) 52 | } 53 | 54 | func slabIDToValueID(sid SlabID) ValueID { 55 | var id ValueID 56 | n := copy(id[:], sid.address[:]) 57 | copy(id[n:], sid.index[:]) 58 | return id 59 | } 60 | --------------------------------------------------------------------------------