├── .github
├── CODEOWNERS
├── ISSUE_TEMPLATE
│ ├── bug-report.yml
│ └── feature-request.yml
├── auto_request_review.yml
├── dependabot.yml
└── workflows
│ ├── buf-ci.yml
│ ├── buf-release.yml
│ ├── ci-release.yml
│ ├── issue-label-automation.yml
│ ├── lint-pull-request.yml
│ ├── lint.yml
│ ├── markdown-linter.yml
│ ├── pr-review-requester.yml
│ ├── project-automation.yml
│ └── test.yml
├── .gitignore
├── .golangci.yml
├── .markdownlint.yaml
├── .yamllint.yml
├── LICENSE
├── Makefile
├── NOTICE
├── README.md
├── buf.gen.yaml
├── buf.yaml
├── builder.go
├── builder_test.go
├── go.mod
├── go.sum
├── inclusion
├── blob_share_commitment_rules.go
├── blob_share_commitment_rules_test.go
├── commitment.go
├── commitment_test.go
└── doc.go
├── internal
├── test
│ ├── factory.go
│ └── factory_test.go
└── testdata
│ └── big_block.json
├── proto
└── blob
│ └── v1
│ ├── blob.pb.go
│ └── blob.proto
├── share
├── README.md
├── blob.go
├── blob_test.go
├── compact_shares_test.go
├── consts.go
├── counter.go
├── counter_test.go
├── info_byte.go
├── info_byte_test.go
├── namespace.go
├── namespace_test.go
├── padding.go
├── padding_test.go
├── parse.go
├── parse_compact_shares.go
├── parse_sparse_shares.go
├── parse_sparse_shares_test.go
├── parse_test.go
├── random_blobs.go
├── random_namespace.go
├── random_shares.go
├── range.go
├── range_test.go
├── reserved_bytes.go
├── reserved_bytes_test.go
├── share.go
├── share_benchmark_test.go
├── share_builder.go
├── share_builder_test.go
├── share_sequence.go
├── share_sequence_test.go
├── share_test.go
├── sparse_shares_test.go
├── split_compact_shares.go
├── split_compact_shares_test.go
├── split_sparse_shares.go
├── split_sparse_shares_test.go
├── utils.go
└── utils_test.go
├── square.go
├── square_benchmark_test.go
├── square_test.go
└── tx
├── blob_tx.go
└── index_wrapper.go
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # CODEOWNERS:
2 |
3 | # Everything goes through the following "global owners" by default. Unless a later
4 | # match takes precedence, the "global owners" will be requested for review when
5 | # someone opens a PR. Note that the last matching pattern takes precedence, so
6 | # global owners are only requested if there isn't a more specific codeowner
7 | # specified below. For this reason, the global owners are often repeated in
8 | # directory-level definitions.
9 |
10 | # global owners
11 | * @celestiaorg/celestia-core @celestiaorg/celestia-node
12 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.yml:
--------------------------------------------------------------------------------
1 | name: Bug Report
2 | description: Create a report to help us squash bugs!
3 | title: ""
4 | labels: ["bug"]
5 |
6 | body:
7 | - type: markdown
8 | attributes:
9 | value: |
10 | IMPORTANT: Prior to opening a bug report, check if it affects one of the
11 | core modules and if it's eligible for a bug bounty on `SECURITY.md`.
12 | Bugs that are not submitted through the appropriate channels won't
13 | receive any bounty.
14 |
15 | - type: textarea
16 | id: summary
17 | attributes:
18 | label: Summary of Bug
19 | description: Concisely describe the issue.
20 | validations:
21 | required: true
22 |
23 | - type: textarea
24 | id: version
25 | attributes:
26 | label: Version
27 | description: git commit hash or release version
28 | validations:
29 | required: true
30 |
31 | - type: textarea
32 | id: repro
33 | attributes:
34 | label: Steps to Reproduce
35 | description: >
36 | What commands in order should someone run to reproduce your problem?
37 | validations:
38 | required: true
39 |
40 | - type: checkboxes
41 | id: admin
42 | attributes:
43 | label: For Admin Use
44 | description: (do not edit)
45 | options:
46 | - label: Not duplicate issue
47 | - label: Appropriate labels applied
48 | - label: Appropriate contributors tagged
49 | - label: Contributor assigned/self-assigned
50 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.yml:
--------------------------------------------------------------------------------
1 | name: Feature Request
2 | description: Create a proposal to request a feature
3 | title: ""
4 | labels: ["enhancement"]
5 |
6 | body:
7 | - type: markdown
8 | attributes:
9 | value: |
10 | ✰ Thanks for opening an issue! ✰
11 | Before smashing the submit button please fill in the template.
12 | Word of caution: poorly thought-out proposals may be rejected without
13 | deliberation.
14 |
15 | - type: textarea
16 | id: summary
17 | attributes:
18 | label: Summary
19 | description: Short, concise description of the proposed feature.
20 | validations:
21 | required: true
22 |
23 | - type: textarea
24 | id: problem
25 | attributes:
26 | label: Problem Definition
27 | description: |
28 | Why do we need this feature?
29 | What problems may be addressed by introducing this feature?
30 | What benefits does the project stand to gain by including this feature?
31 | Are there any disadvantages of including this feature?
32 | validations:
33 | required: true
34 |
35 | - type: textarea
36 | id: proposal
37 | attributes:
38 | label: Proposal
39 | description: Detailed description of requirements of implementation.
40 | validations:
41 | required: true
42 |
43 | - type: checkboxes
44 | id: admin
45 | attributes:
46 | label: For Admin Use
47 | description: (do not edit)
48 | options:
49 | - label: Not duplicate issue
50 | - label: Appropriate labels applied
51 | - label: Appropriate contributors tagged
52 | - label: Contributor assigned/self-assigned
53 |
--------------------------------------------------------------------------------
/.github/auto_request_review.yml:
--------------------------------------------------------------------------------
1 | # More info at https://github.com/necojackarc/auto-request-review
2 | reviewers:
3 | # The default reviewers
4 | defaults:
5 | # Example of Github Team. Github team must have write access to repo.
6 | # NOTE: This assigned the team itself, not members of the team. The Github
7 | # team auto PR assignment will then turn this into individuals
8 | - team:celestia-core
9 | - team:celestia-node
10 |
11 | options:
12 | ignore_draft: true
13 | ignored_keywords:
14 | - DO NOT REVIEW
15 | enable_group_assignment: false
16 |
17 | # Randomly pick reviewers up to this number.
18 | # Do not set this option if you'd like to assign all matching reviewers.
19 | number_of_reviewers: 2
20 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: github-actions
4 | directory: "/"
5 | schedule:
6 | interval: weekly
7 | open-pull-requests-limit: 10
8 | labels:
9 | - dependencies
10 | - package-ecosystem: gomod
11 | directory: "/"
12 | schedule:
13 | interval: weekly
14 | open-pull-requests-limit: 10
15 | labels:
16 | - dependencies
17 |
--------------------------------------------------------------------------------
/.github/workflows/buf-ci.yml:
--------------------------------------------------------------------------------
1 | name: buf-ci
2 | on:
3 | push:
4 | branches:
5 | - main
6 | - "v[0-9]+.x"
7 | pull_request:
8 | permissions:
9 | contents: read
10 | pull-requests: write
11 | jobs:
12 | buf:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v4
16 | - uses: bufbuild/buf-setup-action@v1
17 | - uses: bufbuild/buf-breaking-action@v1
18 | with:
19 | against: 'https://github.com/celestiaorg/go-square.git#branch=main'
20 | - uses: bufbuild/buf-lint-action@v1
21 |
--------------------------------------------------------------------------------
/.github/workflows/buf-release.yml:
--------------------------------------------------------------------------------
1 | name: buf-release
2 | on:
3 | push:
4 | tags:
5 | - "v*"
6 | jobs:
7 | build:
8 | runs-on: ubuntu-latest
9 | steps:
10 | - uses: actions/checkout@v4
11 | - uses: bufbuild/buf-setup-action@v1
12 | with:
13 | github_token: ${{ secrets.GITHUB_TOKEN }}
14 | version: "1.44.0"
15 | # Push the protobuf definitions to the BSR
16 | - uses: bufbuild/buf-push-action@v1
17 | with:
18 | buf_token: ${{ secrets.BUF_TOKEN }}
19 | - name: "push the tag label to BSR"
20 | run: |
21 | set -euo pipefail
22 | echo ${{ secrets.BUF_TOKEN }} | buf registry login --token-stdin
23 | buf push --label ${{ github.ref_name }}
24 |
--------------------------------------------------------------------------------
/.github/workflows/ci-release.yml:
--------------------------------------------------------------------------------
1 | name: ci-release
2 |
3 | # Run this workflow on push events (i.e. PR merge) to main or release branches,
4 | # push events for new semantic version tags, and all PRs.
5 | on:
6 | push:
7 | branches:
8 | - main
9 | - "v[0-9]+.x"
10 | tags:
11 | - "v[0-9]+.[0-9]+.[0-9]+"
12 | - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+"
13 | - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+"
14 | - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+"
15 | pull_request:
16 |
17 | jobs:
18 | lint:
19 | uses: ./.github/workflows/lint.yml
20 |
21 | markdown-linter:
22 | uses: ./.github/workflows/markdown-linter.yml
23 |
24 | test:
25 | uses: ./.github/workflows/test.yml
26 |
--------------------------------------------------------------------------------
/.github/workflows/issue-label-automation.yml:
--------------------------------------------------------------------------------
1 | name: issue-label-automation
2 | on:
3 | pull_request_target:
4 | types: [opened]
5 | issues:
6 | types: [opened]
7 | jobs:
8 | automate-labels:
9 | runs-on: ubuntu-latest
10 | permissions:
11 | issues: write
12 | pull-requests: write
13 | env:
14 | IS_BOT: ${{ (github.actor == 'dependabot[bot]' || github.actor == 'mergify[bot]') }}
15 | IS_HUMAN: ${{ !(github.actor == 'dependabot[bot]' || github.actor == 'mergify[bot]') }}
16 | steps:
17 | - name: Check if issue or PR was created by external contributor
18 | if: env.IS_HUMAN == 'true' && github.repository_owner == 'celestiaorg'
19 | uses: tspascoal/get-user-teams-membership@v3
20 | id: teamCheck
21 | with:
22 | username: ${{ github.actor }}
23 | team: "celestia-core"
24 | GITHUB_TOKEN: ${{ secrets.PAT_TEAM_CHECK }}
25 |
26 | # If an issue was unlabeled add `needs:triage`.
27 | - name: Maybe label issue with `needs:triage`
28 | if: ${{ github.event_name == 'issues' }}
29 | uses: andymckay/labeler@master
30 | with:
31 | add-labels: "needs:triage"
32 | ignore-if-labeled: true
33 | repo-token: ${{ secrets.GITHUB_TOKEN }}
34 |
35 | # If an issue or PR was created by someone that isn't part of
36 | # celestia-core, add the `external` label.
37 | - name: Maybe label issue or PR with `external`
38 | if: env.IS_HUMAN == 'true' && steps.teamCheck.outputs.isTeamMember == 'false'
39 | uses: andymckay/labeler@master
40 | with:
41 | add-labels: "external"
42 | repo-token: ${{ secrets.GITHUB_TOKEN }}
43 |
44 | # If a PR was created by dependabot or mergify add the `bot` label.
45 | - name: Maybe label PR with `bot`
46 | if: env.IS_BOT == 'true'
47 | uses: andymckay/labeler@master
48 | with:
49 | add-labels: "bot"
50 | repo-token: ${{ secrets.GITHUB_TOKEN }}
51 |
--------------------------------------------------------------------------------
/.github/workflows/lint-pull-request.yml:
--------------------------------------------------------------------------------
1 | name: lint-pull-request
2 |
3 | on:
4 | pull_request_target:
5 | types:
6 | - opened
7 | - edited
8 | - synchronize
9 |
10 | permissions:
11 | pull-requests: read
12 |
13 | jobs:
14 | main:
15 | name: conventional-commit-pr-title
16 | runs-on: ubuntu-latest
17 | steps:
18 | - uses: amannn/action-semantic-pull-request@v5
19 | env:
20 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
21 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: lint
2 | on:
3 | workflow_call:
4 |
5 | jobs:
6 | golangci-lint:
7 | name: golangci-lint
8 | runs-on: ubuntu-latest
9 | timeout-minutes: 8
10 | steps:
11 | - uses: actions/checkout@v4
12 |
13 | - uses: actions/setup-go@v5
14 | with:
15 | go-version-file: ./go.mod
16 |
17 | - uses: technote-space/get-diff-action@v6.1.2
18 | with:
19 | # This job will pass without running if go.mod, go.sum, and *.go
20 | # wasn't modified.
21 | PATTERNS: |
22 | **/**.go
23 | go.mod
24 | go.sum
25 |
26 | - uses: golangci/golangci-lint-action@v6.3.2
27 | with:
28 | version: v1.63.4
29 | args: --timeout 10m
30 | github-token: ${{ secrets.github_token }}
31 | if: env.GIT_DIFF
32 |
33 | yamllint:
34 | runs-on: ubuntu-latest
35 | steps:
36 | - uses: actions/checkout@v4
37 | - uses: celestiaorg/.github/.github/actions/yamllint@v0.6.1
38 |
--------------------------------------------------------------------------------
/.github/workflows/markdown-linter.yml:
--------------------------------------------------------------------------------
1 | name: markdown-linter
2 | on:
3 | workflow_call:
4 |
5 | jobs:
6 | markdown-lint:
7 | runs-on: ubuntu-latest
8 | steps:
9 | - name: Checkout code
10 | uses: actions/checkout@v4
11 |
12 | - name: Setup Node
13 | uses: actions/setup-node@v4
14 | with:
15 | node-version: 18
16 |
17 | - name: Install markdownlint-cli
18 | run: npm install -g markdownlint-cli@0.32.1
19 | shell: bash
20 |
21 | - name: Run markdownlint
22 | run: markdownlint --config .markdownlint.yaml **/*.md
23 | shell: bash
24 |
--------------------------------------------------------------------------------
/.github/workflows/pr-review-requester.yml:
--------------------------------------------------------------------------------
1 | name: pr-review-requester
2 |
3 | on:
4 | # pull_request_target is used to allow forks write permissions when running
5 | # this workflow. With the pull_request trigger, forks do not have any write
6 | # access for security reasons, however write access is needed in order to
7 | # request reviews. Since this workflow is simply requesting reviewers, it is
8 | # safe to allow forks write access.
9 | pull_request_target:
10 |
11 | jobs:
12 | auto-request-review:
13 | name: Auto request reviews
14 | uses: celestiaorg/.github/.github/workflows/reusable_housekeeping.yml@v0.6.1 # yamllint disable-line rule:line-length
15 | secrets: inherit
16 | # write access for issues and pull requests is needed because the called
17 | # workflow requires write access to issues and pull requests and the
18 | # permissions must match
19 | permissions:
20 | issues: write
21 | pull-requests: write
22 | with:
23 | run-auto-request-review: true
24 |
--------------------------------------------------------------------------------
/.github/workflows/project-automation.yml:
--------------------------------------------------------------------------------
1 | name: project-automation
2 | on:
3 | issues:
4 | types:
5 | - opened
6 | jobs:
7 | add-to-project:
8 | name: Add new issues to the core/app project
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: actions/add-to-project@main
12 | with:
13 | project-url: https://github.com/orgs/celestiaorg/projects/24
14 | github-token: ${{ secrets.ADD_TO_PROJECT_PAT }}
15 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: test
2 | on:
3 | workflow_call:
4 |
5 | jobs:
6 | test:
7 | runs-on: ubuntu-latest
8 | steps:
9 | - uses: actions/checkout@v4
10 |
11 | - uses: actions/setup-go@v5
12 | with:
13 | go-version-file: ./go.mod
14 |
15 | - name: Run tests
16 | run: go test ./... -v -timeout 5m -race
17 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | go.work
2 | go.work.sum
3 | .idea/
4 |
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | run:
2 | timeout: 5m
3 | modules-download-mode: readonly
4 |
5 | linters:
6 | enable:
7 | - copyloopvar
8 | - gofumpt
9 | - misspell
10 | - nakedret
11 | - revive
12 | - prealloc
13 | - stylecheck
14 | - gocritic
16 |
17 | linters-settings:
18 | nakedret:
19 | # Ban the use of naked returns because they reduce code readability.
20 | max-func-lines: 0 # override the default: 30
21 |
--------------------------------------------------------------------------------
/.markdownlint.yaml:
--------------------------------------------------------------------------------
1 | "default": true # Default state for all rules
2 | "MD010":
3 | "code_blocks": false # Disable rule for hard tabs in code blocks
4 | "MD013": false # Disable rule for line length
5 | "MD033": false # Disable rule banning inline HTML
6 |
--------------------------------------------------------------------------------
/.yamllint.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Built from docs https://yamllint.readthedocs.io/en/stable/configuration.html
3 | extends: default
4 |
5 | rules:
6 | # 120 chars should be enough, but don't fail if a line is longer
7 | line-length:
8 | max: 120
9 | level: warning
10 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | DOCKER := $(shell which docker)
2 | DOCKER_BUF := $(DOCKER) run --rm -v $(CURDIR):/workspace --workdir /workspace bufbuild/buf:1.28.1
3 | PROJECTNAME=$(shell basename "$(PWD)")
4 |
5 | ## help: Get more info on make commands.
6 | help: Makefile
7 | @echo " Choose a command run in "$(PROJECTNAME)":"
8 | @sed -n 's/^##//p' $< | column -t -s ':' | sed -e 's/^/ /'
9 | .PHONY: help
10 |
11 | ## proto-gen: Generate protobuf files. Requires docker.
12 | proto-gen:
13 | @echo "--> Generating Protobuf files"
14 | $(DOCKER_BUF) generate
15 | .PHONY: proto-gen
16 |
17 | ## proto-lint: Lint protobuf files. Requires docker.
18 | proto-lint:
19 | @echo "--> Linting Protobuf files"
20 | @$(DOCKER_BUF) lint
21 | .PHONY: proto-lint
22 |
23 | ## lint: Lint Go files. Requires golangci-lint.
24 | lint:
25 | @echo "--> Lint source code using golangci-lint"
26 | @golangci-lint run
27 | .PHONY: lint
28 |
29 | ## fmt: Format files per linters golangci-lint and markdownlint.
30 | fmt:
31 | @echo "--> Running golangci-lint --fix"
32 | @golangci-lint run --fix
33 | @echo "--> Running markdownlint --fix"
34 | @markdownlint --fix --quiet --config .markdownlint.yaml .
35 | .PHONY: fmt
36 |
37 | ## test: Run unit tests.
38 | test:
39 | @echo "--> Run unit tests"
40 | @go test -mod=readonly ./...
41 | .PHONY: test
42 |
43 | ## benchmark: Run tests in benchmark mode.
44 | benchmark:
45 | @echo "--> Perform benchmark"
46 | @go test -mod=readonly -bench=. ./...
47 | .PHONY: benchmark
48 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | go-square
2 | Copyright 2024 Strange Loop Labs AG
3 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # go-square
2 |
3 | [](https://pkg.go.dev/github.com/celestiaorg/go-square/v2)
4 |
5 | `go-square` is a Go module that provides data structures and utilities for interacting with data squares in the Celestia network. The data square is a special form of block serialization in the Celestia blockchain designed for sampling. This repo deals with the original data square which is distinct from the extended data square. Operations on the extended data square are handled by [rsmt2d](https://github.com/celestiaorg/rsmt2d).
6 |
7 | Package | Description
8 | ----------|---------------------------------------------------------------------------------------------------------------------
9 | inclusion | Package inclusion contains functions to generate the blob share commitment from a given blob.
10 | proto | Package contains proto definitions and go generated code
11 | share | Package share contains encoding and decoding logic from blobs to shares.
12 | square | Package square implements the logic to construct the original data square based on a list of transactions.
13 | tx | Package tx contains BlobTx and IndexWrapper types
14 |
15 | ## Installation
16 |
17 | To use `go-square` as a dependency in your Go project, you can use `go get`:
18 |
19 | ```bash
20 | go get github.com/celestiaorg/go-square/v2
21 | ```
22 |
23 | ## Branches and Releasing
24 |
25 | This repo has one long living development branch `main`, for changes targeting the next major version as well as long living branches for each prior major version i.e. `v1.x`. Non breaking changes may be backported to these branches. This repo follows [semver](https://semver.org) versioning.
26 |
27 | ## Contributing
28 |
29 | Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.
30 |
31 | This repo attempts to conform to [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) so PR titles should ideally start with `fix:`, `feat:`, `build:`, `chore:`, `ci:`, `docs:`, `style:`, `refactor:`, `perf:`, or `test:` because this helps with semantic versioning and changelog generation. It is especially important to include an `!` (e.g. `feat!:`) if the PR includes a breaking change.
32 |
33 | ### Tools
34 |
35 | 1. Install [Go](https://golang.org/doc/install) 1.23.6
36 | 1. Install [golangci-lint](https://golangci-lint.run/usage/install/)
37 | 1. Fork this repo
38 | 1. Make your changes
39 | 1. Submit a pull request
40 |
41 | ### Helpful Commands
42 |
43 | ```sh
44 | # Display all available make commands
45 | make help
46 |
47 | # Run tests
48 | make test
49 |
50 | # Run linter
51 | make lint
52 |
53 | # Perform benchmarking
54 | make benchmark
55 | ```
56 |
--------------------------------------------------------------------------------
/buf.gen.yaml:
--------------------------------------------------------------------------------
1 | version: v1
2 | plugins:
3 | - plugin: buf.build/protocolbuffers/go
4 | out: .
5 | opt:
6 | - paths=source_relative
7 |
--------------------------------------------------------------------------------
/buf.yaml:
--------------------------------------------------------------------------------
1 | version: v2
2 | modules:
3 | - name: buf.build/celestia/go-square
4 | breaking:
5 | use:
6 | - FILE
7 | lint:
8 | use:
9 | - BASIC
10 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/celestiaorg/go-square/v2
2 |
3 | go 1.23.6
4 |
5 | require (
6 | github.com/celestiaorg/nmt v0.23.0
7 | github.com/stretchr/testify v1.10.0
8 | golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb
9 | google.golang.org/protobuf v1.36.6
10 | )
11 |
12 | require (
13 | github.com/davecgh/go-spew v1.1.1 // indirect
14 | github.com/gogo/protobuf v1.3.2 // indirect
15 | github.com/pmezard/go-difflib v1.0.0 // indirect
16 | gopkg.in/yaml.v3 v3.0.1 // indirect
17 | )
18 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/celestiaorg/nmt v0.23.0 h1:cfYy//hL1HeDSH0ub3CPlJuox5U5xzgg4JGZrw23I/I=
2 | github.com/celestiaorg/nmt v0.23.0/go.mod h1:kYfIjRq5rmA2mJnv41GLWkxn5KyLNPlma3v5Q68rHdI=
3 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
4 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
5 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
6 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
7 | github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
8 | github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
9 | github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
10 | github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
11 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
12 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
13 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
14 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
15 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
16 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
17 | github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
18 | github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
19 | github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
20 | github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
21 | github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
22 | github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
23 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
24 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
25 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
26 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
27 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
28 | golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8=
29 | golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
30 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
31 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
32 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
33 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
34 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
35 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
36 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
37 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
38 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
39 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
40 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
41 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
42 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
43 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
44 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
45 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
46 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
47 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
48 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
49 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
50 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
51 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
52 | google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
53 | google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
54 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
55 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
56 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
57 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
58 |
--------------------------------------------------------------------------------
/inclusion/blob_share_commitment_rules.go:
--------------------------------------------------------------------------------
1 | package inclusion
2 |
3 | import (
4 | "fmt"
5 | "math"
6 |
7 | "golang.org/x/exp/constraints"
8 | )
9 |
10 | // BlobSharesUsedNonInteractiveDefaults returns the number of shares used by a
11 | // given set of blobs share lengths. It follows the blob share commitment rules
12 | // and returns the total shares used and share indexes for each blob.
13 | func BlobSharesUsedNonInteractiveDefaults(cursor, subtreeRootThreshold int, blobShareLens ...int) (sharesUsed int, indexes []uint32) {
14 | start := cursor
15 | indexes = make([]uint32, len(blobShareLens))
16 | for i, blobLen := range blobShareLens {
17 | cursor = NextShareIndex(cursor, blobLen, subtreeRootThreshold)
18 | indexes[i] = uint32(cursor)
19 | cursor += blobLen
20 | }
21 | return cursor - start, indexes
22 | }
23 |
24 | // NextShareIndex determines the next index in a square that can be used. It
25 | // follows the blob share commitment rules defined in ADR-013. Assumes that all
26 | // args are non negative, that squareSize is a power of two and that the blob can
27 | // fit in the square. The cursor is expected to be the index after the end of
28 | // the previous blob.
29 | //
30 | // See https://github.com/celestiaorg/celestia-app/blob/main/specs/src/specs/data_square_layout.md
31 | // for more information.
32 | func NextShareIndex(cursor, blobShareLen, subtreeRootThreshold int) int {
33 | // Calculate the subtreewidth. This is the width of the first mountain in the
34 | // merkle mountain range that makes up the blob share commitment (given the
35 | // subtreeRootThreshold and the BlobMinSquareSize).
36 | treeWidth := SubTreeWidth(blobShareLen, subtreeRootThreshold)
37 | // Round up the cursor to the next multiple of treeWidth. For example, if
38 | // the cursor was at 13 and the tree width is 4, return 16.
39 | return RoundUpByMultipleOf(cursor, treeWidth)
40 | }
41 |
// RoundUpByMultipleOf rounds cursor up to the next multiple of v. If cursor is
// divisible by v, then it returns cursor.
func RoundUpByMultipleOf(cursor, v int) int {
	remainder := cursor % v
	if remainder == 0 {
		return cursor
	}
	// Strip the partial chunk and advance to the following multiple. Since
	// Go's integer division truncates, cursor-remainder == (cursor/v)*v, so
	// this is exactly ((cursor/v)+1)*v.
	return cursor - remainder + v
}
50 |
51 | // RoundUpPowerOfTwo returns the next power of two greater than or equal to input.
52 | func RoundUpPowerOfTwo[I constraints.Integer](input I) I {
53 | var result I = 1
54 | for result < input {
55 | result <<= 1
56 | }
57 | return result
58 | }
59 |
60 | // RoundDownPowerOfTwo returns the next power of two less than or equal to input.
61 | func RoundDownPowerOfTwo[I constraints.Integer](input I) (I, error) {
62 | if input <= 0 {
63 | return 0, fmt.Errorf("input %v must be positive", input)
64 | }
65 | roundedUp := RoundUpPowerOfTwo(input)
66 | if roundedUp == input {
67 | return roundedUp, nil
68 | }
69 | return roundedUp / 2, nil
70 | }
71 |
72 | // BlobMinSquareSize returns the minimum square size that can contain shareCount
73 | // number of shares.
74 | func BlobMinSquareSize(shareCount int) int {
75 | return RoundUpPowerOfTwo(int(math.Ceil(math.Sqrt(float64(shareCount)))))
76 | }
77 |
78 | // SubTreeWidth returns the maximum number of leaves per subtree in the share
79 | // commitment over a given blob. The input should be the total number of shares
80 | // used by that blob. See ADR-013.
81 | func SubTreeWidth(shareCount, subtreeRootThreshold int) int {
82 | // Per ADR-013, we use a predetermined threshold to determine width of sub
83 | // trees used to create share commitments
84 | s := (shareCount / subtreeRootThreshold)
85 |
86 | // round up if the width is not an exact multiple of the threshold
87 | if shareCount%subtreeRootThreshold != 0 {
88 | s++
89 | }
90 |
91 | // use a power of two equal to or larger than the multiple of the subtree
92 | // root threshold
93 | s = RoundUpPowerOfTwo(s)
94 |
95 | // use the minimum of the subtree width and the min square size, this
96 | // gurarantees that a valid value is returned
97 | return getMin(s, BlobMinSquareSize(shareCount))
98 | }
99 |
100 | func getMin[T constraints.Integer](i, j T) T {
101 | if i < j {
102 | return i
103 | }
104 | return j
105 | }
106 |
--------------------------------------------------------------------------------
/inclusion/blob_share_commitment_rules_test.go:
--------------------------------------------------------------------------------
1 | package inclusion_test
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/celestiaorg/go-square/v2/inclusion"
8 | "github.com/stretchr/testify/assert"
9 | "github.com/stretchr/testify/require"
10 | )
11 |
const (
	// defaultSubtreeRootThreshold is the subtree root threshold passed to the
	// inclusion package functions throughout these tests.
	defaultSubtreeRootThreshold = 64
	// defaultMaxSquareSize is the maximum square size assumed by the
	// SubTreeWidth edge-case tests below.
	defaultMaxSquareSize = 128
)
16 |
17 | func TestBlobSharesUsedNonInteractiveDefaults(t *testing.T) {
18 | defaultSquareSize := 128
19 | type test struct {
20 | cursor, expected int
21 | blobLens []int
22 | indexes []uint32
23 | }
24 | tests := []test{
25 | {2, 1, []int{1}, []uint32{2}},
26 | {2, 1, []int{1}, []uint32{2}},
27 | {3, 6, []int{3, 3}, []uint32{3, 6}},
28 | {0, 8, []int{8}, []uint32{0}},
29 | {1, 6, []int{3, 3}, []uint32{1, 4}},
30 | {1, 32, []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}},
31 | {3, 12, []int{5, 7}, []uint32{3, 8}},
32 | {0, 20, []int{5, 5, 5, 5}, []uint32{0, 5, 10, 15}},
33 | {0, 10, []int{10}, []uint32{0}},
34 | {1, 20, []int{10, 10}, []uint32{1, 11}},
35 | {0, 1000, []int{1000}, []uint32{0}},
36 | {0, defaultSquareSize + 1, []int{defaultSquareSize + 1}, []uint32{0}},
37 | {1, 385, []int{128, 128, 128}, []uint32{2, 130, 258}},
38 | {1024, 32, []int{32}, []uint32{1024}},
39 | }
40 | for i, tt := range tests {
41 | res, indexes := inclusion.BlobSharesUsedNonInteractiveDefaults(tt.cursor, defaultSubtreeRootThreshold, tt.blobLens...)
42 | test := fmt.Sprintf("test %d: cursor %d", i, tt.cursor)
43 | assert.Equal(t, tt.expected, res, test)
44 | assert.Equal(t, tt.indexes, indexes, test)
45 | }
46 | }
47 |
48 | func TestNextShareIndex(t *testing.T) {
49 | type test struct {
50 | name string
51 | cursor, blobLen, squareSize int
52 | expectedIndex int
53 | }
54 | tests := []test{
55 | {
56 | name: "whole row blobLen 4",
57 | cursor: 0,
58 | blobLen: 4,
59 | squareSize: 4,
60 | expectedIndex: 0,
61 | },
62 | {
63 | name: "half row blobLen 2 cursor 1",
64 | cursor: 1,
65 | blobLen: 2,
66 | squareSize: 4,
67 | expectedIndex: 1,
68 | },
69 | {
70 | name: "half row blobLen 2 cursor 2",
71 | cursor: 2,
72 | blobLen: 2,
73 | squareSize: 4,
74 | expectedIndex: 2,
75 | },
76 | {
77 | name: "half row blobLen 4 cursor 3",
78 | cursor: 3,
79 | blobLen: 4,
80 | squareSize: 8,
81 | expectedIndex: 3,
82 | },
83 | {
84 | name: "blobLen 5 cursor 3 size 8",
85 | cursor: 3,
86 | blobLen: 5,
87 | squareSize: 8,
88 | expectedIndex: 3,
89 | },
90 | {
91 | name: "blobLen 2 cursor 3 square size 8",
92 | cursor: 3,
93 | blobLen: 2,
94 | squareSize: 8,
95 | expectedIndex: 3,
96 | },
97 | {
98 | name: "cursor 3 blobLen 5 size 8",
99 | cursor: 3,
100 | blobLen: 5,
101 | squareSize: 8,
102 | expectedIndex: 3,
103 | },
104 | {
105 | name: "bloblen 12 cursor 1 size 16",
106 | cursor: 1,
107 | blobLen: 12,
108 | squareSize: 16,
109 | expectedIndex: 1,
110 | },
111 | {
112 | name: "edge case where there are many blobs with a single size",
113 | cursor: 10291,
114 | blobLen: 1,
115 | squareSize: 128,
116 | expectedIndex: 10291,
117 | },
118 | {
119 | name: "second row blobLen 2 cursor 11 square size 8",
120 | cursor: 11,
121 | blobLen: 2,
122 | squareSize: 8,
123 | expectedIndex: 11,
124 | },
125 | {
126 | name: "blob share commitment rules for reduced padding diagram",
127 | cursor: 11,
128 | blobLen: 11,
129 | squareSize: 8,
130 | expectedIndex: 11,
131 | },
132 | {
133 | name: "at threshold",
134 | cursor: 11,
135 | blobLen: defaultSubtreeRootThreshold,
136 | squareSize: inclusion.RoundUpPowerOfTwo(defaultSubtreeRootThreshold),
137 | expectedIndex: 11,
138 | },
139 | {
140 | name: "one over the threshold",
141 | cursor: 64,
142 | blobLen: defaultSubtreeRootThreshold + 1,
143 | squareSize: 128,
144 | expectedIndex: 64,
145 | },
146 | {
147 | name: "one under the threshold",
148 | cursor: 64,
149 | blobLen: defaultSubtreeRootThreshold - 1,
150 | squareSize: 128,
151 | expectedIndex: 64,
152 | },
153 | {
154 | name: "one under the threshold small square size",
155 | cursor: 1,
156 | blobLen: defaultSubtreeRootThreshold - 1,
157 | squareSize: 16,
158 | expectedIndex: 1,
159 | },
160 | {
161 | name: "max padding for square size 128",
162 | cursor: 1,
163 | blobLen: 16256,
164 | squareSize: 128,
165 | expectedIndex: 128,
166 | },
167 | {
168 | name: "half max padding for square size 128",
169 | cursor: 1,
170 | blobLen: 8192,
171 | squareSize: 128,
172 | expectedIndex: 128,
173 | },
174 | {
175 | name: "quarter max padding for square size 128",
176 | cursor: 1,
177 | blobLen: 4096,
178 | squareSize: 128,
179 | expectedIndex: 64,
180 | },
181 | {
182 | name: "round up to 128 subtree size",
183 | cursor: 1,
184 | blobLen: 8193,
185 | squareSize: 128,
186 | expectedIndex: 128,
187 | },
188 | }
189 | for _, tt := range tests {
190 | t.Run(tt.name, func(t *testing.T) {
191 | res := inclusion.NextShareIndex(tt.cursor, tt.blobLen, defaultSubtreeRootThreshold)
192 | assert.Equal(t, tt.expectedIndex, res)
193 | })
194 | }
195 | }
196 |
197 | func TestRoundUpByMultipleOf(t *testing.T) {
198 | type test struct {
199 | cursor, v int
200 | expectedIndex int
201 | }
202 | tests := []test{
203 | {
204 | cursor: 1,
205 | v: 2,
206 | expectedIndex: 2,
207 | },
208 | {
209 | cursor: 2,
210 | v: 2,
211 | expectedIndex: 2,
212 | },
213 | {
214 | cursor: 0,
215 | v: 2,
216 | expectedIndex: 0,
217 | },
218 | {
219 | cursor: 5,
220 | v: 2,
221 | expectedIndex: 6,
222 | },
223 | {
224 | cursor: 8,
225 | v: 16,
226 | expectedIndex: 16,
227 | },
228 | {
229 | cursor: 33,
230 | v: 1,
231 | expectedIndex: 33,
232 | },
233 | {
234 | cursor: 32,
235 | v: 16,
236 | expectedIndex: 32,
237 | },
238 | {
239 | cursor: 33,
240 | v: 16,
241 | expectedIndex: 48,
242 | },
243 | }
244 | for i, tt := range tests {
245 | t.Run(
246 | fmt.Sprintf(
247 | "test %d: %d cursor %d v %d expectedIndex",
248 | i,
249 | tt.cursor,
250 | tt.v,
251 | tt.expectedIndex,
252 | ),
253 | func(t *testing.T) {
254 | res := inclusion.RoundUpByMultipleOf(tt.cursor, tt.v)
255 | assert.Equal(t, tt.expectedIndex, res)
256 | })
257 | }
258 | }
259 |
260 | func TestRoundUpPowerOfTwo(t *testing.T) {
261 | type testCase struct {
262 | input int
263 | want int
264 | }
265 | testCases := []testCase{
266 | {input: -1, want: 1},
267 | {input: 0, want: 1},
268 | {input: 1, want: 1},
269 | {input: 2, want: 2},
270 | {input: 4, want: 4},
271 | {input: 5, want: 8},
272 | {input: 8, want: 8},
273 | {input: 11, want: 16},
274 | {input: 511, want: 512},
275 | }
276 | for _, tc := range testCases {
277 | got := inclusion.RoundUpPowerOfTwo(tc.input)
278 | assert.Equal(t, tc.want, got)
279 | }
280 | }
281 |
282 | func TestBlobMinSquareSize(t *testing.T) {
283 | type testCase struct {
284 | shareCount int
285 | want int
286 | }
287 | testCases := []testCase{
288 | {
289 | shareCount: 0,
290 | want: 1,
291 | },
292 | {
293 | shareCount: 1,
294 | want: 1,
295 | },
296 | {
297 | shareCount: 2,
298 | want: 2,
299 | },
300 | {
301 | shareCount: 3,
302 | want: 2,
303 | },
304 | {
305 | shareCount: 4,
306 | want: 2,
307 | },
308 | {
309 | shareCount: 5,
310 | want: 4,
311 | },
312 | {
313 | shareCount: 16,
314 | want: 4,
315 | },
316 | {
317 | shareCount: 17,
318 | want: 8,
319 | },
320 | }
321 | for _, tc := range testCases {
322 | t.Run(fmt.Sprintf("shareCount %d", tc.shareCount), func(t *testing.T) {
323 | got := inclusion.BlobMinSquareSize(tc.shareCount)
324 | assert.Equal(t, tc.want, got)
325 | })
326 | }
327 | }
328 |
329 | func TestSubTreeWidth(t *testing.T) {
330 | type testCase struct {
331 | shareCount int
332 | want int
333 | }
334 | testCases := []testCase{
335 | {
336 | shareCount: 0,
337 | want: 1,
338 | },
339 | {
340 | shareCount: 1,
341 | want: 1,
342 | },
343 | {
344 | shareCount: 2,
345 | want: 1,
346 | },
347 | {
348 | shareCount: defaultSubtreeRootThreshold,
349 | want: 1,
350 | },
351 | {
352 | shareCount: defaultSubtreeRootThreshold + 1,
353 | want: 2,
354 | },
355 | {
356 | shareCount: defaultSubtreeRootThreshold - 1,
357 | want: 1,
358 | },
359 | {
360 | shareCount: defaultSubtreeRootThreshold * 2,
361 | want: 2,
362 | },
363 | {
364 | shareCount: (defaultSubtreeRootThreshold * 2) + 1,
365 | want: 4,
366 | },
367 | {
368 | shareCount: (defaultSubtreeRootThreshold * 3) - 1,
369 | want: 4,
370 | },
371 | {
372 | shareCount: (defaultSubtreeRootThreshold * 4),
373 | want: 4,
374 | },
375 | {
376 | shareCount: (defaultSubtreeRootThreshold * 5),
377 | want: 8,
378 | },
379 | {
380 | shareCount: (defaultSubtreeRootThreshold * defaultMaxSquareSize) - 1,
381 | want: 128,
382 | },
383 | }
384 | for i, tc := range testCases {
385 | t.Run(fmt.Sprintf("shareCount %d", tc.shareCount), func(t *testing.T) {
386 | got := inclusion.SubTreeWidth(tc.shareCount, defaultSubtreeRootThreshold)
387 | assert.Equal(t, tc.want, got, i)
388 | })
389 | }
390 | }
391 |
392 | func TestRoundDownPowerOfTwo(t *testing.T) {
393 | type testCase struct {
394 | input int
395 | want int
396 | }
397 | testCases := []testCase{
398 | {input: 1, want: 1},
399 | {input: 2, want: 2},
400 | {input: 4, want: 4},
401 | {input: 5, want: 4},
402 | {input: 8, want: 8},
403 | {input: 11, want: 8},
404 | {input: 511, want: 256},
405 | }
406 | for _, tc := range testCases {
407 | got, err := inclusion.RoundDownPowerOfTwo(tc.input)
408 | require.NoError(t, err)
409 | assert.Equal(t, tc.want, got)
410 | }
411 | }
412 |
--------------------------------------------------------------------------------
/inclusion/commitment.go:
--------------------------------------------------------------------------------
1 | package inclusion
2 |
3 | import (
4 | "crypto/sha256"
5 |
6 | sh "github.com/celestiaorg/go-square/v2/share"
7 | "github.com/celestiaorg/nmt"
8 | )
9 |
// MerkleRootFn computes the merkle root of a list of byte slices. Callers
// supply the implementation used by CreateCommitment / CreateCommitments.
type MerkleRootFn func([][]byte) []byte
11 |
12 | // CreateCommitment generates the share commitment for a given blob.
13 | // See [data square layout rationale] and [blob share commitment rules].
14 | //
15 | // [data square layout rationale]: ../../specs/src/specs/data_square_layout.md
16 | // [blob share commitment rules]: ../../specs/src/specs/data_square_layout.md#blob-share-commitment-rules
17 | func CreateCommitment(blob *sh.Blob, merkleRootFn MerkleRootFn, subtreeRootThreshold int) ([]byte, error) {
18 | subTreeRoots, err := GenerateSubtreeRoots(blob, subtreeRootThreshold)
19 | if err != nil {
20 | return nil, err
21 | }
22 | return merkleRootFn(subTreeRoots), nil
23 | }
24 |
25 | // GenerateSubtreeRoots generates the subtree roots of a blob.
26 | // See [data square layout rationale] and [blob share commitment rules].
27 | //
28 | // [data square layout rationale]: ../../specs/src/specs/data_square_layout.md
29 | // [blob share commitment rules]: ../../specs/src/specs/data_square_layout.md#blob-share-commitment-rules
30 | func GenerateSubtreeRoots(blob *sh.Blob, subtreeRootThreshold int) ([][]byte, error) {
31 | shares, err := splitBlobs(blob)
32 | if err != nil {
33 | return nil, err
34 | }
35 |
36 | // the commitment is the root of a merkle mountain range with max tree size
37 | // determined by the number of roots required to create a share commitment
38 | // over that blob. The size of the tree is only increased if the number of
39 | // subtree roots surpasses a constant threshold.
40 | subTreeWidth := SubTreeWidth(len(shares), subtreeRootThreshold)
41 | treeSizes, err := MerkleMountainRangeSizes(uint64(len(shares)), uint64(subTreeWidth))
42 | if err != nil {
43 | return nil, err
44 | }
45 | leafSets := make([][][]byte, len(treeSizes))
46 | cursor := uint64(0)
47 | for i, treeSize := range treeSizes {
48 | leafSets[i] = sh.ToBytes(shares[cursor : cursor+treeSize])
49 | cursor += treeSize
50 | }
51 |
52 | namespace := blob.Namespace()
53 | // create the commitments by pushing each leaf set onto an NMT
54 | subTreeRoots := make([][]byte, len(leafSets))
55 | for i, set := range leafSets {
56 | // Create the NMT. TODO: use NMT wrapper.
57 | tree := nmt.New(sha256.New(), nmt.NamespaceIDSize(sh.NamespaceSize), nmt.IgnoreMaxNamespace(true))
58 | for _, leaf := range set {
59 | // the namespace must be added again here even though it is already
60 | // included in the leaf to ensure that the hash will match that of
61 | // the NMT wrapper (pkg/wrapper). Each namespace is added to keep
62 | // the namespace in the share, and therefore the parity data, while
63 | // also allowing for the manual addition of the parity namespace to
64 | // the parity data.
65 | nsLeaf := make([]byte, 0)
66 | nsLeaf = append(nsLeaf, namespace.Bytes()...)
67 | nsLeaf = append(nsLeaf, leaf...)
68 |
69 | err = tree.Push(nsLeaf)
70 | if err != nil {
71 | return nil, err
72 | }
73 | }
74 | // add the root
75 | root, err := tree.Root()
76 | if err != nil {
77 | return nil, err
78 | }
79 | subTreeRoots[i] = root
80 | }
81 | return subTreeRoots, nil
82 | }
83 |
84 | func CreateCommitments(blobs []*sh.Blob, merkleRootFn MerkleRootFn, subtreeRootThreshold int) ([][]byte, error) {
85 | commitments := make([][]byte, len(blobs))
86 | for i, blob := range blobs {
87 | commitment, err := CreateCommitment(blob, merkleRootFn, subtreeRootThreshold)
88 | if err != nil {
89 | return nil, err
90 | }
91 | commitments[i] = commitment
92 | }
93 | return commitments, nil
94 | }
95 |
96 | // MerkleMountainRangeSizes returns the sizes (number of leaf nodes) of the
97 | // trees in a merkle mountain range constructed for a given totalSize and
98 | // maxTreeSize.
99 | //
100 | // https://docs.grin.mw/wiki/chain-state/merkle-mountain-range/
101 | // https://github.com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range.md
102 | func MerkleMountainRangeSizes(totalSize, maxTreeSize uint64) ([]uint64, error) {
103 | var treeSizes []uint64
104 |
105 | for totalSize != 0 {
106 | switch {
107 | case totalSize >= maxTreeSize:
108 | treeSizes = append(treeSizes, maxTreeSize)
109 | totalSize -= maxTreeSize
110 | case totalSize < maxTreeSize:
111 | treeSize, err := RoundDownPowerOfTwo(totalSize)
112 | if err != nil {
113 | return treeSizes, err
114 | }
115 | treeSizes = append(treeSizes, treeSize)
116 | totalSize -= treeSize
117 | }
118 | }
119 |
120 | return treeSizes, nil
121 | }
122 |
123 | // splitBlobs splits the provided blobs into shares.
124 | func splitBlobs(blobs ...*sh.Blob) ([]sh.Share, error) {
125 | writer := sh.NewSparseShareSplitter()
126 | for _, blob := range blobs {
127 | if err := writer.Write(blob); err != nil {
128 | return nil, err
129 | }
130 | }
131 | return writer.Export(), nil
132 | }
133 |
--------------------------------------------------------------------------------
/inclusion/commitment_test.go:
--------------------------------------------------------------------------------
1 | package inclusion_test
2 |
3 | import (
4 | "bytes"
5 | "crypto/sha256"
6 | "testing"
7 |
8 | "github.com/celestiaorg/go-square/v2/inclusion"
9 | "github.com/celestiaorg/go-square/v2/share"
10 | "github.com/stretchr/testify/assert"
11 | "github.com/stretchr/testify/require"
12 | )
13 |
14 | func TestMerkleMountainRangeSizes(t *testing.T) {
15 | type test struct {
16 | totalSize uint64
17 | squareSize uint64
18 | expected []uint64
19 | }
20 | tests := []test{
21 | {
22 | totalSize: 11,
23 | squareSize: 4,
24 | expected: []uint64{4, 4, 2, 1},
25 | },
26 | {
27 | totalSize: 2,
28 | squareSize: 64,
29 | expected: []uint64{2},
30 | },
31 | {
32 | totalSize: 64,
33 | squareSize: 8,
34 | expected: []uint64{8, 8, 8, 8, 8, 8, 8, 8},
35 | },
36 | // Height
37 | // 3 x x
38 | // / \ / \
39 | // / \ / \
40 | // / \ / \
41 | // / \ / \
42 | // 2 x x x x
43 | // / \ / \ / \ / \
44 | // 1 x x x x x x x x x
45 | // / \ / \ / \ / \ / \ / \ / \ / \ / \
46 | // 0 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
47 | {
48 | totalSize: 19,
49 | squareSize: 8,
50 | expected: []uint64{8, 8, 2, 1},
51 | },
52 | }
53 | for _, tt := range tests {
54 | res, err := inclusion.MerkleMountainRangeSizes(tt.totalSize, tt.squareSize)
55 | require.NoError(t, err)
56 | assert.Equal(t, tt.expected, res)
57 | }
58 | }
59 |
// TestCreateCommitment will fail if a change is made to share encoding or how
// the commitment is calculated. If this is the case, the expected commitment
// bytes will need to be updated.
func TestCreateCommitment(t *testing.T) {
	ns1 := share.MustNewV0Namespace(bytes.Repeat([]byte{0x1}, share.NamespaceVersionZeroIDSize))

	type test struct {
		name      string
		namespace share.Namespace
		blob      []byte
		// expected holds golden commitment bytes; regenerate them whenever
		// share encoding or the commitment calculation changes.
		expected     []byte
		expectErr    bool
		shareVersion uint8
		// signer is only set for share version 1 blobs.
		signer []byte
	}
	tests := []test{
		{
			// data sized to fill exactly two sparse shares
			name:         "blob of 2 shares succeeds",
			namespace:    ns1,
			blob:         bytes.Repeat([]byte{0xFF}, share.AvailableBytesFromSparseShares(2)),
			expected:     []byte{0x31, 0xf5, 0x15, 0x6d, 0x5d, 0xb9, 0xa7, 0xf5, 0xb4, 0x3b, 0x29, 0x7a, 0x14, 0xc0, 0x70, 0xc2, 0xcc, 0x4e, 0xf3, 0xd6, 0x9d, 0x87, 0xed, 0x8, 0xad, 0xdd, 0x21, 0x6d, 0x9b, 0x9f, 0xa1, 0x18},
			shareVersion: share.ShareVersionZero,
		},
		{
			// data shrunk by SignerSize so the signer-bearing blob still fits
			// in two shares
			name:         "blob of one share with signer succeeds",
			namespace:    ns1,
			blob:         bytes.Repeat([]byte{0xFF}, share.AvailableBytesFromSparseShares(2)-share.SignerSize),
			expected:     []byte{0x88, 0x3c, 0x74, 0x6, 0x4e, 0x8e, 0x26, 0x27, 0xad, 0x58, 0x8, 0x38, 0x9f, 0x1f, 0x19, 0x24, 0x19, 0x4c, 0x1a, 0xe2, 0x3c, 0x7d, 0xf9, 0x62, 0xc8, 0xd5, 0x6d, 0xf0, 0x62, 0xa9, 0x2b, 0x2b},
			shareVersion: share.ShareVersionOne,
			signer:       bytes.Repeat([]byte{1}, share.SignerSize),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			blob, err := share.NewBlob(tt.namespace, tt.blob, tt.shareVersion, tt.signer)
			require.NoError(t, err)
			res, err := inclusion.CreateCommitment(blob, twoLeafMerkleRoot, defaultSubtreeRootThreshold)
			if tt.expectErr {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
			assert.Equal(t, tt.expected, res)
		})
	}
}
106 |
// twoLeafMerkleRoot hashes each of the two leaves with SHA-256 and returns the
// SHA-256 digest of the concatenated leaf hashes. It panics unless exactly two
// leaves are provided.
func twoLeafMerkleRoot(data [][]byte) []byte {
	if len(data) != 2 {
		panic("data must have exactly 2 elements")
	}
	left := sha256.Sum256(data[0])
	right := sha256.Sum256(data[1])
	root := sha256.Sum256(append(left[:], right[:]...))
	return root[:]
}
116 |
--------------------------------------------------------------------------------
/inclusion/doc.go:
--------------------------------------------------------------------------------
1 | // Package inclusion contains functions to generate the blob share commitment
2 | // from a given blob.
3 | package inclusion
4 |
--------------------------------------------------------------------------------
/internal/test/factory.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import (
4 | crand "crypto/rand"
5 | "encoding/binary"
6 | "fmt"
7 | "math/rand"
8 |
9 | "github.com/celestiaorg/go-square/v2/share"
10 | "github.com/celestiaorg/go-square/v2/tx"
11 | )
12 |
// DefaultTestNamespace is the version-zero namespace used by GenerateBlobTx
// when no explicit namespaces are supplied.
var DefaultTestNamespace = share.MustNewV0Namespace([]byte("test"))
14 |
15 | func GenerateTxs(minSize, maxSize, numTxs int) [][]byte {
16 | txs := make([][]byte, numTxs)
17 | for i := 0; i < numTxs; i++ {
18 | txs[i] = GenerateRandomTx(minSize, maxSize)
19 | }
20 | return txs
21 | }
22 |
23 | func GenerateRandomTx(minSize, maxSize int) []byte {
24 | size := minSize
25 | if maxSize > minSize {
26 | size = rand.Intn(maxSize-minSize) + minSize
27 | }
28 | return RandomBytes(size)
29 | }
30 |
// RandomBytes returns size cryptographically random bytes, panicking if the
// entropy source fails.
func RandomBytes(size int) []byte {
	buf := make([]byte, size)
	if _, err := crand.Read(buf); err != nil {
		panic(err)
	}
	return buf
}
39 |
40 | func GenerateBlobTxWithNamespace(namespaces []share.Namespace, blobSizes []int, version uint8) []byte {
41 | blobs := make([]*share.Blob, len(blobSizes))
42 | if len(namespaces) != len(blobSizes) {
43 | panic("number of namespaces should match number of blob sizes")
44 | }
45 | var err error
46 | var signer []byte
47 | if version == share.ShareVersionOne {
48 | signer = RandomBytes(share.SignerSize)
49 | }
50 | for i, size := range blobSizes {
51 | blobs[i], err = share.NewBlob(namespaces[i], RandomBytes(size), version, signer)
52 | if err != nil {
53 | panic(err)
54 | }
55 | }
56 | blobTx, err := tx.MarshalBlobTx(MockPFB(toUint32(blobSizes)), blobs...)
57 | if err != nil {
58 | panic(err)
59 | }
60 | return blobTx
61 | }
62 |
63 | func GenerateBlobTx(blobSizes []int) []byte {
64 | return GenerateBlobTxWithNamespace(Repeat(DefaultTestNamespace, len(blobSizes)), blobSizes, share.DefaultShareVersion)
65 | }
66 |
67 | func GenerateBlobTxs(numTxs, blobsPerPfb, blobSize int) [][]byte {
68 | blobSizes := make([]int, blobsPerPfb)
69 | for i := range blobSizes {
70 | blobSizes[i] = blobSize
71 | }
72 | txs := make([][]byte, numTxs)
73 | for i := 0; i < numTxs; i++ {
74 | txs[i] = GenerateBlobTx(blobSizes)
75 | }
76 | return txs
77 | }
78 |
79 | func GenerateBlobs(blobSizes ...int) []*share.Blob {
80 | blobs := make([]*share.Blob, len(blobSizes))
81 | var err error
82 | for i, size := range blobSizes {
83 | blobs[i], err = share.NewBlob(share.RandomBlobNamespace(), RandomBytes(size), share.ShareVersionZero, nil)
84 | if err != nil {
85 | panic(err)
86 | }
87 | }
88 | return blobs
89 | }
90 |
// mockPFBExtraBytes is the number of random padding bytes MockPFB prepends
// before the encoded blob sizes; DecodeMockPFB skips the same amount.
const mockPFBExtraBytes = 329
92 |
93 | func MockPFB(blobSizes []uint32) []byte {
94 | if len(blobSizes) == 0 {
95 | panic("must have at least one blob")
96 | }
97 | tx := make([]byte, len(blobSizes)*4)
98 | for i, size := range blobSizes {
99 | binary.BigEndian.PutUint32(tx[i*4:], uint32(size))
100 | }
101 |
102 | return append(RandomBytes(mockPFBExtraBytes), tx...)
103 | }
104 |
105 | func DecodeMockPFB(pfb []byte) ([]uint32, error) {
106 | if len(pfb) < mockPFBExtraBytes+4 {
107 | return nil, fmt.Errorf("must have a length of at least %d bytes, got %d", mockPFBExtraBytes+4, len(pfb))
108 | }
109 | pfb = pfb[mockPFBExtraBytes:]
110 | blobSizes := make([]uint32, len(pfb)/4)
111 | for i := 0; i < len(blobSizes); i++ {
112 | blobSizes[i] = binary.BigEndian.Uint32(pfb[i*4 : (i+1)*4])
113 | }
114 | return blobSizes, nil
115 | }
116 |
// toUint32 converts a slice of ints into a slice of uint32s.
func toUint32(arr []int) []uint32 {
	out := make([]uint32, 0, len(arr))
	for _, v := range arr {
		out = append(out, uint32(v))
	}
	return out
}
124 |
// Repeat returns a slice containing count copies of s.
func Repeat[T any](s T, count int) []T {
	out := make([]T, count)
	for i := range out {
		out[i] = s
	}
	return out
}
132 |
// DelimLen calculates the length of the delimiter for a given unit size,
// i.e. the number of bytes its uvarint encoding occupies.
func DelimLen(size uint64) int {
	scratch := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(scratch, size)
	return n
}
138 |
--------------------------------------------------------------------------------
/internal/test/factory_test.go:
--------------------------------------------------------------------------------
1 | package test_test
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/celestiaorg/go-square/v2/internal/test"
7 | "github.com/stretchr/testify/require"
8 | )
9 |
10 | func TestPFBParity(t *testing.T) {
11 | blobSizes := []uint32{20, 30, 10}
12 | pfb := test.MockPFB(blobSizes)
13 | output, err := test.DecodeMockPFB(pfb)
14 | require.NoError(t, err)
15 | require.Equal(t, blobSizes, output)
16 |
17 | require.Panics(t, func() { test.MockPFB(nil) })
18 |
19 | _, err = test.DecodeMockPFB(test.RandomBytes(20))
20 | require.Error(t, err)
21 | }
22 |
--------------------------------------------------------------------------------
/proto/blob/v1/blob.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | package proto.blob.v1;
3 |
4 | option go_package = "github.com/celestiaorg/go-square/v2/proto/blob/v1";
5 |
// BlobProto is the protobuf representation of a blob (binary large object)
// to be published to the Celestia blockchain. The data of a Blob is published
// to a namespace and is encoded into shares based on the format specified by
// share_version.
message BlobProto {
  // The ID portion of the namespace the data is published to; the version
  // byte is carried separately in namespace_version.
  bytes namespace_id = 1;
  // The raw payload of the blob.
  bytes data = 2;
  // Version of the share encoding format used for this blob's data.
  uint32 share_version = 3;
  // Version byte of the blob's namespace.
  uint32 namespace_version = 4;
  // Signer is sdk.AccAddress that paid for this blob. This field is optional
  // and can only be used when share_version is set to 1.
  bytes signer = 5;
}
19 |
// BlobTx wraps an encoded sdk.Tx with a second field to contain blobs of data.
// The raw bytes of the blobs are not signed over, instead we verify each blob
// using the relevant MsgPayForBlobs that is signed over in the encoded sdk.Tx.
message BlobTx {
  // The encoded sdk.Tx that pays for the blobs below.
  bytes tx = 1;
  // The blobs carried alongside (but not signed over by) the transaction.
  repeated BlobProto blobs = 2;
  // NOTE(review): presumably marks the encoding as a BlobTx wrapper — confirm
  // against the tx package's marshalling code.
  string type_id = 3;
}
28 |
// IndexWrapper adds index metadata to a transaction. This is used to track
// transactions that pay for blobs, and where the blobs start in the square.
message IndexWrapper {
  // The encoded transaction being wrapped.
  bytes tx = 1;
  // Starting share index in the data square for each blob paid for by tx.
  repeated uint32 share_indexes = 2;
  // NOTE(review): presumably identifies the wrapper encoding — confirm
  // against the tx package's marshalling code.
  string type_id = 3;
}
36 |
--------------------------------------------------------------------------------
/share/README.md:
--------------------------------------------------------------------------------
1 | # Shares
2 |
3 | See the celestia-app specs for [shares](https://celestiaorg.github.io/celestia-app/specs/shares.html).
4 |
--------------------------------------------------------------------------------
/share/blob.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "encoding/json"
5 | "errors"
6 | "fmt"
7 | "sort"
8 |
9 | v1 "github.com/celestiaorg/go-square/v2/proto/blob/v1"
10 | "google.golang.org/protobuf/proto"
11 | )
12 |
// Blob (stands for binary large object) is a core type that represents data
// to be submitted to the Celestia network alongside an accompanying namespace
// and optional signer (for proving the signer of the blob)
type Blob struct {
	namespace    Namespace // version-zero namespace the data is published under (enforced by NewBlob)
	data         []byte    // raw payload; never empty for a validly constructed blob
	shareVersion uint8     // share encoding version, 0 or 1
	signer       []byte    // SignerSize bytes when shareVersion is 1, nil for version 0
}
22 |
// NewBlob creates a new Blob from the provided data after performing basic
// stateless checks over it: the data must be non-empty, the namespace must be
// non-empty and version zero, and the signer must be absent for share version
// 0 or exactly SignerSize bytes for share version 1.
func NewBlob(ns Namespace, data []byte, shareVersion uint8, signer []byte) (*Blob, error) {
	if len(data) == 0 {
		return nil, errors.New("data can not be empty")
	}
	if ns.IsEmpty() {
		return nil, errors.New("namespace can not be empty")
	}
	if ns.Version() != NamespaceVersionZero {
		return nil, fmt.Errorf("namespace version must be %d got %d", NamespaceVersionZero, ns.Version())
	}
	switch shareVersion {
	case ShareVersionZero:
		if signer != nil {
			return nil, errors.New("share version 0 does not support signer")
		}
	case ShareVersionOne:
		if len(signer) != SignerSize {
			return nil, fmt.Errorf("share version 1 requires signer of size %d bytes", SignerSize)
		}
	// Note that we don't specifically check that shareVersion is less than 128 as this is caught
	// by the default case
	default:
		return nil, fmt.Errorf("share version %d not supported. Please use 0 or 1", shareVersion)
	}
	return &Blob{
		namespace:    ns,
		data:         data,
		shareVersion: shareVersion,
		signer:       signer,
	}, nil
}
56 |
57 | // NewV0Blob creates a new blob with share version 0
58 | func NewV0Blob(ns Namespace, data []byte) (*Blob, error) {
59 | return NewBlob(ns, data, 0, nil)
60 | }
61 |
62 | // NewV1Blob creates a new blob with share version 1
63 | func NewV1Blob(ns Namespace, data []byte, signer []byte) (*Blob, error) {
64 | return NewBlob(ns, data, 1, signer)
65 | }
66 |
67 | // UnmarshalBlob unmarshals a blob from the proto encoded bytes
68 | func UnmarshalBlob(blob []byte) (*Blob, error) {
69 | pb := &v1.BlobProto{}
70 | err := proto.Unmarshal(blob, pb)
71 | if err != nil {
72 | return nil, fmt.Errorf("failed to unmarshal blob: %w", err)
73 | }
74 | return NewBlobFromProto(pb)
75 | }
76 |
77 | // Marshal marshals the blob to the proto encoded bytes
78 | func (b *Blob) Marshal() ([]byte, error) {
79 | pb := &v1.BlobProto{
80 | NamespaceId: b.namespace.ID(),
81 | NamespaceVersion: uint32(b.namespace.Version()),
82 | ShareVersion: uint32(b.shareVersion),
83 | Data: b.data,
84 | Signer: b.signer,
85 | }
86 | return proto.Marshal(pb)
87 | }
88 |
89 | // MarshalJSON converts blob's data to the json encoded bytes
90 | func (b *Blob) MarshalJSON() ([]byte, error) {
91 | pb := &v1.BlobProto{
92 | NamespaceId: b.namespace.ID(),
93 | NamespaceVersion: uint32(b.namespace.Version()),
94 | ShareVersion: uint32(b.shareVersion),
95 | Data: b.data,
96 | Signer: b.signer,
97 | }
98 | return json.Marshal(pb)
99 | }
100 |
101 | // UnmarshalJSON converts json encoded data to the blob
102 | func (b *Blob) UnmarshalJSON(bb []byte) error {
103 | pb := &v1.BlobProto{}
104 | err := json.Unmarshal(bb, pb)
105 | if err != nil {
106 | return err
107 | }
108 |
109 | blob, err := NewBlobFromProto(pb)
110 | if err != nil {
111 | return err
112 | }
113 |
114 | *b = *blob
115 | return nil
116 | }
117 |
118 | // NewBlobFromProto creates a new blob from the proto generated type
119 | func NewBlobFromProto(pb *v1.BlobProto) (*Blob, error) {
120 | if pb.NamespaceVersion > NamespaceVersionMax {
121 | return nil, errors.New("namespace version can not be greater than MaxNamespaceVersion")
122 | }
123 | if pb.ShareVersion > MaxShareVersion {
124 | return nil, fmt.Errorf("share version can not be greater than MaxShareVersion %d", MaxShareVersion)
125 | }
126 | ns, err := NewNamespace(uint8(pb.NamespaceVersion), pb.NamespaceId)
127 | if err != nil {
128 | return nil, fmt.Errorf("invalid namespace: %w", err)
129 | }
130 | return NewBlob(
131 | ns,
132 | pb.Data,
133 | uint8(pb.ShareVersion),
134 | pb.Signer,
135 | )
136 | }
137 |
// Namespace returns the namespace of the blob
func (b *Blob) Namespace() Namespace {
	return b.namespace
}

// ShareVersion returns the share version of the blob
func (b *Blob) ShareVersion() uint8 {
	return b.shareVersion
}

// Signer returns the signer of the blob; nil for share version 0 blobs
func (b *Blob) Signer() []byte {
	return b.signer
}

// Data returns the data of the blob
func (b *Blob) Data() []byte {
	return b.data
}

// DataLen returns the length of the data of the blob
func (b *Blob) DataLen() int {
	return len(b.data)
}

// Compare is used to order two blobs based on their namespace
func (b *Blob) Compare(other *Blob) int {
	return b.namespace.Compare(other.namespace)
}

// IsEmpty returns true if the blob is empty. This is an invalid
// construction that can only occur if using the nil value. We
// only check that the data is empty but this also implies that
// all other fields would have their zero value
func (b *Blob) IsEmpty() bool {
	return len(b.data) == 0
}
175 |
// SortBlobs stably sorts the blobs by their namespace. Blobs that share a
// namespace keep their relative order (sort.SliceStable).
func SortBlobs(blobs []*Blob) {
	sort.SliceStable(blobs, func(i, j int) bool {
		return blobs[i].Compare(blobs[j]) < 0
	})
}
182 |
183 | // ToShares converts blob's data back to shares.
184 | func (b *Blob) ToShares() ([]Share, error) {
185 | splitter := NewSparseShareSplitter()
186 | err := splitter.Write(b)
187 | if err != nil {
188 | return nil, err
189 | }
190 | return splitter.Export(), nil
191 | }
192 |
--------------------------------------------------------------------------------
/share/blob_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "crypto/rand"
6 | "encoding/json"
7 | "testing"
8 |
9 | v1 "github.com/celestiaorg/go-square/v2/proto/blob/v1"
10 | "github.com/stretchr/testify/require"
11 | )
12 |
// TestProtoEncoding asserts that a blob survives a protobuf
// marshal/unmarshal round trip unchanged.
func TestProtoEncoding(t *testing.T) {
	signer := make([]byte, 20)
	_, err := rand.Read(signer)
	require.NoError(t, err)
	blob, err := NewBlob(RandomNamespace(), []byte{1, 2, 3, 4, 5}, 1, signer)
	require.NoError(t, err)

	blobBytes, err := blob.Marshal()
	require.NoError(t, err)

	newBlob, err := UnmarshalBlob(blobBytes)
	require.NoError(t, err)

	require.Equal(t, blob, newBlob)
}

// TestJSONEncoding asserts that a blob survives a JSON marshal/unmarshal
// round trip unchanged.
func TestJSONEncoding(t *testing.T) {
	signer := make([]byte, 20)
	_, err := rand.Read(signer)
	require.NoError(t, err)
	blob, err := NewBlob(RandomNamespace(), []byte{1, 2, 3, 4, 5}, 1, signer)
	require.NoError(t, err)

	data, err := json.Marshal(blob)
	require.NoError(t, err)
	require.NotNil(t, data)

	b := &Blob{}
	err = json.Unmarshal(data, b)
	require.NoError(t, err)
	require.Equal(t, blob, b)
}
45 |
// TestBlobConstructor exercises every validation branch of NewBlob, then
// round-trips one valid blob through sparse shares.
func TestBlobConstructor(t *testing.T) {
	signer := make([]byte, 20)
	_, err := rand.Read(signer)
	require.NoError(t, err)

	ns := RandomNamespace()
	data := []byte{1, 2, 3, 4, 5}

	// test all invalid cases
	_, err = NewBlob(ns, data, 0, signer)
	require.Error(t, err)
	require.Contains(t, err.Error(), "share version 0 does not support signer")

	_, err = NewBlob(ns, nil, 0, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "data can not be empty")

	_, err = NewBlob(ns, data, 1, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "share version 1 requires signer of size")

	_, err = NewBlob(ns, data, 128, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "share version 128 not supported")

	_, err = NewBlob(ns, data, 2, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "share version 2 not supported")

	_, err = NewBlob(Namespace{}, data, 1, signer)
	require.Error(t, err)
	require.Contains(t, err.Error(), "namespace can not be empty")

	ns2, err := NewNamespace(NamespaceVersionMax, ns.ID())
	require.NoError(t, err)
	_, err = NewBlob(ns2, data, 0, nil)
	require.Error(t, err)
	require.Contains(t, err.Error(), "namespace version must be 0")

	// valid case: a blob must split into shares and parse back unchanged
	blob, err := NewBlob(ns, data, 0, nil)
	require.NoError(t, err)
	shares, err := blob.ToShares()
	require.NoError(t, err)
	blobList, err := parseSparseShares(shares)
	require.NoError(t, err)
	require.Len(t, blobList, 1)
	require.Equal(t, blob, blobList[0])
}
94 |
// TestNewBlobFromProto covers the validation of each BlobProto field when
// converting the generated proto type into a Blob.
func TestNewBlobFromProto(t *testing.T) {
	namespace := RandomNamespace()
	testCases := []struct {
		name        string
		proto       *v1.BlobProto
		expectedErr string
	}{
		{
			name: "valid blob",
			proto: &v1.BlobProto{
				NamespaceId:      namespace.ID(),
				NamespaceVersion: uint32(namespace.Version()),
				ShareVersion:     0,
				Data:             []byte{1, 2, 3, 4, 5},
			},
			expectedErr: "",
		},
		{
			// 256 overflows the uint8 namespace version range
			name: "invalid namespace version",
			proto: &v1.BlobProto{
				NamespaceId:      namespace.ID(),
				NamespaceVersion: 256,
				ShareVersion:     0,
				Data:             []byte{1, 2, 3, 4, 5},
			},
			expectedErr: "namespace version can not be greater than MaxNamespaceVersion",
		},
		{
			name: "empty data",
			proto: &v1.BlobProto{
				NamespaceId:      namespace.ID(),
				NamespaceVersion: 0,
				ShareVersion:     0,
				Data:             []byte{},
			},
			expectedErr: "data can not be empty",
		},
		{
			name: "invalid namespace ID length",
			proto: &v1.BlobProto{
				NamespaceId:      []byte{1, 2, 3},
				NamespaceVersion: 0,
				ShareVersion:     0,
				Data:             []byte{1, 2, 3, 4, 5},
			},
			expectedErr: "invalid namespace",
		},
		{
			// share version 1 requires a signer of exactly SignerSize bytes
			name: "valid blob with signer",
			proto: &v1.BlobProto{
				NamespaceId:      namespace.ID(),
				NamespaceVersion: 0,
				ShareVersion:     1,
				Data:             []byte{1, 2, 3, 4, 5},
				Signer:           bytes.Repeat([]byte{1}, SignerSize),
			},
			expectedErr: "",
		},
		{
			name: "invalid signer length",
			proto: &v1.BlobProto{
				NamespaceId:      namespace.ID(),
				NamespaceVersion: 0,
				ShareVersion:     1,
				Data:             []byte{1, 2, 3, 4, 5},
				Signer:           []byte{1, 2, 3},
			},
			expectedErr: "share version 1 requires signer of size",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			blob, err := NewBlobFromProto(tc.proto)
			if tc.expectedErr != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.expectedErr)
			} else {
				require.NoError(t, err)
				require.NotNil(t, blob)
				require.Equal(t, tc.proto.NamespaceId, blob.Namespace().ID())
				require.Equal(t, uint8(tc.proto.NamespaceVersion), blob.Namespace().Version())
				require.Equal(t, uint8(tc.proto.ShareVersion), blob.ShareVersion())
				require.Equal(t, tc.proto.Data, blob.Data())
				require.Equal(t, tc.proto.Signer, blob.Signer())
			}
		})
	}
}
184 |
--------------------------------------------------------------------------------
/share/compact_shares_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "crypto/sha256"
7 | "fmt"
8 | "math/rand"
9 | "testing"
10 | "time"
11 |
12 | "github.com/stretchr/testify/assert"
13 | "github.com/stretchr/testify/require"
14 | )
15 |
// TestCompactShareSplitter writes 33 random 200-byte txs into the compact
// share splitter and asserts they parse back unchanged.
func TestCompactShareSplitter(t *testing.T) {
	// note that this test is mainly for debugging purposes, the main round trip
	// tests occur in TestMerge and Test_processCompactShares
	css := NewCompactShareSplitter(TxNamespace, ShareVersionZero)
	txs := generateRandomTxs(33, 200)
	for _, tx := range txs {
		err := css.WriteTx(tx)
		require.NoError(t, err)
	}
	shares, err := css.Export()
	require.NoError(t, err)

	resTxs, err := parseCompactShares(shares)
	require.NoError(t, err)

	assert.Equal(t, txs, resTxs)
}

// TestFuzz_processCompactShares repeatedly runs Test_processCompactShares
// for one minute. It is skipped by default; remove the t.Skip() to run it
// manually.
func TestFuzz_processCompactShares(t *testing.T) {
	t.Skip()
	// run random shares through processCompactShares for a minute
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	for {
		select {
		case <-ctx.Done():
			return
		default:
			Test_processCompactShares(t)
		}
	}
}
48 |
49 | func Test_processCompactShares(t *testing.T) {
50 | // exactTxShareSize is the length of tx that will fit exactly into a single
51 | // share, accounting for the tx length delimiter prepended to
52 | // each tx. Note that the length delimiter can be 1 to 10 bytes (varint) but
53 | // this test assumes it is 1 byte.
54 | const exactTxShareSize = FirstCompactShareContentSize - 1
55 |
56 | type test struct {
57 | name string
58 | txSize int
59 | txCount int
60 | }
61 |
62 | // each test is ran twice, once using txSize as an exact size, and again
63 | // using it as a cap for randomly sized txs
64 | tests := []test{
65 | {"single small tx", ContinuationCompactShareContentSize / 8, 1},
66 | {"many small txs", ContinuationCompactShareContentSize / 8, 10},
67 | {"single big tx", ContinuationCompactShareContentSize * 4, 1},
68 | {"many big txs", ContinuationCompactShareContentSize * 4, 10},
69 | {"single exact size tx", exactTxShareSize, 1},
70 | {"many exact size txs", exactTxShareSize, 100},
71 | }
72 |
73 | for _, tc := range tests {
74 | // run the tests with identically sized txs
75 | t.Run(fmt.Sprintf("%s idendically sized", tc.name), func(t *testing.T) {
76 | txs := generateRandomTxs(tc.txCount, tc.txSize)
77 |
78 | shares, _, err := splitTxs(txs)
79 | require.NoError(t, err)
80 |
81 | parsedTxs, err := parseCompactShares(shares)
82 | if err != nil {
83 | t.Error(err)
84 | }
85 |
86 | // check that the data parsed is identical
87 | for i := 0; i < len(txs); i++ {
88 | assert.Equal(t, txs[i], parsedTxs[i])
89 | }
90 | })
91 |
92 | // run the same tests using randomly sized txs with caps of tc.txSize
93 | t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) {
94 | txs := generateRandomlySizedTxs(tc.txCount, tc.txSize)
95 |
96 | txShares, _, err := splitTxs(txs)
97 | require.NoError(t, err)
98 | parsedTxs, err := parseCompactShares(txShares)
99 | if err != nil {
100 | t.Error(err)
101 | }
102 |
103 | // check that the data parsed is identical to the original
104 | for i := 0; i < len(txs); i++ {
105 | assert.Equal(t, txs[i], parsedTxs[i])
106 | }
107 | })
108 | }
109 | }
110 |
// TestParseRandomOutOfContextShares verifies that any contiguous sub-range
// of compact shares parses into txs that appear, in order, in the original
// tx list.
func TestParseRandomOutOfContextShares(t *testing.T) {
	txs := generateRandomlySizedTxs(1000, 150)
	txShares, _, err := splitTxs(txs)
	require.NoError(t, err)

	for i := 0; i < 1000; i++ {
		start, length := getRandomSubSlice(len(txShares))
		randomRange := NewRange(start, start+length)
		resTxs, err := ParseTxs(txShares[randomRange.Start:randomRange.End])
		require.NoError(t, err)
		assert.True(t, checkSubArray(txs, resTxs))
	}
}
124 |
// getRandomSubSlice returns two integers representing a randomly sized range in the interval [0, size]
func getRandomSubSlice(size int) (start int, length int) {
	// Pick the length first (0..size inclusive), then a start position that
	// keeps start+length within bounds.
	length = rand.Intn(size + 1)
	start = rand.Intn(size - length + 1)
	return
}
131 |
// checkSubArray returns whether subTxList is a subarray of txList
// (i.e. it occurs as a contiguous run, compared element-wise with
// bytes.Equal). An empty subTxList always matches.
func checkSubArray(txList [][]byte, subTxList [][]byte) bool {
	n := len(subTxList)
	for start := 0; start+n <= len(txList); start++ {
		matched := true
		for k := 0; k < n; k++ {
			if !bytes.Equal(txList[start+k], subTxList[k]) {
				matched = false
				break
			}
		}
		if matched {
			return true
		}
	}
	return false
}
149 |
// TestParseOutOfContextSharesUsingShareRanges verifies that parsing the
// share range recorded for a tx (keyed by the tx's sha256 hash) yields a tx
// set containing that tx.
func TestParseOutOfContextSharesUsingShareRanges(t *testing.T) {
	txs := generateRandomlySizedTxs(1000, 150)
	txShares, shareRanges, err := splitTxs(txs)
	require.NoError(t, err)

	for key, r := range shareRanges {
		resTxs, err := ParseTxs(txShares[r.Start:r.End])
		require.NoError(t, err)
		has := false
		for _, tx := range resTxs {
			if sha256.Sum256(tx) == key {
				has = true
				break
			}
		}
		assert.True(t, has)
	}
}

// TestCompactShareContainsInfoByte asserts the first (and only) share of a
// short sequence carries an info byte with the sequence start bit set.
func TestCompactShareContainsInfoByte(t *testing.T) {
	css := NewCompactShareSplitter(TxNamespace, ShareVersionZero)
	txs := generateRandomTxs(1, ContinuationCompactShareContentSize/4)

	for _, tx := range txs {
		err := css.WriteTx(tx)
		require.NoError(t, err)
	}

	shares, err := css.Export()
	require.NoError(t, err)
	assert.Condition(t, func() bool { return len(shares) == 1 })

	// the info byte sits immediately after the namespace bytes
	infoByte := shares[0].data[NamespaceSize : NamespaceSize+ShareInfoBytes][0]

	isSequenceStart := true
	want, err := NewInfoByte(ShareVersionZero, isSequenceStart)

	require.NoError(t, err)
	assert.Equal(t, byte(want), infoByte)
}

// TestContiguousCompactShareContainsInfoByte asserts a continuation share
// (the second share of a multi-share sequence) carries an info byte with the
// sequence start bit cleared.
func TestContiguousCompactShareContainsInfoByte(t *testing.T) {
	css := NewCompactShareSplitter(TxNamespace, ShareVersionZero)
	txs := generateRandomTxs(1, ContinuationCompactShareContentSize*4)

	for _, tx := range txs {
		err := css.WriteTx(tx)
		require.NoError(t, err)
	}

	shares, err := css.Export()
	require.NoError(t, err)
	assert.Condition(t, func() bool { return len(shares) > 1 })

	infoByte := shares[1].data[NamespaceSize : NamespaceSize+ShareInfoBytes][0]

	isSequenceStart := false
	want, err := NewInfoByte(ShareVersionZero, isSequenceStart)

	require.NoError(t, err)
	assert.Equal(t, byte(want), infoByte)
}
212 |
213 | func generateRandomlySizedTxs(count, maxSize int) [][]byte {
214 | txs := make([][]byte, count)
215 | for i := 0; i < count; i++ {
216 | size := rand.Intn(maxSize)
217 | if size == 0 {
218 | size = 1
219 | }
220 | txs[i] = generateRandomTxs(1, size)[0]
221 | }
222 | return txs
223 | }
224 |
// splitTxs writes txs into compact shares and returns the resulting shares
// together with the per-tx share ranges, keyed by each tx's sha256 hash
// (see ShareRanges).
func splitTxs(txs [][]byte) ([]Share, map[[sha256.Size]byte]Range, error) {
	txWriter := NewCompactShareSplitter(TxNamespace, ShareVersionZero)
	for _, tx := range txs {
		err := txWriter.WriteTx(tx)
		if err != nil {
			return nil, nil, err
		}
	}
	shares, err := txWriter.Export()
	if err != nil {
		return nil, nil, err
	}
	return shares, txWriter.ShareRanges(0), nil
}
239 |
--------------------------------------------------------------------------------
/share/consts.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "math"
6 | )
7 |
const (
	// ShareSize is the size of a share in bytes.
	ShareSize = 512

	// ShareInfoBytes is the number of bytes reserved for information. The info
	// byte contains the share version and a sequence start indicator.
	ShareInfoBytes = 1

	// SequenceLenBytes is the number of bytes reserved for the sequence length
	// that is present in the first share of a sequence.
	SequenceLenBytes = 4

	// ShareVersionZero is the first share version format.
	ShareVersionZero = uint8(0)

	// ShareVersionOne is the second share version format.
	// It requires that a signer is included in the first share in the sequence.
	ShareVersionOne = uint8(1)

	// DefaultShareVersion is the defacto share version. Use this if you are
	// unsure of which version to use.
	DefaultShareVersion = ShareVersionZero

	// CompactShareReservedBytes is the number of bytes reserved for the location of
	// the first unit (transaction, ISR) in a compact share.
	// Deprecated: use ShareReservedBytes instead.
	CompactShareReservedBytes = ShareReservedBytes

	// ShareReservedBytes is the number of bytes reserved for the location of
	// the first unit (transaction, ISR) in a compact share.
	ShareReservedBytes = 4

	// FirstCompactShareContentSize is the number of bytes usable for data in
	// the first compact share of a sequence.
	FirstCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes - ShareReservedBytes

	// ContinuationCompactShareContentSize is the number of bytes usable for
	// data in a continuation compact share of a sequence.
	ContinuationCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - ShareReservedBytes

	// FirstSparseShareContentSize is the number of bytes usable for data in the
	// first sparse share of a sequence.
	FirstSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes

	// ContinuationSparseShareContentSize is the number of bytes usable for data
	// in a continuation sparse share of a sequence.
	ContinuationSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes

	// MinSquareSize is the smallest original square width.
	MinSquareSize = 1

	// MinShareCount is the minimum number of shares allowed in the original
	// data square.
	MinShareCount = MinSquareSize * MinSquareSize

	// MaxShareVersion is the maximum value a share version can be.
	// Only 7 bits of the info byte are available for the version.
	MaxShareVersion = 127

	// SignerSize is the size of the signer in bytes.
	SignerSize = 20
)
69 |
// SupportedShareVersions is a list of supported share versions.
var SupportedShareVersions = []uint8{ShareVersionZero, ShareVersionOne}

const (
	// NamespaceVersionSize is the size of a namespace version in bytes.
	NamespaceVersionSize = 1

	// VersionIndex is the index of the version in the namespace. This should
	// always be the first byte.
	VersionIndex = 0

	// NamespaceIDSize is the size of a namespace ID in bytes.
	NamespaceIDSize = 28

	// NamespaceSize is the size of a namespace (version + ID) in bytes.
	NamespaceSize = NamespaceVersionSize + NamespaceIDSize

	// NamespaceVersionZero is the first namespace version.
	NamespaceVersionZero = uint8(0)

	// NamespaceVersionMax is the max namespace version (255).
	NamespaceVersionMax = math.MaxUint8

	// NamespaceVersionZeroPrefixSize is the number of `0` bytes that are prefixed to
	// namespace IDs for version 0.
	NamespaceVersionZeroPrefixSize = 18

	// NamespaceVersionZeroIDSize is the number of bytes available for
	// user-specified namespace ID in a namespace ID for version 0.
	NamespaceVersionZeroIDSize = NamespaceIDSize - NamespaceVersionZeroPrefixSize
)

var (
	// NamespaceVersionZeroPrefix is the prefix of a namespace ID for version 0.
	NamespaceVersionZeroPrefix = bytes.Repeat([]byte{0}, NamespaceVersionZeroPrefixSize)

	// TxNamespace is the namespace reserved for ordinary Cosmos SDK transactions.
	TxNamespace = primaryReservedNamespace(0x01)

	// IntermediateStateRootsNamespace is the namespace reserved for
	// intermediate state root data.
	IntermediateStateRootsNamespace = primaryReservedNamespace(0x02)

	// PayForBlobNamespace is the namespace reserved for PayForBlobs transactions.
	PayForBlobNamespace = primaryReservedNamespace(0x04)

	// PrimaryReservedPaddingNamespace is the namespace used for padding after all
	// primary reserved namespaces.
	PrimaryReservedPaddingNamespace = primaryReservedNamespace(0xFF)

	// MaxPrimaryReservedNamespace is the highest primary reserved namespace.
	// Namespaces lower than this are reserved for protocol use.
	MaxPrimaryReservedNamespace = primaryReservedNamespace(0xFF)

	// MinSecondaryReservedNamespace is the lowest secondary reserved namespace.
	// Namespaces equal to or higher than this are reserved for protocol use.
	MinSecondaryReservedNamespace = secondaryReservedNamespace(0x00)

	// TailPaddingNamespace is the namespace reserved for tail padding. All data
	// with this namespace will be ignored.
	TailPaddingNamespace = secondaryReservedNamespace(0xFE)

	// ParitySharesNamespace is the namespace reserved for erasure coded data.
	ParitySharesNamespace = secondaryReservedNamespace(0xFF)

	// SupportedBlobNamespaceVersions is a list of namespace versions that can be specified by a user for blobs.
	SupportedBlobNamespaceVersions = []uint8{NamespaceVersionZero}
)
139 |
140 | func primaryReservedNamespace(lastByte byte) Namespace {
141 | return newNamespace(NamespaceVersionZero, append(bytes.Repeat([]byte{0x00}, NamespaceIDSize-1), lastByte))
142 | }
143 |
144 | func secondaryReservedNamespace(lastByte byte) Namespace {
145 | return newNamespace(NamespaceVersionMax, append(bytes.Repeat([]byte{0xFF}, NamespaceIDSize-1), lastByte))
146 | }
147 |
--------------------------------------------------------------------------------
/share/counter.go:
--------------------------------------------------------------------------------
1 | package share
2 |
// CompactShareCounter computes, without materializing any shares, how many
// compact shares a sequence of length-prefixed units will occupy.
// The zero value is ready to use.
type CompactShareCounter struct {
	// lastShares and lastRemainder snapshot the state prior to the most
	// recent Add call so that Revert can undo exactly one step.
	lastShares    int
	lastRemainder int
	shares        int
	// remainder is the number of bytes used for data in the last share
	remainder int
}

// NewCompactShareCounter creates a new instance of a counter which calculates the amount
// of compact shares a set of data will be split into.
func NewCompactShareCounter() *CompactShareCounter {
	return &CompactShareCounter{}
}
16 |
// Add adds the length of the data to the counter and returns the amount of shares
// the counter has been increased by. The returned diff counts a partially
// filled trailing share as a full share, matching Size().
func (c *CompactShareCounter) Add(dataLen int) int {
	// Increment the data len by the varint that will prefix the data.
	dataLen += delimLen(uint64(dataLen))

	// save a copy of the previous state
	c.lastRemainder = c.remainder
	c.lastShares = c.shares

	// if this is the first share, calculate how much is taken up by dataLen
	// (the first share has less content room than continuation shares)
	if c.shares == 0 {
		if dataLen >= FirstCompactShareContentSize-c.remainder {
			dataLen -= (FirstCompactShareContentSize - c.remainder)
			c.shares++
			c.remainder = 0
		} else {
			c.remainder += dataLen
			dataLen = 0
		}
	}

	// next, look to fill the remainder of the continuation share
	// (if dataLen was consumed above this is a no-op: 0 bytes are added)
	if dataLen >= (ContinuationCompactShareContentSize - c.remainder) {
		dataLen -= (ContinuationCompactShareContentSize - c.remainder)
		c.shares++
		c.remainder = 0
	} else {
		c.remainder += dataLen
		dataLen = 0
	}

	// finally, divide the remaining dataLen into the continuation shares and update
	// the remainder
	if dataLen > 0 {
		c.shares += dataLen / ContinuationCompactShareContentSize
		c.remainder = dataLen % ContinuationCompactShareContentSize
	}

	// calculate the diff between before and after, adjusting for the
	// partially filled share: starting a new partial share adds one, and
	// exactly filling a previously partial share was already counted.
	diff := c.shares - c.lastShares
	if c.lastRemainder == 0 && c.remainder > 0 {
		diff++
	} else if c.lastRemainder > 0 && c.remainder == 0 {
		diff--
	}
	return diff
}
65 |
// Revert reverts the last Add operation. This can be called multiple times but only works
// the first time after an add operation (subsequent calls restore the same
// snapshot again).
func (c *CompactShareCounter) Revert() {
	c.shares = c.lastShares
	c.remainder = c.lastRemainder
}

// Size returns the amount of shares the compact share counter has counted.
// A partially filled trailing share (remainder > 0) counts as a full share.
func (c *CompactShareCounter) Size() int {
	if c.remainder == 0 {
		return c.shares
	}
	return c.shares + 1
}

// Remainder returns the number of data bytes occupied in the last counted
// share, or 0 if all counted shares are exactly full.
func (c *CompactShareCounter) Remainder() int {
	return c.remainder
}
84 |
--------------------------------------------------------------------------------
/share/counter_test.go:
--------------------------------------------------------------------------------
1 | package share_test
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "testing"
7 |
8 | "github.com/celestiaorg/go-square/v2/share"
9 | "github.com/stretchr/testify/require"
10 | )
11 |
// TestCounterMatchesCompactShareSplitter cross-checks CompactShareCounter
// against the real CompactShareSplitter: after every tx the counted diff and
// running size must match the splitter's share count.
func TestCounterMatchesCompactShareSplitter(t *testing.T) {
	testCases := []struct {
		txs [][]byte
	}{
		{txs: [][]byte{}},
		{txs: [][]byte{newTx(120)}},
		{txs: [][]byte{newTx(share.FirstCompactShareContentSize - 2)}},
		{txs: [][]byte{newTx(share.FirstCompactShareContentSize - 1)}},
		{txs: [][]byte{newTx(share.FirstCompactShareContentSize)}},
		{txs: [][]byte{newTx(share.FirstCompactShareContentSize + 1)}},
		{txs: [][]byte{newTx(share.FirstCompactShareContentSize), newTx(share.ContinuationCompactShareContentSize - 4)}},
		{txs: newTxs(1000, 100)},
		{txs: newTxs(100, 1000)},
		{txs: newTxs(8931, 77)},
	}

	for idx, tc := range testCases {
		t.Run(fmt.Sprintf("case%d", idx), func(t *testing.T) {
			writer := share.NewCompactShareSplitter(share.PayForBlobNamespace, share.ShareVersionZero)
			counter := share.NewCompactShareCounter()

			sum := 0
			for _, tx := range tc.txs {
				require.NoError(t, writer.WriteTx(tx))
				diff := counter.Add(len(tx))
				require.Equal(t, writer.Count()-sum, diff)
				sum = writer.Count()
				require.Equal(t, sum, counter.Size())
			}
			shares, err := writer.Export()
			require.NoError(t, err)
			require.Equal(t, len(shares), sum)
			require.Equal(t, len(shares), counter.Size())
		})
	}

	// an untouched writer and counter must both report zero shares
	writer := share.NewCompactShareSplitter(share.PayForBlobNamespace, share.ShareVersionZero)
	counter := share.NewCompactShareCounter()
	require.Equal(t, counter.Size(), 0)
	require.Equal(t, writer.Count(), counter.Size())
}

// TestCompactShareCounterRevert checks that Revert undoes exactly the most
// recent Add.
func TestCompactShareCounterRevert(t *testing.T) {
	counter := share.NewCompactShareCounter()
	require.Equal(t, counter.Size(), 0)
	counter.Add(share.FirstCompactShareContentSize - 2)
	counter.Add(1)
	require.Equal(t, counter.Size(), 2)
	counter.Revert()
	require.Equal(t, counter.Size(), 1)
}
63 |
// newTx returns a deterministic transaction payload of the given length,
// filled with 'a' bytes.
func newTx(length int) []byte {
	tx := make([]byte, length)
	for i := range tx {
		tx[i] = 'a'
	}
	return tx
}
67 |
// newTxs returns n deterministic transactions, each of the given length and
// filled with 'a' bytes.
func newTxs(n int, length int) [][]byte {
	txs := make([][]byte, n)
	for i := range txs {
		txs[i] = bytes.Repeat([]byte("a"), length)
	}
	return txs
}
75 |
--------------------------------------------------------------------------------
/share/info_byte.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "fmt"
5 | )
6 |
// InfoByte is a byte with the following structure: the first 7 bits are
// reserved for version information in big endian form (initially `0000000`).
// The last bit is a "sequence start indicator", that is `1` if this is the
// first share of a sequence and `0` if this is a continuation share.
// Construct values with NewInfoByte or ParseInfoByte, which validate the
// version range.
type InfoByte byte
12 |
13 | func NewInfoByte(version uint8, isSequenceStart bool) (InfoByte, error) {
14 | if version > MaxShareVersion {
15 | return 0, fmt.Errorf("version %d must be less than or equal to %d", version, MaxShareVersion)
16 | }
17 |
18 | prefix := version << 1
19 | if isSequenceStart {
20 | return InfoByte(prefix + 1), nil
21 | }
22 | return InfoByte(prefix), nil
23 | }
24 |
25 | // Version returns the version encoded in this InfoByte. Version is
26 | // expected to be between 0 and MaxShareVersion (inclusive).
27 | func (i InfoByte) Version() uint8 {
28 | version := uint8(i) >> 1
29 | return version
30 | }
31 |
32 | // IsSequenceStart returns whether this share is the start of a sequence.
33 | func (i InfoByte) IsSequenceStart() bool {
34 | return uint(i)%2 == 1
35 | }
36 |
37 | func ParseInfoByte(i byte) (InfoByte, error) {
38 | isSequenceStart := i%2 == 1
39 | version := i >> 1
40 | return NewInfoByte(version, isSequenceStart)
41 | }
42 |
--------------------------------------------------------------------------------
/share/info_byte_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import "testing"
4 |
// TestInfoByte round-trips valid (version, sequence start) pairs through
// NewInfoByte and its accessors.
func TestInfoByte(t *testing.T) {
	blobStart := true
	notBlobStart := false

	type testCase struct {
		version         uint8
		isSequenceStart bool
	}
	// 127 is MaxShareVersion, the largest valid version
	tests := []testCase{
		{0, blobStart},
		{1, blobStart},
		{2, blobStart},
		{127, blobStart},

		{0, notBlobStart},
		{1, notBlobStart},
		{2, notBlobStart},
		{127, notBlobStart},
	}

	for _, test := range tests {
		irb, err := NewInfoByte(test.version, test.isSequenceStart)
		if err != nil {
			t.Errorf("got %v want no error", err)
		}
		if got := irb.Version(); got != test.version {
			t.Errorf("got version %v want %v", got, test.version)
		}
		if got := irb.IsSequenceStart(); got != test.isSequenceStart {
			t.Errorf("got IsSequenceStart %v want %v", got, test.isSequenceStart)
		}
	}
}
38 |
39 | func TestInfoByteErrors(t *testing.T) {
40 | blobStart := true
41 | notBlobStart := false
42 |
43 | type testCase struct {
44 | version uint8
45 | isSequenceStart bool
46 | }
47 |
48 | tests := []testCase{
49 | {128, notBlobStart},
50 | {255, notBlobStart},
51 | {128, blobStart},
52 | {255, blobStart},
53 | }
54 |
55 | for _, test := range tests {
56 | _, err := NewInfoByte(test.version, false)
57 | if err == nil {
58 | t.Errorf("got nil but want error when version > 127")
59 | }
60 | }
61 | }
62 |
63 | func FuzzNewInfoByte(f *testing.F) {
64 | f.Fuzz(func(t *testing.T, version uint8, isSequenceStart bool) {
65 | if version > 127 {
66 | t.Skip()
67 | }
68 | _, err := NewInfoByte(version, isSequenceStart)
69 | if err != nil {
70 | t.Errorf("got nil but want error when version > 127")
71 | }
72 | })
73 | }
74 |
75 | func TestParseInfoByte(t *testing.T) {
76 | type testCase struct {
77 | b byte
78 | wantVersion uint8
79 | wantisSequenceStart bool
80 | }
81 |
82 | tests := []testCase{
83 | {0b00000000, 0, false},
84 | {0b00000001, 0, true},
85 | {0b00000010, 1, false},
86 | {0b00000011, 1, true},
87 | {0b00000101, 2, true},
88 | {0b11111111, 127, true},
89 | }
90 |
91 | for _, test := range tests {
92 | got, err := ParseInfoByte(test.b)
93 | if err != nil {
94 | t.Errorf("got %v want no error", err)
95 | }
96 | if got.Version() != test.wantVersion {
97 | t.Errorf("got version %v want %v", got.Version(), test.wantVersion)
98 | }
99 | if got.IsSequenceStart() != test.wantisSequenceStart {
100 | t.Errorf("got IsSequenceStart %v want %v", got.IsSequenceStart(), test.wantisSequenceStart)
101 | }
102 | }
103 | }
104 |
--------------------------------------------------------------------------------
/share/namespace.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | "encoding/hex"
7 | "encoding/json"
8 | "errors"
9 | "fmt"
10 | "slices"
11 | )
12 |
// Namespace wraps the raw namespace bytes: a version byte followed by the
// namespace ID bytes (see newNamespace).
type Namespace struct {
	data []byte
}
16 |
17 | // MarshalJSON encodes namespace to the json encoded bytes.
18 | func (n Namespace) MarshalJSON() ([]byte, error) {
19 | return json.Marshal(n.data)
20 | }
21 |
22 | // UnmarshalJSON decodes json bytes to the namespace.
23 | func (n *Namespace) UnmarshalJSON(data []byte) error {
24 | var buf []byte
25 | if err := json.Unmarshal(data, &buf); err != nil {
26 | return err
27 | }
28 |
29 | ns, err := NewNamespaceFromBytes(buf)
30 | if err != nil {
31 | return err
32 | }
33 | *n = ns
34 | return nil
35 | }
36 |
37 | // NewNamespace validates the provided version and id and returns a new namespace.
38 | // This should be used for user specified namespaces.
39 | func NewNamespace(version uint8, id []byte) (Namespace, error) {
40 | ns := newNamespace(version, id)
41 | if err := ns.validate(); err != nil {
42 | return Namespace{}, err
43 | }
44 | return ns, nil
45 | }
46 |
47 | func newNamespace(version uint8, id []byte) Namespace {
48 | data := make([]byte, NamespaceVersionSize+len(id))
49 | data[VersionIndex] = version
50 | copy(data[NamespaceVersionSize:], id)
51 | return Namespace{
52 | data: data,
53 | }
54 | }
55 |
56 | // MustNewNamespace returns a new namespace with the provided version and id. It panics
57 | // if the provided version or id are not supported.
58 | func MustNewNamespace(version uint8, id []byte) Namespace {
59 | ns, err := NewNamespace(version, id)
60 | if err != nil {
61 | panic(err)
62 | }
63 | return ns
64 | }
65 |
66 | // NewNamespaceFromBytes returns a new namespace from the provided byte slice.
67 | // This is for user specified namespaces.
68 | func NewNamespaceFromBytes(bytes []byte) (Namespace, error) {
69 | if len(bytes) != NamespaceSize {
70 | return Namespace{}, fmt.Errorf("invalid namespace length: %d. Must be %d bytes", len(bytes), NamespaceSize)
71 | }
72 |
73 | ns := Namespace{data: bytes}
74 | if err := ns.validate(); err != nil {
75 | return Namespace{}, err
76 | }
77 | return ns, nil
78 | }
79 |
80 | // NewV0Namespace returns a new namespace with version 0 and the provided subID. subID
81 | // must be <= 10 bytes. If subID is < 10 bytes, it will be left-padded with 0s
82 | // to fill 10 bytes.
83 | func NewV0Namespace(subID []byte) (Namespace, error) {
84 | if lenSubID := len(subID); lenSubID > NamespaceVersionZeroIDSize {
85 | return Namespace{}, fmt.Errorf("subID must be <= %v, but it was %v bytes", NamespaceVersionZeroIDSize, lenSubID)
86 | }
87 |
88 | namespace := make([]byte, NamespaceSize)
89 | copy(namespace[NamespaceSize-len(subID):], subID)
90 |
91 | return NewNamespaceFromBytes(namespace)
92 | }
93 |
94 | // MustNewV0Namespace returns a new namespace with version 0 and the provided subID. This
95 | // function panics if the provided subID would result in an invalid namespace.
96 | func MustNewV0Namespace(subID []byte) Namespace {
97 | ns, err := NewV0Namespace(subID)
98 | if err != nil {
99 | panic(err)
100 | }
101 | return ns
102 | }
103 |
// Bytes returns this namespace as a byte slice (version byte followed by the
// ID). The underlying storage is returned directly, not copied.
func (n Namespace) Bytes() []byte {
	return n.data
}

// Version returns this namespace's version byte.
func (n Namespace) Version() uint8 {
	return n.data[VersionIndex]
}

// ID returns this namespace's ID (all bytes after the version byte). The
// underlying storage is returned directly, not copied.
func (n Namespace) ID() []byte {
	return n.data[NamespaceVersionSize:]
}

// String returns the hex encoding of the namespace's raw bytes.
func (n Namespace) String() string {
	return hex.EncodeToString(n.data)
}
123 |
124 | // validate returns an error if the provided version is not
125 | // supported or the provided id does not meet the requirements
126 | // for the provided version. This should be used for validating
127 | // user specified namespaces
128 | func (n Namespace) validate() error {
129 | err := n.validateVersionSupported()
130 | if err != nil {
131 | return err
132 | }
133 | return n.validateID()
134 | }
135 |
136 | // ValidateForData checks if the Namespace is of real/useful data.
137 | func (n Namespace) ValidateForData() error {
138 | if err := n.validate(); err != nil {
139 | return err
140 | }
141 | if !n.IsUsableNamespace() {
142 | return fmt.Errorf("invalid data namespace(%s): parity and tail padding namespace are forbidden", n)
143 | }
144 | return nil
145 | }
146 |
147 | // ValidateForBlob verifies whether the Namespace is appropriate for blob data.
148 | // A valid blob namespace must meet two conditions: it cannot be reserved for special purposes,
149 | // and its version must be supported by the system. If either of these conditions is not met,
150 | // an error is returned indicating the issue. This ensures that only valid namespaces are
151 | // used when dealing with blob data.
152 | func (n Namespace) ValidateForBlob() error {
153 | if err := n.ValidateForData(); err != nil {
154 | return err
155 | }
156 |
157 | if n.IsReserved() {
158 | return fmt.Errorf("invalid data namespace(%s): reserved data is forbidden", n)
159 | }
160 |
161 | if !slices.Contains(SupportedBlobNamespaceVersions, n.Version()) {
162 | return fmt.Errorf("blob version %d is not supported", n.Version())
163 | }
164 | return nil
165 | }
166 |
167 | // validateVersionSupported returns an error if the version is not supported.
168 | func (n Namespace) validateVersionSupported() error {
169 | if n.Version() != NamespaceVersionZero && n.Version() != NamespaceVersionMax {
170 | return fmt.Errorf("unsupported namespace version %v", n.Version())
171 | }
172 | return nil
173 | }
174 |
175 | // validateID returns an error if the provided id does not meet the requirements
176 | // for the provided version.
177 | func (n Namespace) validateID() error {
178 | if len(n.ID()) != NamespaceIDSize {
179 | return fmt.Errorf("unsupported namespace id length: id %v must be %v bytes but it was %v bytes", n.ID(), NamespaceIDSize, len(n.ID()))
180 | }
181 |
182 | if n.Version() == NamespaceVersionZero && !bytes.HasPrefix(n.ID(), NamespaceVersionZeroPrefix) {
183 | return fmt.Errorf("unsupported namespace id with version %v. ID %v must start with %v leading zeros", n.Version(), n.ID(), len(NamespaceVersionZeroPrefix))
184 | }
185 | return nil
186 | }
187 |
188 | // IsEmpty returns true if the namespace is empty
189 | func (n Namespace) IsEmpty() bool {
190 | return len(n.data) == 0
191 | }
192 |
193 | // IsReserved returns true if the namespace is reserved
194 | // for the Celestia state machine
195 | func (n Namespace) IsReserved() bool {
196 | return n.IsPrimaryReserved() || n.IsSecondaryReserved()
197 | }
198 |
199 | func (n Namespace) IsPrimaryReserved() bool {
200 | return n.IsLessOrEqualThan(MaxPrimaryReservedNamespace)
201 | }
202 |
203 | func (n Namespace) IsSecondaryReserved() bool {
204 | return n.IsGreaterOrEqualThan(MinSecondaryReservedNamespace)
205 | }
206 |
207 | // IsUsableNamespace refers to the range of namespaces that are
208 | // not reserved by the square protocol i.e. not parity shares or
209 | // tail padding
210 | func (n Namespace) IsUsableNamespace() bool {
211 | return !n.IsParityShares() && !n.IsTailPadding()
212 | }
213 |
214 | func (n Namespace) IsParityShares() bool {
215 | return n.Equals(ParitySharesNamespace)
216 | }
217 |
218 | func (n Namespace) IsTailPadding() bool {
219 | return n.Equals(TailPaddingNamespace)
220 | }
221 |
222 | func (n Namespace) IsPrimaryReservedPadding() bool {
223 | return n.Equals(PrimaryReservedPaddingNamespace)
224 | }
225 |
226 | func (n Namespace) IsTx() bool {
227 | return n.Equals(TxNamespace)
228 | }
229 |
230 | func (n Namespace) IsPayForBlob() bool {
231 | return n.Equals(PayForBlobNamespace)
232 | }
233 |
234 | func (n Namespace) Repeat(times int) []Namespace {
235 | ns := make([]Namespace, times)
236 | for i := 0; i < times; i++ {
237 | ns[i] = n.deepCopy()
238 | }
239 | return ns
240 | }
241 |
242 | func (n Namespace) Equals(n2 Namespace) bool {
243 | return bytes.Equal(n.data, n2.data)
244 | }
245 |
246 | func (n Namespace) IsLessThan(n2 Namespace) bool {
247 | return n.Compare(n2) == -1
248 | }
249 |
250 | func (n Namespace) IsLessOrEqualThan(n2 Namespace) bool {
251 | return n.Compare(n2) < 1
252 | }
253 |
254 | func (n Namespace) IsGreaterThan(n2 Namespace) bool {
255 | return n.Compare(n2) == 1
256 | }
257 |
258 | func (n Namespace) IsGreaterOrEqualThan(n2 Namespace) bool {
259 | return n.Compare(n2) > -1
260 | }
261 |
262 | func (n Namespace) Compare(n2 Namespace) int {
263 | return bytes.Compare(n.data, n2.data)
264 | }
265 |
// AddInt adds arbitrary int value to namespace, treating namespace as big-endian
// implementation of int. It could be helpful for users to create adjacent namespaces.
// Returns an error if the result does not fit in NamespaceSize bytes.
func (n Namespace) AddInt(val int) (Namespace, error) {
	if val == 0 {
		return n, nil
	}
	// Convert the input integer to a byte slice and add it to result slice.
	// Only the low 8 bytes of result carry the magnitude; the rest stay zero.
	result := make([]byte, NamespaceSize)
	if val > 0 {
		binary.BigEndian.PutUint64(result[NamespaceSize-8:], uint64(val))
	} else {
		// NOTE(review): for val == math.MinInt, -val overflows; on a 64-bit
		// two's-complement platform the conversion still yields the correct
		// magnitude (2^63) — confirm if 32-bit targets matter.
		binary.BigEndian.PutUint64(result[NamespaceSize-8:], uint64(-val))
	}

	// Perform schoolbook addition (or subtraction when val is negative) byte
	// by byte from the least significant byte, propagating the carry/borrow.
	var carry int
	nn := n.Bytes()
	for i := NamespaceSize - 1; i >= 0; i-- {
		var sum int
		if val > 0 {
			sum = int(nn[i]) + int(result[i]) + carry
		} else {
			sum = int(nn[i]) - int(result[i]) + carry
		}

		switch {
		case sum > 255:
			carry = 1 // overflowed this byte; carry into the next
			sum -= 256
		case sum < 0:
			carry = -1 // borrowed from the next byte
			sum += 256
		default:
			carry = 0
		}

		result[i] = uint8(sum)
	}

	// Handle any remaining carry: a leftover carry/borrow means the result
	// does not fit in NamespaceSize bytes.
	if carry != 0 {
		return Namespace{}, errors.New("namespace overflow")
	}
	return Namespace{data: result}, nil
}
311 |
// leftPad returns b left-padded with zero bytes to exactly size bytes. If b
// is already size bytes or longer, b itself is returned unchanged.
func leftPad(b []byte, size int) []byte {
	padding := size - len(b)
	if padding <= 0 {
		return b
	}
	padded := make([]byte, size)
	copy(padded[padding:], b)
	return padded
}
321 |
322 | // deepCopy returns a deep copy of the Namespace object.
323 | func (n Namespace) deepCopy() Namespace {
324 | // Create a deep copy of the ID slice
325 | copyData := make([]byte, len(n.data))
326 | copy(copyData, n.data)
327 |
328 | return Namespace{
329 | data: copyData,
330 | }
331 | }
332 |
--------------------------------------------------------------------------------
/share/padding.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "errors"
5 | )
6 |
7 | // NamespacePaddingShare returns a share that acts as padding. Namespace padding
8 | // shares follow a blob so that the next blob may start at an index that
9 | // conforms to blob share commitment rules. The ns and shareVersion parameters
10 | // provided should be the namespace and shareVersion of the blob that precedes
11 | // this padding in the data square.
12 | func NamespacePaddingShare(ns Namespace, shareVersion uint8) (Share, error) {
13 | b, err := newBuilder(ns, shareVersion, true)
14 | if err != nil {
15 | return Share{}, err
16 | }
17 | if err := b.WriteSequenceLen(0); err != nil {
18 | return Share{}, err
19 | }
20 | padding := make([]byte, FirstSparseShareContentSize)
21 | b.AddData(padding)
22 |
23 | share, err := b.Build()
24 | if err != nil {
25 | return Share{}, err
26 | }
27 |
28 | return *share, nil
29 | }
30 |
31 | // NamespacePaddingShares returns n namespace padding shares.
32 | func NamespacePaddingShares(ns Namespace, shareVersion uint8, n int) ([]Share, error) {
33 | var err error
34 | if n < 0 {
35 | return nil, errors.New("n must be positive")
36 | }
37 | shares := make([]Share, n)
38 | for i := 0; i < n; i++ {
39 | shares[i], err = NamespacePaddingShare(ns, shareVersion)
40 | if err != nil {
41 | return shares, err
42 | }
43 | }
44 | return shares, nil
45 | }
46 |
47 | // ReservedPaddingShare returns a share that acts as padding. Reserved padding
48 | // shares follow all significant shares in the reserved namespace so that the
49 | // first blob can start at an index that conforms to non-interactive default
50 | // rules.
51 | func ReservedPaddingShare() Share {
52 | share, err := NamespacePaddingShare(PrimaryReservedPaddingNamespace, ShareVersionZero)
53 | if err != nil {
54 | panic(err)
55 | }
56 | return share
57 | }
58 |
59 | // ReservedPaddingShares returns n reserved padding shares.
60 | func ReservedPaddingShares(n int) []Share {
61 | shares, err := NamespacePaddingShares(PrimaryReservedPaddingNamespace, ShareVersionZero, n)
62 | if err != nil {
63 | panic(err)
64 | }
65 | return shares
66 | }
67 |
68 | // TailPaddingShare is a share that is used to pad a data square to the desired
69 | // square size. Tail padding shares follow the last blob share in the data
70 | // square.
71 | func TailPaddingShare() Share {
72 | share, err := NamespacePaddingShare(TailPaddingNamespace, ShareVersionZero)
73 | if err != nil {
74 | panic(err)
75 | }
76 | return share
77 | }
78 |
79 | // TailPaddingShares returns n tail padding shares.
80 | func TailPaddingShares(n int) []Share {
81 | shares, err := NamespacePaddingShares(TailPaddingNamespace, ShareVersionZero, n)
82 | if err != nil {
83 | panic(err)
84 | }
85 | return shares
86 | }
87 |
--------------------------------------------------------------------------------
/share/padding_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | "github.com/stretchr/testify/require"
9 | )
10 |
// ns1 is a fixed version-zero namespace reused as a fixture by the tests below.
var ns1 = MustNewV0Namespace(bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize))

// nsOnePadding is the expected raw padding share for ns1: the namespace bytes,
// an info byte of 1 (version 0, sequence start), a four-byte zero sequence
// length, then zeros out to ShareSize.
var nsOnePadding, _ = zeroPadIfNecessary(
	append(
		ns1.Bytes(),
		[]byte{
			1,          // info byte
			0, 0, 0, 0, // sequence len
		}...,
	), ShareSize)

// reservedPadding is the expected raw padding share in the primary reserved
// padding namespace.
var reservedPadding, _ = zeroPadIfNecessary(
	append(
		PrimaryReservedPaddingNamespace.Bytes(),
		[]byte{
			1,          // info byte
			0, 0, 0, 0, // sequence len
		}...,
	), ShareSize)

// tailPadding is the expected raw padding share in the tail padding namespace.
var tailPadding, _ = zeroPadIfNecessary(
	append(
		TailPaddingNamespace.Bytes(),
		[]byte{
			1,          // info byte
			0, 0, 0, 0, // sequence len
		}...,
	), ShareSize)
39 |
40 | func TestNamespacePaddingShare(t *testing.T) {
41 | got, err := NamespacePaddingShare(ns1, ShareVersionZero)
42 | assert.NoError(t, err)
43 | assert.Equal(t, nsOnePadding, got.ToBytes())
44 | }
45 |
46 | func TestNamespacePaddingShares(t *testing.T) {
47 | shares, err := NamespacePaddingShares(ns1, ShareVersionZero, 2)
48 | assert.NoError(t, err)
49 | for _, share := range shares {
50 | assert.Equal(t, nsOnePadding, share.ToBytes())
51 | }
52 | }
53 |
54 | func TestReservedPaddingShare(t *testing.T) {
55 | require.NotPanics(t, func() {
56 | got := ReservedPaddingShare()
57 | assert.Equal(t, reservedPadding, got.ToBytes())
58 | })
59 | }
60 |
61 | func TestReservedPaddingShares(t *testing.T) {
62 | require.NotPanics(t, func() {
63 | shares := ReservedPaddingShares(2)
64 | for _, share := range shares {
65 | assert.Equal(t, reservedPadding, share.ToBytes())
66 | }
67 | })
68 | }
69 |
70 | func TestTailPaddingShare(t *testing.T) {
71 | require.NotPanics(t, func() {
72 | got := TailPaddingShare()
73 | assert.Equal(t, tailPadding, got.ToBytes())
74 | })
75 | }
76 |
77 | func TestTailPaddingShares(t *testing.T) {
78 | require.NotPanics(t, func() {
79 | shares := TailPaddingShares(2)
80 | for _, share := range shares {
81 | assert.Equal(t, tailPadding, share.ToBytes())
82 | }
83 | })
84 | }
85 |
--------------------------------------------------------------------------------
/share/parse.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | )
7 |
8 | // ParseTxs collects all of the transactions from the shares provided
9 | func ParseTxs(shares []Share) ([][]byte, error) {
10 | // parse the shares. Only share version 0 is supported for transactions
11 | rawTxs, err := parseCompactShares(shares)
12 | if err != nil {
13 | return nil, err
14 | }
15 |
16 | return rawTxs, nil
17 | }
18 |
19 | // ParseBlobs collects all blobs from the shares provided
20 | func ParseBlobs(shares []Share) ([]*Blob, error) {
21 | blobList, err := parseSparseShares(shares)
22 | if err != nil {
23 | return []*Blob{}, err
24 | }
25 |
26 | return blobList, nil
27 | }
28 |
29 | // ParseShares parses the shares provided and returns a list of Sequences.
30 | // If ignorePadding is true then the returned Sequences will not contain
31 | // any padding sequences.
32 | func ParseShares(shares []Share, ignorePadding bool) ([]Sequence, error) {
33 | sequences := []Sequence{}
34 | currentSequence := Sequence{}
35 |
36 | for _, share := range shares {
37 | ns := share.Namespace()
38 | if share.IsSequenceStart() {
39 | if len(currentSequence.Shares) > 0 {
40 | sequences = append(sequences, currentSequence)
41 | }
42 | currentSequence = Sequence{
43 | Shares: []Share{share},
44 | Namespace: ns,
45 | }
46 | } else {
47 | if !bytes.Equal(currentSequence.Namespace.Bytes(), ns.Bytes()) {
48 | return sequences, fmt.Errorf("share sequence %v has inconsistent namespace IDs with share %v", currentSequence, share)
49 | }
50 | currentSequence.Shares = append(currentSequence.Shares, share)
51 | }
52 | }
53 |
54 | if len(currentSequence.Shares) > 0 {
55 | sequences = append(sequences, currentSequence)
56 | }
57 |
58 | for _, sequence := range sequences {
59 | if err := sequence.validSequenceLen(); err != nil {
60 | return sequences, err
61 | }
62 | }
63 |
64 | result := []Sequence{}
65 | for _, sequence := range sequences {
66 | if ignorePadding && sequence.isPadding() {
67 | continue
68 | }
69 | result = append(result, sequence)
70 | }
71 |
72 | return result, nil
73 | }
74 |
--------------------------------------------------------------------------------
/share/parse_compact_shares.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import "fmt"
4 |
5 | // parseCompactShares returns data (transactions or intermediate state roots
6 | // based on the contents of rawShares and supportedShareVersions. If rawShares
7 | // contains a share with a version that isn't present in supportedShareVersions,
8 | // an error is returned. The returned data [][]byte does not have namespaces,
9 | // info bytes, data length delimiter, or unit length delimiters and are ready to
10 | // be unmarshalled.
11 | func parseCompactShares(shares []Share) (data [][]byte, err error) {
12 | if len(shares) == 0 {
13 | return nil, nil
14 | }
15 |
16 | for _, share := range shares {
17 | if share.Version() != ShareVersionZero {
18 | return nil, fmt.Errorf("unsupported share version for compact shares %v", share.Version())
19 | }
20 | }
21 |
22 | rawData, err := extractRawData(shares)
23 | if err != nil {
24 | return nil, err
25 | }
26 |
27 | data, err = parseRawData(rawData)
28 | if err != nil {
29 | return nil, err
30 | }
31 |
32 | return data, nil
33 | }
34 |
35 | // parseRawData returns the units (transactions, PFB transactions, intermediate
36 | // state roots) contained in raw data by parsing the unit length delimiter
37 | // prefixed to each unit.
38 | func parseRawData(rawData []byte) (units [][]byte, err error) {
39 | units = make([][]byte, 0)
40 | for {
41 | actualData, unitLen, err := parseDelimiter(rawData)
42 | if err != nil {
43 | return nil, err
44 | }
45 | // the rest of raw data is padding
46 | if unitLen == 0 {
47 | return units, nil
48 | }
49 | // the rest of actual data contains only part of the next transaction so
50 | // we stop parsing raw data
51 | if unitLen > uint64(len(actualData)) {
52 | return units, nil
53 | }
54 | rawData = actualData[unitLen:]
55 | units = append(units, actualData[:unitLen])
56 | }
57 | }
58 |
59 | // extractRawData returns the raw data representing complete transactions
60 | // contained in the shares. The raw data does not contain the namespace, info
61 | // byte, sequence length, or reserved bytes. Starts reading raw data based on
62 | // the reserved bytes in the first share.
63 | func extractRawData(shares []Share) (rawData []byte, err error) {
64 | for i := 0; i < len(shares); i++ {
65 | var raw []byte
66 | if i == 0 {
67 | raw, err = shares[i].RawDataUsingReserved()
68 | if err != nil {
69 | return nil, err
70 | }
71 | } else {
72 | raw = shares[i].RawData()
73 | }
74 | rawData = append(rawData, raw...)
75 | }
76 | return rawData, nil
77 | }
78 |
--------------------------------------------------------------------------------
/share/parse_sparse_shares.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | )
7 |
// sequence accumulates the shares belonging to a single blob while parsing
// sparse shares. data may contain trailing padding that is trimmed down to
// sequenceLen once all shares have been collected.
type sequence struct {
	ns           Namespace
	shareVersion uint8
	data         []byte
	sequenceLen  uint32
	signer       []byte
}
15 |
16 | // parseSparseShares iterates through rawShares and parses out individual
17 | // blobs. It returns an error if a rawShare contains a share version that
18 | // isn't present in supportedShareVersions.
19 | func parseSparseShares(shares []Share) (blobs []*Blob, err error) {
20 | if len(shares) == 0 {
21 | return nil, nil
22 | }
23 | sequences := make([]sequence, 0)
24 |
25 | for _, share := range shares {
26 | version := share.Version()
27 | if !bytes.Contains(SupportedShareVersions, []byte{version}) {
28 | return nil, fmt.Errorf("unsupported share version %v is not present in supported share versions %v", version, SupportedShareVersions)
29 | }
30 |
31 | if share.IsPadding() {
32 | continue
33 | }
34 |
35 | if share.IsSequenceStart() {
36 | sequences = append(sequences, sequence{
37 | ns: share.Namespace(),
38 | shareVersion: version,
39 | data: share.RawData(),
40 | sequenceLen: share.SequenceLen(),
41 | signer: GetSigner(share),
42 | })
43 | } else { // continuation share
44 | if len(sequences) == 0 {
45 | return nil, fmt.Errorf("continuation share %v without a sequence start share", share)
46 | }
47 | // FIXME: it doesn't look like we check whether all the shares belong to the same namespace.
48 | prev := &sequences[len(sequences)-1]
49 | prev.data = append(prev.data, share.RawData()...)
50 | }
51 | }
52 | for _, sequence := range sequences {
53 | // trim any padding from the end of the sequence
54 | sequence.data = sequence.data[:sequence.sequenceLen]
55 | blob, err := NewBlob(sequence.ns, sequence.data, sequence.shareVersion, sequence.signer)
56 | if err != nil {
57 | return nil, err
58 | }
59 | blobs = append(blobs, blob)
60 | }
61 |
62 | return blobs, nil
63 | }
64 |
--------------------------------------------------------------------------------
/share/parse_sparse_shares_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "testing"
7 |
8 | "github.com/stretchr/testify/assert"
9 | "github.com/stretchr/testify/require"
10 | )
11 |
// Test_parseSparseShares round-trips blobs through the sparse share splitter
// and parser, checking that namespaces and data survive intact.
func Test_parseSparseShares(t *testing.T) {
	type test struct {
		name          string
		blobSize      int
		blobCount     int
		sameNamespace bool
	}

	// each test is run twice, once using blobSize as an exact size, and again
	// using it as a cap for randomly sized leaves
	tests := []test{
		{
			name:          "single small blob",
			blobSize:      10,
			blobCount:     1,
			sameNamespace: true,
		},
		{
			name:          "ten small blobs",
			blobSize:      10,
			blobCount:     10,
			sameNamespace: true,
		},
		{
			name:          "single big blob",
			blobSize:      ContinuationSparseShareContentSize * 4,
			blobCount:     1,
			sameNamespace: true,
		},
		{
			name:          "many big blobs",
			blobSize:      ContinuationSparseShareContentSize * 4,
			blobCount:     10,
			sameNamespace: true,
		},
		{
			name:          "single exact size blob",
			blobSize:      FirstSparseShareContentSize,
			blobCount:     1,
			sameNamespace: true,
		},
		{
			name:          "blobs with different namespaces",
			blobSize:      FirstSparseShareContentSize,
			blobCount:     5,
			sameNamespace: false,
		},
	}

	for _, tc := range tests {
		// run the tests with identically sized blobs
		t.Run(fmt.Sprintf("%s identically sized ", tc.name), func(t *testing.T) {
			sizes := make([]int, tc.blobCount)
			for i := range sizes {
				sizes[i] = tc.blobSize
			}
			blobs, err := GenerateV0Blobs(sizes, tc.sameNamespace)
			if err != nil {
				t.Error(err)
			}

			// blobs must be sorted by namespace to produce a valid share ordering
			SortBlobs(blobs)

			shares, err := splitBlobs(blobs...)
			require.NoError(t, err)
			parsedBlobs, err := parseSparseShares(shares)
			if err != nil {
				t.Error(err)
			}

			// check that the namespaces and data are the same
			for i := 0; i < len(blobs); i++ {
				assert.Equal(t, blobs[i].Namespace(), parsedBlobs[i].Namespace(), "parsed blob namespace does not match")
				assert.Equal(t, blobs[i].Data(), parsedBlobs[i].Data(), "parsed blob data does not match")
			}

			if !tc.sameNamespace {
				// compare namespaces in case they should not be the same
				for i := 0; i < len(blobs); i++ {
					for j := i + 1; j < len(blobs); j++ {
						require.False(t, parsedBlobs[i].Namespace().Equals(parsedBlobs[j].Namespace()))
					}
				}
			}
		})

		// run the same tests using randomly sized blobs with caps of tc.blobSize
		t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) {
			blobs := generateRandomlySizedBlobs(tc.blobCount, tc.blobSize)
			shares, err := splitBlobs(blobs...)
			require.NoError(t, err)
			parsedBlobs, err := parseSparseShares(shares)
			if err != nil {
				t.Error(err)
			}

			// check that the namespaces and data are the same
			for i := 0; i < len(blobs); i++ {
				assert.Equal(t, blobs[i].Namespace(), parsedBlobs[i].Namespace())
				assert.Equal(t, blobs[i].Data(), parsedBlobs[i].Data())
			}
		})
	}
}
116 |
117 | func Test_parseSparseSharesWithNamespacedPadding(t *testing.T) {
118 | sss := NewSparseShareSplitter()
119 | randomSmallBlob := generateRandomBlob(ContinuationSparseShareContentSize / 2)
120 | randomLargeBlob := generateRandomBlob(ContinuationSparseShareContentSize * 4)
121 | blobs := []*Blob{
122 | randomSmallBlob,
123 | randomLargeBlob,
124 | }
125 | SortBlobs(blobs)
126 |
127 | err := sss.Write(blobs[0])
128 | require.NoError(t, err)
129 |
130 | err = sss.WriteNamespacePaddingShares(4)
131 | require.NoError(t, err)
132 |
133 | err = sss.Write(blobs[1])
134 | require.NoError(t, err)
135 |
136 | err = sss.WriteNamespacePaddingShares(10)
137 | require.NoError(t, err)
138 |
139 | shares := sss.Export()
140 | pblobs, err := parseSparseShares(shares)
141 | require.NoError(t, err)
142 | require.Equal(t, blobs, pblobs)
143 | }
144 |
145 | func Test_parseShareVersionOne(t *testing.T) {
146 | v1blob, err := NewV1Blob(MustNewV0Namespace(bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize)), []byte("data"), bytes.Repeat([]byte{1}, SignerSize))
147 | require.NoError(t, err)
148 | v1shares, err := splitBlobs(v1blob)
149 | require.NoError(t, err)
150 |
151 | parsedBlobs, err := parseSparseShares(v1shares)
152 | require.NoError(t, err)
153 | require.Equal(t, v1blob, parsedBlobs[0])
154 | require.Len(t, parsedBlobs, 1)
155 | }
156 |
157 | func splitBlobs(blobs ...*Blob) ([]Share, error) {
158 | writer := NewSparseShareSplitter()
159 | for _, blob := range blobs {
160 | if err := writer.Write(blob); err != nil {
161 | return nil, err
162 | }
163 | }
164 | return writer.Export(), nil
165 | }
166 |
--------------------------------------------------------------------------------
/share/parse_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "crypto/rand"
6 | "encoding/binary"
7 | "reflect"
8 | "testing"
9 |
10 | "github.com/stretchr/testify/assert"
11 | "github.com/stretchr/testify/require"
12 | )
13 |
// TestParseShares is a table test covering sequence grouping, namespace
// consistency errors, sequence length validation, and padding handling in
// ParseShares.
func TestParseShares(t *testing.T) {
	ns1 := MustNewV0Namespace(bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize))
	ns2 := MustNewV0Namespace(bytes.Repeat([]byte{2}, NamespaceVersionZeroIDSize))

	txShares, _, err := splitTxs(generateRandomTxs(2, 1000))
	require.NoError(t, err)
	txShareStart := txShares[0]
	txShareContinuation := txShares[1]

	blobOneShares, err := splitBlobs(generateRandomBlobWithNamespace(ns1, 1000))
	require.NoError(t, err)
	blobOneStart := blobOneShares[0]
	blobOneContinuation := blobOneShares[1]

	blobTwoShares, err := splitBlobs(generateRandomBlobWithNamespace(ns2, 1000))
	require.NoError(t, err)
	blobTwoStart := blobTwoShares[0]
	blobTwoContinuation := blobTwoShares[1]

	// tooLargeSequenceLen is a single share with too large of a sequence len
	// because it takes more than one share to store a sequence of 1000 bytes
	tooLargeSequenceLen := generateRawShare(t, ns1, true, uint32(1000))

	ns1Padding, err := NamespacePaddingShare(ns1, ShareVersionZero)
	require.NoError(t, err)

	type testCase struct {
		name          string
		shares        []Share
		ignorePadding bool
		want          []Sequence
		expectErr     bool
	}

	tests := []testCase{
		{
			name:          "empty",
			shares:        []Share{},
			ignorePadding: false,
			want:          []Sequence{},
			expectErr:     false,
		},
		{
			name:          "one transaction share",
			shares:        []Share{txShareStart},
			ignorePadding: false,
			want:          []Sequence{{Namespace: TxNamespace, Shares: []Share{txShareStart}}},
			expectErr:     false,
		},
		{
			name:          "two transaction shares",
			shares:        []Share{txShareStart, txShareContinuation},
			ignorePadding: false,
			want:          []Sequence{{Namespace: TxNamespace, Shares: []Share{txShareStart, txShareContinuation}}},
			expectErr:     false,
		},
		{
			name:          "one blob share",
			shares:        []Share{blobOneStart},
			ignorePadding: false,
			want:          []Sequence{{Namespace: ns1, Shares: []Share{blobOneStart}}},
			expectErr:     false,
		},
		{
			name:          "two blob shares",
			shares:        []Share{blobOneStart, blobOneContinuation},
			ignorePadding: false,
			want:          []Sequence{{Namespace: ns1, Shares: []Share{blobOneStart, blobOneContinuation}}},
			expectErr:     false,
		},
		{
			name:          "two blobs with two shares each",
			shares:        []Share{blobOneStart, blobOneContinuation, blobTwoStart, blobTwoContinuation},
			ignorePadding: false,
			want: []Sequence{
				{Namespace: ns1, Shares: []Share{blobOneStart, blobOneContinuation}},
				{Namespace: ns2, Shares: []Share{blobTwoStart, blobTwoContinuation}},
			},
			expectErr: false,
		},
		{
			name:          "one transaction, one blob",
			shares:        []Share{txShareStart, blobOneStart},
			ignorePadding: false,
			want: []Sequence{
				{Namespace: TxNamespace, Shares: []Share{txShareStart}},
				{Namespace: ns1, Shares: []Share{blobOneStart}},
			},
			expectErr: false,
		},
		{
			name:          "one transaction, two blobs",
			shares:        []Share{txShareStart, blobOneStart, blobTwoStart},
			ignorePadding: false,
			want: []Sequence{
				{Namespace: TxNamespace, Shares: []Share{txShareStart}},
				{Namespace: ns1, Shares: []Share{blobOneStart}},
				{Namespace: ns2, Shares: []Share{blobTwoStart}},
			},
			expectErr: false,
		},
		{
			// a continuation share in a different namespace than the open
			// sequence must produce an error
			name:          "blob one start followed by blob two continuation",
			shares:        []Share{blobOneStart, blobTwoContinuation},
			ignorePadding: false,
			want:          []Sequence{},
			expectErr:     true,
		},
		{
			name:          "one share with too large sequence length",
			shares:        []Share{{data: tooLargeSequenceLen}},
			ignorePadding: false,
			want:          []Sequence{},
			expectErr:     true,
		},
		{
			name:          "tail padding shares",
			shares:        TailPaddingShares(2),
			ignorePadding: false,
			want: []Sequence{
				{
					Namespace: TailPaddingNamespace,
					Shares:    []Share{TailPaddingShare()},
				},
				{
					Namespace: TailPaddingNamespace,
					Shares:    []Share{TailPaddingShare()},
				},
			},
			expectErr: false,
		},
		{
			name:          "reserve padding shares",
			shares:        ReservedPaddingShares(2),
			ignorePadding: false,
			want: []Sequence{
				{
					Namespace: PrimaryReservedPaddingNamespace,
					Shares:    []Share{ReservedPaddingShare()},
				},
				{
					Namespace: PrimaryReservedPaddingNamespace,
					Shares:    []Share{ReservedPaddingShare()},
				},
			},
			expectErr: false,
		},
		{
			name:          "namespace padding shares",
			shares:        []Share{ns1Padding, ns1Padding},
			ignorePadding: false,
			want: []Sequence{
				{
					Namespace: ns1,
					Shares:    []Share{ns1Padding},
				},
				{
					Namespace: ns1,
					Shares:    []Share{ns1Padding},
				},
			},
			expectErr: false,
		},
		{
			name:          "ignores all types of padding shares",
			shares:        []Share{TailPaddingShare(), ReservedPaddingShare(), ns1Padding},
			ignorePadding: true,
			want:          []Sequence{},
			expectErr:     false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := ParseShares(tt.shares, tt.ignorePadding)
			if tt.expectErr {
				assert.Error(t, err)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("ParseShares() got %v, want %v", got, tt.want)
			}
		})
	}
}
198 |
199 | func generateRawShare(t *testing.T, namespace Namespace, isSequenceStart bool, sequenceLen uint32) (rawShare []byte) {
200 | infoByte, _ := NewInfoByte(ShareVersionZero, isSequenceStart)
201 |
202 | sequenceLenBuf := make([]byte, SequenceLenBytes)
203 | binary.BigEndian.PutUint32(sequenceLenBuf, sequenceLen)
204 |
205 | rawShare = append(rawShare, namespace.Bytes()...)
206 | rawShare = append(rawShare, byte(infoByte))
207 | rawShare = append(rawShare, sequenceLenBuf...)
208 |
209 | return padWithRandomBytes(t, rawShare)
210 | }
211 |
212 | func padWithRandomBytes(t *testing.T, partialShare []byte) (paddedShare []byte) {
213 | paddedShare = make([]byte, ShareSize)
214 | copy(paddedShare, partialShare)
215 | _, err := rand.Read(paddedShare[len(partialShare):])
216 | require.NoError(t, err)
217 | return paddedShare
218 | }
219 |
// generateRandomTxs returns count transactions, each consisting of size
// random bytes. It panics if the random source fails.
func generateRandomTxs(count, size int) [][]byte {
	txs := make([][]byte, count)
	for i := range txs {
		data := make([]byte, size)
		if _, err := rand.Read(data); err != nil {
			panic(err)
		}
		txs[i] = data
	}
	return txs
}
232 |
--------------------------------------------------------------------------------
/share/random_blobs.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | crand "crypto/rand"
6 | "math/rand"
7 | )
8 |
9 | // GenerateV0Blobs is a test utility producing v0 share formatted blobs with the
10 | // requested size and namespaces.
11 | func GenerateV0Blobs(sizes []int, sameNamespace bool) ([]*Blob, error) {
12 | blobs := make([]*Blob, 0, len(sizes))
13 | for _, size := range sizes {
14 | size := rawTxSize(FirstSparseShareContentSize * size)
15 | blob := generateRandomBlob(size)
16 | if !sameNamespace {
17 | ns := RandomBlobNamespace()
18 | var err error
19 | blob, err = NewV0Blob(ns, blob.Data())
20 | if err != nil {
21 | return nil, err
22 | }
23 | }
24 |
25 | blobs = append(blobs, blob)
26 | }
27 | return blobs, nil
28 | }
29 |
30 | func generateRandomBlobWithNamespace(namespace Namespace, size int) *Blob {
31 | data := make([]byte, size)
32 | _, err := crand.Read(data)
33 | if err != nil {
34 | panic(err)
35 | }
36 | blob, err := NewV0Blob(namespace, data)
37 | if err != nil {
38 | panic(err)
39 | }
40 | return blob
41 | }
42 |
43 | func generateRandomBlob(dataSize int) *Blob {
44 | ns := MustNewV0Namespace(bytes.Repeat([]byte{0x1}, NamespaceVersionZeroIDSize))
45 | return generateRandomBlobWithNamespace(ns, dataSize)
46 | }
47 |
48 | func generateRandomlySizedBlobs(count, maxBlobSize int) []*Blob {
49 | blobs := make([]*Blob, count)
50 | for i := 0; i < count; i++ {
51 | blobs[i] = generateRandomBlob(rand.Intn(maxBlobSize-1) + 1)
52 | if len(blobs[i].Data()) == 0 {
53 | i--
54 | }
55 | }
56 |
57 | // this is just to let us use assert.Equal
58 | if count == 0 {
59 | blobs = nil
60 | }
61 |
62 | SortBlobs(blobs)
63 | return blobs
64 | }
65 |
--------------------------------------------------------------------------------
/share/random_namespace.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "crypto/rand"
5 | )
6 |
7 | func RandomNamespace() Namespace {
8 | for {
9 | id := RandomVerzionZeroID()
10 | namespace, err := NewNamespace(NamespaceVersionZero, id)
11 | if err != nil {
12 | continue
13 | }
14 | return namespace
15 | }
16 | }
17 |
18 | func RandomVerzionZeroID() []byte {
19 | namespace := make([]byte, NamespaceVersionZeroIDSize)
20 | _, err := rand.Read(namespace)
21 | if err != nil {
22 | panic(err)
23 | }
24 | return append(NamespaceVersionZeroPrefix, namespace...)
25 | }
26 |
27 | func RandomBlobNamespaceID() []byte {
28 | namespace := make([]byte, NamespaceVersionZeroIDSize)
29 | _, err := rand.Read(namespace)
30 | if err != nil {
31 | panic(err)
32 | }
33 | return namespace
34 | }
35 |
36 | func RandomBlobNamespace() Namespace {
37 | for {
38 | id := RandomBlobNamespaceID()
39 | namespace := MustNewV0Namespace(id)
40 | if err := namespace.ValidateForBlob(); err == nil {
41 | return namespace
42 | }
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/share/random_shares.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "crypto/rand"
6 | "fmt"
7 | "sort"
8 | )
9 |
10 | // RandShares generates total amount of shares and fills them with random data.
11 | func RandShares(total int) ([]Share, error) {
12 | if total&(total-1) != 0 {
13 | return nil, fmt.Errorf("total must be power of 2: %d", total)
14 | }
15 |
16 | shares := make([]Share, total)
17 | for i := range shares {
18 | shr := make([]byte, ShareSize)
19 | copy(shr[:NamespaceSize], RandomNamespace().Bytes())
20 | if _, err := rand.Read(shr[NamespaceSize:]); err != nil {
21 | panic(err)
22 | }
23 |
24 | sh, err := NewShare(shr)
25 | if err != nil {
26 | panic(err)
27 | }
28 | if err = sh.Namespace().ValidateForData(); err != nil {
29 | panic(err)
30 | }
31 |
32 | shares[i] = *sh
33 | }
34 | sort.Slice(shares, func(i, j int) bool { return bytes.Compare(shares[i].ToBytes(), shares[j].ToBytes()) < 0 })
35 | return shares, nil
36 | }
37 |
38 | // RandSharesWithNamespace is the same as RandShares, but sets the same namespace for all shares.
39 | func RandSharesWithNamespace(namespace Namespace, namespacedAmount, total int) ([]Share, error) {
40 | if total&(total-1) != 0 {
41 | return nil, fmt.Errorf("total must be power of 2: %d", total)
42 | }
43 |
44 | if namespacedAmount > total {
45 | return nil,
46 | fmt.Errorf("namespacedAmount %v must be less than or equal to total: %v", namespacedAmount, total)
47 | }
48 |
49 | shares := make([]Share, total)
50 | for i := range shares {
51 | shr := make([]byte, ShareSize)
52 | if i < namespacedAmount {
53 | copy(shr[:NamespaceSize], namespace.Bytes())
54 | } else {
55 | copy(shr[:NamespaceSize], RandomNamespace().Bytes())
56 | }
57 | _, err := rand.Read(shr[NamespaceSize:])
58 | if err != nil {
59 | panic(err)
60 | }
61 |
62 | sh, err := NewShare(shr)
63 | if err != nil {
64 | panic(err)
65 | }
66 | shares[i] = *sh
67 | }
68 | sort.Slice(shares, func(i, j int) bool { return bytes.Compare(shares[i].ToBytes(), shares[j].ToBytes()) < 0 })
69 | return shares, nil
70 | }
71 |
--------------------------------------------------------------------------------
/share/range.go:
--------------------------------------------------------------------------------
1 | package share
2 |
// Range is an end exclusive set of share indexes, i.e. the half-open
// interval [Start, End).
type Range struct {
	// Start is the index of the first share occupied by this range.
	Start int
	// End is the next index after the last share occupied by this range.
	End int
}
10 |
// NewRange returns the half-open share index range [start, end).
func NewRange(start, end int) Range {
	return Range{Start: start, End: end}
}

// EmptyRange returns the zero-value range, used to signal "no shares".
func EmptyRange() Range {
	return Range{Start: 0, End: 0}
}

// IsEmpty reports whether r is the zero-value range. Note that a zero-length
// range at a non-zero index (e.g. {5, 5}) is NOT considered empty by this
// method.
func (r Range) IsEmpty() bool {
	return r.Start == 0 && r.End == 0
}

// Add shifts both endpoints of the range by value.
func (r *Range) Add(value int) {
	r.Start += value
	r.End += value
}
27 |
28 | // GetShareRangeForNamespace returns all shares that belong to a given
29 | // namespace. It will return an empty range if the namespace could not be
30 | // found. This assumes that the slice of shares are lexicographically
31 | // sorted by namespace. Ranges here are always end exclusive.
32 | func GetShareRangeForNamespace(shares []Share, ns Namespace) Range {
33 | if len(shares) == 0 {
34 | return EmptyRange()
35 | }
36 | n0 := shares[0].Namespace()
37 | if ns.IsLessThan(n0) {
38 | return EmptyRange()
39 | }
40 | n1 := shares[len(shares)-1].Namespace()
41 | if ns.IsGreaterThan(n1) {
42 | return EmptyRange()
43 | }
44 |
45 | start := -1
46 | for i, share := range shares {
47 | shareNS := share.Namespace()
48 | if shareNS.IsGreaterThan(ns) && start != -1 {
49 | return Range{start, i}
50 | }
51 | if ns.Equals(shareNS) && start == -1 {
52 | start = i
53 | }
54 | }
55 | if start == -1 {
56 | return EmptyRange()
57 | }
58 | return Range{start, len(shares)}
59 | }
60 |
--------------------------------------------------------------------------------
/share/range_test.go:
--------------------------------------------------------------------------------
1 | package share_test
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | "github.com/stretchr/testify/require"
8 |
9 | "github.com/celestiaorg/go-square/v2/internal/test"
10 | "github.com/celestiaorg/go-square/v2/share"
11 | )
12 |
// TestGetShareRangeForNamespace exercises namespace range lookup over shares
// produced by the sparse share splitter. The expected ranges assume each of
// the four generated blobs (100-400 bytes) fits into exactly one share, so
// share i corresponds to sorted blob i — TODO confirm if fixture sizes change.
func TestGetShareRangeForNamespace(t *testing.T) {
	blobs := test.GenerateBlobs(100, 200, 300, 400)
	share.SortBlobs(blobs)
	writer := share.NewSparseShareSplitter()
	for _, blob := range blobs {
		err := writer.Write(blob)
		require.NoError(t, err)
	}
	shares := writer.Export()
	firstNamespace := shares[0].Namespace()
	lastNamespace := shares[len(shares)-1].Namespace()
	// ns is freshly random, so it is (with overwhelming probability) absent
	// from the generated shares — used for the "not found" case below.
	ns := share.RandomBlobNamespace()

	testCases := []struct {
		name string
		shares []share.Share
		namespace share.Namespace
		expectedRange share.Range
	}{
		{
			name: "Empty shares",
			shares: []share.Share{},
			namespace: ns,
			expectedRange: share.EmptyRange(),
		},
		{
			name: "Namespace not found",
			shares: shares,
			namespace: ns,
			expectedRange: share.EmptyRange(),
		},
		{
			name: "Namespace found",
			shares: shares,
			namespace: firstNamespace,
			expectedRange: share.NewRange(0, 1),
		},
		{
			name: "Namespace at end",
			shares: shares,
			namespace: lastNamespace,
			expectedRange: share.NewRange(3, 4),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := share.GetShareRangeForNamespace(tc.shares, tc.namespace)
			assert.Equal(t, tc.expectedRange, result)
		})
	}
}
65 |
--------------------------------------------------------------------------------
/share/reserved_bytes.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "encoding/binary"
5 | "fmt"
6 | )
7 |
8 | // NewReservedBytes returns a byte slice of length
9 | // ShareReservedBytes that contains the byteIndex of the first
10 | // unit that starts in a compact share.
11 | func NewReservedBytes(byteIndex uint32) ([]byte, error) {
12 | if byteIndex >= ShareSize {
13 | return []byte{}, fmt.Errorf("byte index %d must be less than share size %d", byteIndex, ShareSize)
14 | }
15 | reservedBytes := make([]byte, ShareReservedBytes)
16 | binary.BigEndian.PutUint32(reservedBytes, byteIndex)
17 | return reservedBytes, nil
18 | }
19 |
20 | // ParseReservedBytes parses a byte slice of length
21 | // ShareReservedBytes into a byteIndex.
22 | func ParseReservedBytes(reservedBytes []byte) (uint32, error) {
23 | if len(reservedBytes) != ShareReservedBytes {
24 | return 0, fmt.Errorf("reserved bytes must be of length %d", ShareReservedBytes)
25 | }
26 | byteIndex := binary.BigEndian.Uint32(reservedBytes)
27 | if ShareSize <= byteIndex {
28 | return 0, fmt.Errorf("byteIndex must be less than share size %d", ShareSize)
29 | }
30 | return byteIndex, nil
31 | }
32 |
--------------------------------------------------------------------------------
/share/reserved_bytes_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | )
8 |
9 | func TestParseReservedBytes(t *testing.T) {
10 | type testCase struct {
11 | name string
12 | input []byte
13 | want uint32
14 | expectErr bool
15 | }
16 | testCases := []testCase{
17 | {"byte index of 0", []byte{0, 0, 0, 0}, 0, false},
18 | {"byte index of 2", []byte{0, 0, 0, 2}, 2, false},
19 | {"byte index of 4", []byte{0, 0, 0, 4}, 4, false},
20 | {"byte index of 8", []byte{0, 0, 0, 8}, 8, false},
21 | {"byte index of 16", []byte{0, 0, 0, 16}, 16, false},
22 | {"byte index of 32", []byte{0, 0, 0, 32}, 32, false},
23 | {"byte index of 64", []byte{0, 0, 0, 64}, 64, false},
24 | {"byte index of 128", []byte{0, 0, 0, 128}, 128, false},
25 | {"byte index of 256", []byte{0, 0, 1, 0}, 256, false},
26 | {"byte index of 511", []byte{0, 0, 1, 255}, 511, false},
27 |
28 | // error cases
29 | {"empty", []byte{}, 0, true},
30 | {"too few reserved bytes", []byte{1}, 0, true},
31 | {"another case of too few reserved bytes", []byte{3, 3, 3}, 0, true},
32 | {"too many bytes", []byte{0, 0, 0, 0, 0}, 0, true},
33 | {"too high of a byte index", []byte{0, 0, 3, 232}, 0, true},
34 | }
35 |
36 | for _, tc := range testCases {
37 | t.Run(tc.name, func(t *testing.T) {
38 | got, err := ParseReservedBytes(tc.input)
39 | if tc.expectErr {
40 | assert.Error(t, err)
41 | return
42 | }
43 | assert.NoError(t, err)
44 | assert.Equal(t, tc.want, got)
45 | })
46 | }
47 | }
48 |
49 | func TestNewReservedBytes(t *testing.T) {
50 | type testCase struct {
51 | name string
52 | input uint32
53 | want []byte
54 | expectErr bool
55 | }
56 | testCases := []testCase{
57 | {"byte index of 0", 0, []byte{0, 0, 0, 0}, false},
58 | {"byte index of 2", 2, []byte{0, 0, 0, 2}, false},
59 | {"byte index of 4", 4, []byte{0, 0, 0, 4}, false},
60 | {"byte index of 8", 8, []byte{0, 0, 0, 8}, false},
61 | {"byte index of 16", 16, []byte{0, 0, 0, 16}, false},
62 | {"byte index of 32", 32, []byte{0, 0, 0, 32}, false},
63 | {"byte index of 64", 64, []byte{0, 0, 0, 64}, false},
64 | {"byte index of 128", 128, []byte{0, 0, 0, 128}, false},
65 | {"byte index of 256", 256, []byte{0, 0, 1, 0}, false},
66 | {"byte index of 511", 511, []byte{0, 0, 1, 255}, false},
67 |
68 | // error cases
69 | {"byte index of 512 is equal to share size", 512, []byte{}, true},
70 | {"byte index of 1000 is greater than share size", 1000, []byte{}, true},
71 | }
72 |
73 | for _, tc := range testCases {
74 | t.Run(tc.name, func(t *testing.T) {
75 | got, err := NewReservedBytes(tc.input)
76 | if tc.expectErr {
77 | assert.Error(t, err)
78 | return
79 | }
80 | assert.NoError(t, err)
81 | assert.Equal(t, tc.want, got)
82 | })
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/share/share.go:
--------------------------------------------------------------------------------
1 | /*
2 | Package share is an encoding and decoding protocol that takes blobs,
3 | a struct containing arbitrary data based on a namespace and coverts
4 | them into a slice of shares, bytes 512 in length. This logic is used
5 | for constructing the original data square.
6 | */
7 | package share
8 |
9 | import (
10 | "bytes"
11 | "encoding/binary"
12 | "encoding/json"
13 | "fmt"
14 | )
15 |
// Share contains the raw share data (including namespace ID).
// The data is always exactly ShareSize bytes; this is enforced by
// NewShare and UnmarshalJSON.
type Share struct {
	data []byte
}
20 |
// MarshalJSON encodes share to the json encoded bytes.
func (s Share) MarshalJSON() ([]byte, error) {
	return json.Marshal(s.data)
}

// UnmarshalJSON decodes json bytes to the share.
// The decoded payload is validated to be exactly ShareSize bytes.
func (s *Share) UnmarshalJSON(data []byte) error {
	var buf []byte

	if err := json.Unmarshal(data, &buf); err != nil {
		return err
	}
	s.data = buf
	return validateSize(s.data)
}
36 |
37 | // NewShare creates a new share from the raw data, validating it's
38 | // size and versioning
39 | func NewShare(data []byte) (*Share, error) {
40 | if err := validateSize(data); err != nil {
41 | return nil, err
42 | }
43 | return &Share{data}, nil
44 | }
45 |
46 | func validateSize(data []byte) error {
47 | if len(data) != ShareSize {
48 | return fmt.Errorf("share data must be %d bytes, got %d", ShareSize, len(data))
49 | }
50 | return nil
51 | }
52 |
// Namespace returns the shares namespace
func (s *Share) Namespace() Namespace {
	// The namespace occupies the first NamespaceSize bytes of the share.
	return Namespace{data: s.data[:NamespaceSize]}
}

// InfoByte returns the byte after the namespace used
// for indicating versioning and whether the share is
// the first in it's sequence or a continuation
func (s *Share) InfoByte() InfoByte {
	return InfoByte(s.data[NamespaceSize])
}

// Version returns the version of the share
func (s *Share) Version() uint8 {
	return s.InfoByte().Version()
}
69 |
70 | // CheckVersionSupported checks if the share version is supported
71 | func (s *Share) CheckVersionSupported() error {
72 | ver := s.Version()
73 | if !bytes.Contains(SupportedShareVersions, []byte{ver}) {
74 | return fmt.Errorf("unsupported share version %v is not present in the list of supported share versions %v", ver, SupportedShareVersions)
75 | }
76 | return nil
77 | }
78 |
79 | // IsSequenceStart returns true if this is the first share in a sequence.
80 | func (s *Share) IsSequenceStart() bool {
81 | infoByte := s.InfoByte()
82 | return infoByte.IsSequenceStart()
83 | }
84 |
85 | // IsCompactShare returns true if this is a compact share.
86 | func (s Share) IsCompactShare() bool {
87 | ns := s.Namespace()
88 | isCompact := ns.IsTx() || ns.IsPayForBlob()
89 | return isCompact
90 | }
91 |
92 | // GetSigner returns the signer of the share, if the
93 | // share is not of type v1 and is not the first share in a sequence
94 | // it returns nil
95 | func GetSigner(share Share) []byte {
96 | infoByte := share.InfoByte()
97 | if infoByte.Version() != ShareVersionOne {
98 | return nil
99 | }
100 | if !infoByte.IsSequenceStart() {
101 | return nil
102 | }
103 | startIndex := NamespaceSize + ShareInfoBytes + SequenceLenBytes
104 | endIndex := startIndex + SignerSize
105 | return share.data[startIndex:endIndex]
106 | }
107 |
108 | // SequenceLen returns the sequence length of this share.
109 | // It returns 0 if this is a continuation share because then it doesn't contain a sequence length.
110 | func (s *Share) SequenceLen() uint32 {
111 | if !s.IsSequenceStart() {
112 | return 0
113 | }
114 |
115 | start := NamespaceSize + ShareInfoBytes
116 | end := start + SequenceLenBytes
117 | return binary.BigEndian.Uint32(s.data[start:end])
118 | }
119 |
120 | // IsPadding returns whether this *share is padding or not.
121 | func (s *Share) IsPadding() bool {
122 | isNamespacePadding := s.isNamespacePadding()
123 | isTailPadding := s.isTailPadding()
124 | isPrimaryReservedPadding := s.isPrimaryReservedPadding()
125 | return isNamespacePadding || isTailPadding || isPrimaryReservedPadding
126 | }
127 |
128 | func (s *Share) isNamespacePadding() bool {
129 | return s.IsSequenceStart() && s.SequenceLen() == 0
130 | }
131 |
132 | func (s *Share) isTailPadding() bool {
133 | ns := s.Namespace()
134 | return ns.IsTailPadding()
135 | }
136 |
137 | func (s *Share) isPrimaryReservedPadding() bool {
138 | ns := s.Namespace()
139 | return ns.IsPrimaryReservedPadding()
140 | }
141 |
142 | // ToBytes returns the underlying bytes of the share
143 | func (s *Share) ToBytes() []byte {
144 | return s.data
145 | }
146 |
147 | // RawData returns the raw share data. The raw share data does not contain the
148 | // namespace ID, info byte, sequence length and if they exist: the reserved bytes
149 | // and signer.
150 | func (s *Share) RawData() []byte {
151 | startingIndex := s.rawDataStartIndex()
152 | return s.data[startingIndex:]
153 | }
154 |
155 | func (s *Share) rawDataStartIndex() int {
156 | isStart := s.IsSequenceStart()
157 | isCompact := s.IsCompactShare()
158 | index := NamespaceSize + ShareInfoBytes
159 | if isStart {
160 | index += SequenceLenBytes
161 | }
162 | if isCompact {
163 | index += ShareReservedBytes
164 | }
165 | if s.Version() == ShareVersionOne {
166 | index += SignerSize
167 | }
168 | return index
169 | }
170 |
171 | // RawDataUsingReserved returns the raw share data while taking reserved bytes into account.
172 | func (s *Share) RawDataUsingReserved() (rawData []byte, err error) {
173 | rawDataStartIndexUsingReserved, err := s.rawDataStartIndexUsingReserved()
174 | if err != nil {
175 | return nil, err
176 | }
177 |
178 | // This means share is the last share and does not have any transaction beginning in it
179 | if rawDataStartIndexUsingReserved == 0 {
180 | return []byte{}, nil
181 | }
182 | if len(s.data) < rawDataStartIndexUsingReserved {
183 | return rawData, fmt.Errorf("share %s is too short to contain raw data", s)
184 | }
185 |
186 | return s.data[rawDataStartIndexUsingReserved:], nil
187 | }
188 |
// rawDataStartIndexUsingReserved returns the start index of raw data while accounting for
// reserved bytes, if it exists in the share.
func (s *Share) rawDataStartIndexUsingReserved() (int, error) {
	isStart := s.IsSequenceStart()
	isCompact := s.IsCompactShare()

	index := NamespaceSize + ShareInfoBytes
	if isStart {
		index += SequenceLenBytes
	}
	if s.Version() == ShareVersionOne {
		index += SignerSize
	}

	if isCompact {
		// For compact shares the start index is not derived from the layout:
		// it is read from the reserved bytes themselves, which record the
		// offset of the first unit beginning in this share. A value of 0
		// means no unit starts in this share.
		reservedBytes, err := ParseReservedBytes(s.data[index : index+ShareReservedBytes])
		if err != nil {
			return 0, err
		}
		return int(reservedBytes), nil
	}
	return index, nil
}
212 |
213 | func ToBytes(shares []Share) (bytes [][]byte) {
214 | bytes = make([][]byte, len(shares))
215 | for i, share := range shares {
216 | bytes[i] = share.data
217 | }
218 | return bytes
219 | }
220 |
221 | func FromBytes(bytes [][]byte) (shares []Share, err error) {
222 | for _, b := range bytes {
223 | share, err := NewShare(b)
224 | if err != nil {
225 | return nil, err
226 | }
227 | shares = append(shares, *share)
228 | }
229 | return shares, nil
230 | }
231 |
--------------------------------------------------------------------------------
/share/share_benchmark_test.go:
--------------------------------------------------------------------------------
1 | package share_test
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/celestiaorg/go-square/v2/internal/test"
8 | "github.com/celestiaorg/go-square/v2/share"
9 | )
10 |
// BenchmarkBlobsToShares measures encoding blobs into shares across a grid of
// blob counts and sizes. Fixture generation happens before ResetTimer so only
// the splitter work is timed.
func BenchmarkBlobsToShares(b *testing.B) {
	sizes := []int{256, 256 * 8, 256 * 64}
	numBlobs := []int{1, 8, 64}
	for _, size := range sizes {
		for _, numBlobs := range numBlobs {
			b.Run(fmt.Sprintf("ShareEncoding%dBlobs%dBytes", numBlobs, size), func(b *testing.B) {
				b.ReportAllocs()
				blobs := test.GenerateBlobs(test.Repeat(size, numBlobs)...)
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					// Convert blob to shares
					writer := share.NewSparseShareSplitter()
					for _, blob := range blobs {
						if err := writer.Write(blob); err != nil {
							b.Fatal("Failed to write blob into shares:", err)
						}
					}
					_ = writer.Export()
				}
			})
		}
	}
}
34 |
// BenchmarkSharesToBlobs measures decoding shares back into blobs across a
// grid of blob counts and sizes. Shares are prepared once per sub-benchmark
// before ResetTimer so only ParseBlobs is timed.
func BenchmarkSharesToBlobs(b *testing.B) {
	sizes := []int{256, 256 * 8, 256 * 64}
	numBlobs := []int{1, 8, 64}
	for _, size := range sizes {
		for _, numBlobs := range numBlobs {
			b.Run(fmt.Sprintf("ShareDecoding%dBlobs%dBytes", numBlobs, size), func(b *testing.B) {
				b.ReportAllocs()
				blobs := test.GenerateBlobs(test.Repeat(size, numBlobs)...)
				writer := share.NewSparseShareSplitter()
				for _, blob := range blobs {
					if err := writer.Write(blob); err != nil {
						b.Fatal("Failed to write blob into shares:", err)
					}
				}
				s := writer.Export()

				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					// Convert shares back to blob
					_, err := share.ParseBlobs(s)
					if err != nil {
						b.Fatal("Failed to reconstruct blob from shares:", err)
					}
				}
			})
		}
	}
}
63 |
--------------------------------------------------------------------------------
/share/share_builder.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "encoding/binary"
5 | "errors"
6 | )
7 |
// builder assembles a single share incrementally: the header (namespace, info
// byte, and any sequence-length/reserved-bytes placeholders) is written at
// construction time, and raw data is appended afterwards until the share is
// full.
type builder struct {
	namespace Namespace
	shareVersion uint8
	// isFirstShare marks the first share of a sequence (carries the sequence length).
	isFirstShare bool
	// isCompactShare marks tx / pay-for-blob shares (carry reserved bytes).
	isCompactShare bool
	rawShareData []byte
}

// newEmptyBuilder returns a builder with an empty, pre-allocated buffer and
// no header written; intended for use with ImportRawShare.
func newEmptyBuilder() *builder {
	return &builder{
		rawShareData: make([]byte, 0, ShareSize),
	}
}
21 |
// newBuilder returns a new share builder.
// The header for the share is written immediately via init.
func newBuilder(ns Namespace, shareVersion uint8, isFirstShare bool) (*builder, error) {
	b := builder{
		namespace: ns,
		shareVersion: shareVersion,
		isFirstShare: isFirstShare,
		isCompactShare: isCompactShare(ns),
	}
	if err := b.init(); err != nil {
		return nil, err
	}
	return &b, nil
}

// init initializes the share builder by populating rawShareData.
// Compact and sparse shares differ only in the reserved-bytes placeholder.
func (b *builder) init() error {
	if b.isCompactShare {
		return b.prepareCompactShare()
	}
	return b.prepareSparseShare()
}
43 |
// AvailableBytes returns how many more bytes fit in the share under construction.
func (b *builder) AvailableBytes() int {
	return ShareSize - len(b.rawShareData)
}

// ImportRawShare replaces the builder's buffer with rawBytes and returns the
// builder for chaining. No validation is performed here.
func (b *builder) ImportRawShare(rawBytes []byte) *builder {
	b.rawShareData = rawBytes
	return b
}
52 |
53 | func (b *builder) AddData(rawData []byte) (rawDataLeftOver []byte) {
54 | // find the len left in the pending share
55 | pendingLeft := ShareSize - len(b.rawShareData)
56 |
57 | // if we can simply add the tx to the share without creating a new
58 | // pending share, do so and return
59 | if len(rawData) <= pendingLeft {
60 | b.rawShareData = append(b.rawShareData, rawData...)
61 | return nil
62 | }
63 |
64 | // if we can only add a portion of the rawData to the pending share,
65 | // then we add it and add the pending share to the finalized shares.
66 | chunk := rawData[:pendingLeft]
67 | b.rawShareData = append(b.rawShareData, chunk...)
68 |
69 | // We need to finish this share and start a new one
70 | // so we return the leftover to be written into a new share
71 | return rawData[pendingLeft:]
72 | }
73 |
74 | func (b *builder) Build() (*Share, error) {
75 | return NewShare(b.rawShareData)
76 | }
77 |
78 | // IsEmptyShare returns true if no data has been written to the share
79 | func (b *builder) IsEmptyShare() bool {
80 | expectedLen := NamespaceSize + ShareInfoBytes
81 | if b.isCompactShare {
82 | expectedLen += ShareReservedBytes
83 | }
84 | if b.isFirstShare {
85 | expectedLen += SequenceLenBytes
86 | }
87 | return len(b.rawShareData) == expectedLen
88 | }
89 |
90 | func (b *builder) ZeroPadIfNecessary() (bytesOfPadding int) {
91 | b.rawShareData, bytesOfPadding = zeroPadIfNecessary(b.rawShareData, ShareSize)
92 | return bytesOfPadding
93 | }
94 |
95 | // isEmptyReservedBytes returns true if the reserved bytes are empty.
96 | func (b *builder) isEmptyReservedBytes() (bool, error) {
97 | indexOfReservedBytes := b.indexOfReservedBytes()
98 | reservedBytes, err := ParseReservedBytes(b.rawShareData[indexOfReservedBytes : indexOfReservedBytes+ShareReservedBytes])
99 | if err != nil {
100 | return false, err
101 | }
102 | return reservedBytes == 0, nil
103 | }
104 |
105 | // indexOfReservedBytes returns the index of the reserved bytes in the share.
106 | func (b *builder) indexOfReservedBytes() int {
107 | if b.isFirstShare {
108 | // if the share is the first share, the reserved bytes follow the namespace, info byte, and sequence length
109 | return NamespaceSize + ShareInfoBytes + SequenceLenBytes
110 | }
111 | // if the share is not the first share, the reserved bytes follow the namespace and info byte
112 | return NamespaceSize + ShareInfoBytes
113 | }
114 |
115 | // indexOfInfoBytes returns the index of the InfoBytes.
116 | func (b *builder) indexOfInfoBytes() int {
117 | // the info byte is immediately after the namespace
118 | return NamespaceSize
119 | }
120 |
121 | // MaybeWriteReservedBytes will be a no-op if the reserved bytes
122 | // have already been populated. If the reserved bytes are empty, it will write
123 | // the location of the next unit of data to the reserved bytes.
124 | func (b *builder) MaybeWriteReservedBytes() error {
125 | if !b.isCompactShare {
126 | return errors.New("this is not a compact share")
127 | }
128 |
129 | empty, err := b.isEmptyReservedBytes()
130 | if err != nil {
131 | return err
132 | }
133 | if !empty {
134 | return nil
135 | }
136 |
137 | byteIndexOfNextUnit := len(b.rawShareData)
138 | reservedBytes, err := NewReservedBytes(uint32(byteIndexOfNextUnit))
139 | if err != nil {
140 | return err
141 | }
142 |
143 | indexOfReservedBytes := b.indexOfReservedBytes()
144 | // overwrite the reserved bytes of the pending share
145 | for i := 0; i < ShareReservedBytes; i++ {
146 | b.rawShareData[indexOfReservedBytes+i] = reservedBytes[i]
147 | }
148 | return nil
149 | }
150 |
151 | // WriteSequenceLen writes the sequence length to the first share.
152 | func (b *builder) WriteSequenceLen(sequenceLen uint32) error {
153 | if b == nil {
154 | return errors.New("the builder object is not initialized (is nil)")
155 | }
156 | if !b.isFirstShare {
157 | return errors.New("not the first share")
158 | }
159 | sequenceLenBuf := make([]byte, SequenceLenBytes)
160 | binary.BigEndian.PutUint32(sequenceLenBuf, sequenceLen)
161 |
162 | for i := 0; i < SequenceLenBytes; i++ {
163 | b.rawShareData[NamespaceSize+ShareInfoBytes+i] = sequenceLenBuf[i]
164 | }
165 |
166 | return nil
167 | }
168 |
// WriteSigner writes the signer's information to the share.
// It is deliberately a silent no-op when the builder is nil, the share is not
// the first in its sequence, or the share version is not one — only v1 first
// shares carry a signer.
func (b *builder) WriteSigner(signer []byte) {
	// only write the signer if it is the first share and the share version is 1
	if b == nil || !b.isFirstShare || b.shareVersion != ShareVersionOne {
		return
	}
	// NOTE: we don't check whether previous data has already been expected
	// like the sequence length (we just assume it has)
	b.rawShareData = append(b.rawShareData, signer...)
}

// FlipSequenceStart flips the sequence start indicator of the share provided
func (b *builder) FlipSequenceStart() {
	infoByteIndex := b.indexOfInfoBytes()

	// the sequence start indicator is the last bit of the info byte so flip the
	// last bit
	b.rawShareData[infoByteIndex] ^= 0x01
}
188 |
189 | func (b *builder) prepareCompactShare() error {
190 | shareData := make([]byte, 0, ShareSize)
191 | infoByte, err := NewInfoByte(b.shareVersion, b.isFirstShare)
192 | if err != nil {
193 | return err
194 | }
195 | placeholderSequenceLen := make([]byte, SequenceLenBytes)
196 | placeholderReservedBytes := make([]byte, ShareReservedBytes)
197 |
198 | shareData = append(shareData, b.namespace.Bytes()...)
199 | shareData = append(shareData, byte(infoByte))
200 |
201 | if b.isFirstShare {
202 | shareData = append(shareData, placeholderSequenceLen...)
203 | }
204 |
205 | shareData = append(shareData, placeholderReservedBytes...)
206 |
207 | b.rawShareData = shareData
208 |
209 | return nil
210 | }
211 |
212 | func (b *builder) prepareSparseShare() error {
213 | shareData := make([]byte, 0, ShareSize)
214 | infoByte, err := NewInfoByte(b.shareVersion, b.isFirstShare)
215 | if err != nil {
216 | return err
217 | }
218 | placeholderSequenceLen := make([]byte, SequenceLenBytes)
219 |
220 | shareData = append(shareData, b.namespace.Bytes()...)
221 | shareData = append(shareData, byte(infoByte))
222 |
223 | if b.isFirstShare {
224 | shareData = append(shareData, placeholderSequenceLen...)
225 | }
226 |
227 | b.rawShareData = shareData
228 | return nil
229 | }
230 |
// isCompactShare reports whether shares in the given namespace use the
// compact format (transaction and pay-for-blob namespaces).
func isCompactShare(ns Namespace) bool {
	return ns.IsTx() || ns.IsPayForBlob()
}
234 |
--------------------------------------------------------------------------------
/share/share_builder_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | "github.com/stretchr/testify/require"
9 | )
10 |
// TestShareBuilderIsEmptyShare verifies that IsEmptyShare reports true only
// when no data has been written to the builder, across first/continuation and
// compact/sparse share variants.
func TestShareBuilderIsEmptyShare(t *testing.T) {
	type testCase struct {
		name    string
		builder *builder
		data    []byte // input data
		want    bool
	}
	ns1 := MustNewV0Namespace(bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize))

	testCases := []testCase{
		{
			name:    "first compact share empty",
			builder: mustNewBuilder(t, TxNamespace, ShareVersionZero, true),
			data:    nil,
			want:    true,
		},
		{
			name:    "first compact share not empty",
			builder: mustNewBuilder(t, TxNamespace, ShareVersionZero, true),
			data:    []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
			want:    false,
		},
		{
			name:    "first sparse share empty",
			builder: mustNewBuilder(t, ns1, ShareVersionZero, true),
			data:    nil,
			want:    true,
		},
		{
			name:    "first sparse share not empty",
			builder: mustNewBuilder(t, ns1, ShareVersionZero, true),
			data:    []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
			want:    false,
		},
		{
			name:    "continues compact share empty",
			builder: mustNewBuilder(t, TxNamespace, ShareVersionZero, false),
			data:    nil,
			want:    true,
		},
		{
			name:    "continues compact share not empty",
			builder: mustNewBuilder(t, TxNamespace, ShareVersionZero, false),
			data:    []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
			want:    false,
		},
		{
			name:    "continues sparse share not empty",
			builder: mustNewBuilder(t, ns1, ShareVersionZero, false),
			data:    []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
			want:    false,
		},
		{
			name:    "continues sparse share empty",
			builder: mustNewBuilder(t, ns1, ShareVersionZero, false),
			data:    nil,
			want:    true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// AddData with nil leaves the share empty; any bytes make it non-empty
			tc.builder.AddData(tc.data)
			assert.Equal(t, tc.want, tc.builder.IsEmptyShare())
		})
	}
}
78 |
79 | func TestShareBuilderWriteSequenceLen(t *testing.T) {
80 | type testCase struct {
81 | name string
82 | builder *builder
83 | wantLen uint32
84 | wantErr bool
85 | }
86 | ns1 := MustNewV0Namespace(bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize))
87 |
88 | testCases := []testCase{
89 | {
90 | name: "first share",
91 | builder: mustNewBuilder(t, ns1, 1, true),
92 | wantLen: 10,
93 | wantErr: false,
94 | },
95 | {
96 | name: "first share with long sequence",
97 | builder: mustNewBuilder(t, ns1, 1, true),
98 | wantLen: 323,
99 | wantErr: false,
100 | },
101 | {
102 | name: "continuation sparse share",
103 | builder: mustNewBuilder(t, ns1, 1, false),
104 | wantLen: 10,
105 | wantErr: true,
106 | },
107 | {
108 | name: "compact share",
109 | builder: mustNewBuilder(t, TxNamespace, 1, true),
110 | wantLen: 10,
111 | wantErr: false,
112 | },
113 | {
114 | name: "continuation compact share",
115 | builder: mustNewBuilder(t, ns1, 1, false),
116 | wantLen: 10,
117 | wantErr: true,
118 | },
119 | {
120 | name: "nil builder",
121 | builder: &builder{},
122 | wantLen: 10,
123 | wantErr: true,
124 | },
125 | }
126 |
127 | for _, tc := range testCases {
128 | t.Run(tc.name, func(t *testing.T) {
129 | if err := tc.builder.WriteSequenceLen(tc.wantLen); tc.wantErr {
130 | assert.Error(t, err)
131 | return
132 | }
133 |
134 | tc.builder.ZeroPadIfNecessary()
135 | share, err := tc.builder.Build()
136 | require.NoError(t, err)
137 |
138 | length := share.SequenceLen()
139 | assert.Equal(t, tc.wantLen, length)
140 | })
141 | }
142 | }
143 |
// TestShareBuilderAddData verifies that AddData returns nil when all input
// fits in the current share, and returns exactly the leftover bytes when the
// input overflows the share's content capacity by one byte.
func TestShareBuilderAddData(t *testing.T) {
	type testCase struct {
		name    string
		builder *builder
		data    []byte // input data
		want    []byte
	}
	ns1 := MustNewV0Namespace(bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize))

	testCases := []testCase{
		{
			name:    "small share",
			builder: mustNewBuilder(t, ns1, ShareVersionZero, true),
			data:    []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
			want:    nil,
		},
		{
			// capacity of a first compact share: everything after namespace,
			// info byte, sequence length, and reserved bytes
			name:    "exact fit first compact share",
			builder: mustNewBuilder(t, TxNamespace, ShareVersionZero, true),
			data:    bytes.Repeat([]byte{1}, ShareSize-NamespaceSize-ShareInfoBytes-ShareReservedBytes-SequenceLenBytes),
			want:    nil,
		},
		{
			// sparse shares have no reserved bytes, so their capacity is larger
			name:    "exact fit first sparse share",
			builder: mustNewBuilder(t, ns1, ShareVersionZero, true),
			data:    bytes.Repeat([]byte{1}, ShareSize-NamespaceSize-SequenceLenBytes-1 /*1 = info byte*/),
			want:    nil,
		},
		{
			// continuation shares carry no sequence length
			name:    "exact fit continues compact share",
			builder: mustNewBuilder(t, TxNamespace, ShareVersionZero, false),
			data:    bytes.Repeat([]byte{1}, ShareSize-NamespaceSize-ShareReservedBytes-1 /*1 = info byte*/),
			want:    nil,
		},
		{
			name:    "exact fit continues sparse share",
			builder: mustNewBuilder(t, ns1, ShareVersionZero, false),
			data:    bytes.Repeat([]byte{1}, ShareSize-NamespaceSize-1 /*1 = info byte*/),
			want:    nil,
		},
		{
			name:    "oversize first compact share",
			builder: mustNewBuilder(t, TxNamespace, ShareVersionZero, true),
			data:    bytes.Repeat([]byte{1}, 1 /*1 extra byte*/ +ShareSize-NamespaceSize-ShareReservedBytes-SequenceLenBytes-1 /*1 = info byte*/),
			want:    []byte{1},
		},
		{
			name:    "oversize first sparse share",
			builder: mustNewBuilder(t, ns1, ShareVersionZero, true),
			data:    bytes.Repeat([]byte{1}, 1 /*1 extra byte*/ +ShareSize-NamespaceSize-SequenceLenBytes-1 /*1 = info byte*/),
			want:    []byte{1},
		},
		{
			name:    "oversize continues compact share",
			builder: mustNewBuilder(t, TxNamespace, ShareVersionZero, false),
			data:    bytes.Repeat([]byte{1}, 1 /*1 extra byte*/ +ShareSize-NamespaceSize-ShareReservedBytes-1 /*1 = info byte*/),
			want:    []byte{1},
		},
		{
			name:    "oversize continues sparse share",
			builder: mustNewBuilder(t, ns1, ShareVersionZero, false),
			data:    bytes.Repeat([]byte{1}, 1 /*1 extra byte*/ +ShareSize-NamespaceSize-1 /*1 = info byte*/),
			want:    []byte{1},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got := tc.builder.AddData(tc.data)
			assert.Equal(t, tc.want, got)
		})
	}
}
217 |
// TestShareBuilderImportRawData verifies that ImportRawShare correctly parses
// hand-assembled raw share bytes (first/continuation, sparse/compact) back
// into a builder, and that a raw share longer than ShareSize fails to build.
func TestShareBuilderImportRawData(t *testing.T) {
	type testCase struct {
		name       string
		shareBytes []byte
		want       []byte
		wantErr    bool
	}
	ns1 := MustNewV0Namespace(bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize))

	firstSparseShare := append(ns1.Bytes(), []byte{
		1,           // info byte
		0, 0, 0, 10, // sequence len
		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data
	}...)

	continuationSparseShare := append(ns1.Bytes(), []byte{
		0,                             // info byte
		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data
	}...)

	firstCompactShare := append(TxNamespace.Bytes(), []byte{
		1,           // info byte
		0, 0, 0, 10, // sequence len
		0, 0, 0, 15, // reserved bytes
		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data
	}...)

	continuationCompactShare := append(TxNamespace.Bytes(), []byte{
		0,          // info byte
		0, 0, 0, 0, // reserved bytes
		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data
	}...)

	// 513 payload bytes guarantee the raw share exceeds ShareSize (512), so
	// building from this import must fail
	oversizedImport := append(
		append(
			ns1.Bytes(),
			[]byte{
				0,          // info byte
				0, 0, 0, 0, // reserved bytes
			}...), bytes.Repeat([]byte{1}, 513)...) // data

	testCases := []testCase{
		{
			name:       "first sparse share",
			shareBytes: firstSparseShare,
			want:       []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
		},
		{
			name:       "continuation sparse share",
			shareBytes: continuationSparseShare,
			want:       []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
		},
		{
			name:       "first compact share",
			shareBytes: firstCompactShare,
			want:       []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
		},
		{
			name:       "continuation compact share",
			shareBytes: continuationCompactShare,
			want:       []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
		},
		{
			name:       "oversized import",
			shareBytes: oversizedImport,
			wantErr:    true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			b := newEmptyBuilder().ImportRawShare(tc.shareBytes)
			b.ZeroPadIfNecessary()
			builtShare, err := b.Build()
			if tc.wantErr {
				assert.Error(t, err)
				return
			}

			rawData := builtShare.RawData()
			// Since rawData has padding, we need to use contains
			if !bytes.Contains(rawData, tc.want) {
				t.Errorf("%#v does not contain %#v", rawData, tc.want)
			}
		})
	}
}
305 |
306 | // mustNewBuilder returns a new builder with the given parameters. It fails the test if an error is encountered.
307 | func mustNewBuilder(t *testing.T, ns Namespace, shareVersion uint8, isFirstShare bool) *builder {
308 | b, err := newBuilder(ns, shareVersion, isFirstShare)
309 | require.NoError(t, err)
310 | return b
311 | }
312 |
--------------------------------------------------------------------------------
/share/share_sequence.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "fmt"
5 | )
6 |
// Sequence represents a contiguous sequence of shares that are part of the
// same namespace and blob. For compact shares, one share sequence exists per
// reserved namespace. For sparse shares, one share sequence exists per blob.
type Sequence struct {
	// Namespace is the namespace shared by every share in the sequence.
	Namespace Namespace
	// Shares are the ordered shares of the sequence; the first share carries
	// the sequence length.
	Shares []Share
}
14 |
15 | // RawData returns the raw share data of this share sequence. The raw data does
16 | // not contain the namespace ID, info byte, sequence length, or reserved bytes.
17 | func (s Sequence) RawData() (data []byte, err error) {
18 | for _, share := range s.Shares {
19 | data = append(data, share.RawData()...)
20 | }
21 |
22 | sequenceLen, err := s.SequenceLen()
23 | if err != nil {
24 | return []byte{}, err
25 | }
26 | // trim any padding that may have been added to the last share
27 | return data[:sequenceLen], nil
28 | }
29 |
30 | func (s Sequence) SequenceLen() (uint32, error) {
31 | if len(s.Shares) == 0 {
32 | return 0, fmt.Errorf("invalid sequence length because share sequence %v has no shares", s)
33 | }
34 | firstShare := s.Shares[0]
35 | return firstShare.SequenceLen(), nil
36 | }
37 |
38 | // validSequenceLen extracts the sequenceLen written to the first share
39 | // and returns an error if the number of shares needed to store a sequence of
40 | // length sequenceLen doesn't match the number of shares in this share
41 | // sequence. Returns nil if there is no error.
42 | func (s Sequence) validSequenceLen() error {
43 | if len(s.Shares) == 0 {
44 | return fmt.Errorf("invalid sequence length because share sequence %v has no shares", s)
45 | }
46 | if s.isPadding() {
47 | return nil
48 | }
49 |
50 | firstShare := s.Shares[0]
51 | sharesNeeded, err := numberOfSharesNeeded(firstShare)
52 | if err != nil {
53 | return err
54 | }
55 |
56 | if len(s.Shares) != sharesNeeded {
57 | return fmt.Errorf("share sequence has %d shares but needed %d shares", len(s.Shares), sharesNeeded)
58 | }
59 | return nil
60 | }
61 |
62 | func (s Sequence) isPadding() bool {
63 | if len(s.Shares) != 1 {
64 | return false
65 | }
66 | return s.Shares[0].IsPadding()
67 | }
68 |
69 | // numberOfSharesNeeded extracts the sequenceLen written to the share
70 | // firstShare and returns the number of shares needed to store a sequence of
71 | // that length.
72 | func numberOfSharesNeeded(firstShare Share) (sharesUsed int, err error) {
73 | sequenceLen := firstShare.SequenceLen()
74 | if firstShare.IsCompactShare() {
75 | return CompactSharesNeeded(sequenceLen), nil
76 | }
77 | return SparseSharesNeeded(sequenceLen), nil
78 | }
79 |
80 | // CompactSharesNeeded returns the number of compact shares needed to store a
81 | // sequence of length sequenceLen. The parameter sequenceLen is the number
82 | // of bytes of transactions or intermediate state roots in a sequence.
83 | func CompactSharesNeeded(sequenceLen uint32) (sharesNeeded int) {
84 | if sequenceLen == 0 {
85 | return 0
86 | }
87 |
88 | if sequenceLen < FirstCompactShareContentSize {
89 | return 1
90 | }
91 |
92 | // Calculate remaining bytes after first share
93 | remainingBytes := sequenceLen - FirstCompactShareContentSize
94 |
95 | // Calculate number of continuation shares needed
96 | continuationShares := remainingBytes / ContinuationCompactShareContentSize
97 | overflow := remainingBytes % ContinuationCompactShareContentSize
98 | if overflow > 0 {
99 | continuationShares++
100 | }
101 |
102 | // 1 first share + continuation shares
103 | return 1 + int(continuationShares)
104 | }
105 |
106 | // SparseSharesNeeded returns the number of shares needed to store a sequence of
107 | // length sequenceLen.
108 | func SparseSharesNeeded(sequenceLen uint32) (sharesNeeded int) {
109 | if sequenceLen == 0 {
110 | return 0
111 | }
112 |
113 | if sequenceLen < FirstSparseShareContentSize {
114 | return 1
115 | }
116 |
117 | // Calculate remaining bytes after first share
118 | remainingBytes := sequenceLen - FirstSparseShareContentSize
119 |
120 | // Calculate number of continuation shares needed
121 | continuationShares := remainingBytes / ContinuationSparseShareContentSize
122 | overflow := remainingBytes % ContinuationSparseShareContentSize
123 | if overflow > 0 {
124 | continuationShares++
125 | }
126 |
127 | // 1 first share + continuation shares
128 | return 1 + int(continuationShares)
129 | }
130 |
--------------------------------------------------------------------------------
/share/share_sequence_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | "math"
7 | "testing"
8 |
9 | "github.com/stretchr/testify/assert"
10 | "github.com/stretchr/testify/require"
11 | )
12 |
13 | func TestSequenceRawData(t *testing.T) {
14 | type testCase struct {
15 | name string
16 | Sequence Sequence
17 | want []byte
18 | wantErr bool
19 | }
20 | blobNamespace := RandomBlobNamespace()
21 |
22 | testCases := []testCase{
23 | {
24 | name: "empty share sequence",
25 | Sequence: Sequence{
26 | Namespace: TxNamespace,
27 | Shares: []Share{},
28 | },
29 | want: []byte{},
30 | wantErr: false,
31 | },
32 | {
33 | name: "one empty share",
34 | Sequence: Sequence{
35 | Namespace: TxNamespace,
36 | Shares: []Share{
37 | shareWithData(blobNamespace, true, 0, []byte{}),
38 | },
39 | },
40 | want: []byte{},
41 | wantErr: false,
42 | },
43 | {
44 | name: "one share with one byte",
45 | Sequence: Sequence{
46 | Namespace: TxNamespace,
47 | Shares: []Share{
48 | shareWithData(blobNamespace, true, 1, []byte{0x0f}),
49 | },
50 | },
51 | want: []byte{0xf},
52 | wantErr: false,
53 | },
54 | {
55 | name: "removes padding from last share",
56 | Sequence: Sequence{
57 | Namespace: TxNamespace,
58 | Shares: []Share{
59 | shareWithData(blobNamespace, true, FirstSparseShareContentSize+1, bytes.Repeat([]byte{0xf}, FirstSparseShareContentSize)),
60 | shareWithData(blobNamespace, false, 0, []byte{0x0f}),
61 | },
62 | },
63 | want: bytes.Repeat([]byte{0xf}, FirstSparseShareContentSize+1),
64 | wantErr: false,
65 | },
66 | }
67 |
68 | for _, tc := range testCases {
69 | t.Run(tc.name, func(t *testing.T) {
70 | got, err := tc.Sequence.RawData()
71 | if tc.wantErr {
72 | assert.Error(t, err)
73 | return
74 | }
75 | assert.Equal(t, tc.want, got)
76 | })
77 | }
78 | }
79 |
80 | func TestCompactSharesNeeded(t *testing.T) {
81 | type testCase struct {
82 | sequenceLen uint32
83 | want int
84 | }
85 | testCases := []testCase{
86 | {0, 0},
87 | {1, 1},
88 | {2, 1},
89 | {FirstCompactShareContentSize, 1},
90 | {FirstCompactShareContentSize + 1, 2},
91 | {FirstCompactShareContentSize + ContinuationCompactShareContentSize, 2},
92 | {FirstCompactShareContentSize + ContinuationCompactShareContentSize*100, 101},
93 | {1000, 3},
94 | {10000, 21},
95 | {100000, 210},
96 | {math.MaxUint32 - ShareSize, 8985287},
97 | {math.MaxUint32, 8985288},
98 | }
99 | for _, tc := range testCases {
100 | got := CompactSharesNeeded(tc.sequenceLen)
101 | assert.Equal(t, tc.want, got)
102 | }
103 | }
104 |
105 | func TestSparseSharesNeeded(t *testing.T) {
106 | type testCase struct {
107 | sequenceLen uint32
108 | want int
109 | }
110 | testCases := []testCase{
111 | {0, 0},
112 | {1, 1},
113 | {2, 1},
114 | {FirstSparseShareContentSize, 1},
115 | {FirstSparseShareContentSize + 1, 2},
116 | {FirstSparseShareContentSize + ContinuationSparseShareContentSize, 2},
117 | {FirstSparseShareContentSize + ContinuationCompactShareContentSize*2, 3},
118 | {FirstSparseShareContentSize + ContinuationCompactShareContentSize*99, 100},
119 | {1000, 3},
120 | {10000, 21},
121 | {100000, 208},
122 | {math.MaxUint32 - ShareSize, 8910720},
123 | {math.MaxUint32, 8910721},
124 | }
125 | for _, tc := range testCases {
126 | got := SparseSharesNeeded(tc.sequenceLen)
127 | assert.Equal(t, tc.want, got)
128 | }
129 | }
130 |
131 | func shareWithData(namespace Namespace, isSequenceStart bool, sequenceLen uint32, data []byte) (rawShare Share) {
132 | infoByte, _ := NewInfoByte(ShareVersionZero, isSequenceStart)
133 | rawShareBytes := make([]byte, 0, ShareSize)
134 | rawShareBytes = append(rawShareBytes, namespace.Bytes()...)
135 | rawShareBytes = append(rawShareBytes, byte(infoByte))
136 | if isSequenceStart {
137 | sequenceLenBuf := make([]byte, SequenceLenBytes)
138 | binary.BigEndian.PutUint32(sequenceLenBuf, sequenceLen)
139 | rawShareBytes = append(rawShareBytes, sequenceLenBuf...)
140 | }
141 | rawShareBytes = append(rawShareBytes, data...)
142 |
143 | return padShare(Share{data: rawShareBytes})
144 | }
145 |
// Test_validSequenceLen verifies that validSequenceLen accepts well-formed and
// padding sequences, and rejects empty sequences and sequences whose share
// count does not match the recorded sequence length.
func Test_validSequenceLen(t *testing.T) {
	type testCase struct {
		name     string
		Sequence Sequence
		wantErr  bool
	}

	tailPadding := Sequence{
		Namespace: TailPaddingNamespace,
		Shares:    []Share{TailPaddingShare()},
	}

	ns1 := MustNewV0Namespace(bytes.Repeat([]byte{0x1}, NamespaceVersionZeroIDSize))
	share, err := NamespacePaddingShare(ns1, ShareVersionZero)
	require.NoError(t, err)
	namespacePadding := Sequence{
		Namespace: ns1,
		Shares:    []Share{share},
	}

	reservedPadding := Sequence{
		Namespace: PrimaryReservedPaddingNamespace,
		Shares:    []Share{ReservedPaddingShare()},
	}

	// a continuation share as the first share reports sequence length 0,
	// which requires 0 shares — mismatching the 1 share present
	notSequenceStart := Sequence{
		Namespace: ns1,
		Shares: []Share{
			shareWithData(ns1, false, 0, []byte{0x0f}),
		},
	}

	testCases := []testCase{
		{
			name:     "empty share sequence",
			Sequence: Sequence{},
			wantErr:  true,
		},
		{
			name:     "valid share sequence",
			Sequence: generateValidSequence(t),
			wantErr:  false,
		},
		{
			name:     "tail padding",
			Sequence: tailPadding,
			wantErr:  false,
		},
		{
			name:     "namespace padding",
			Sequence: namespacePadding,
			wantErr:  false,
		},
		{
			name:     "reserved padding",
			Sequence: reservedPadding,
			wantErr:  false,
		},
		{
			name:     "sequence length where first share is not sequence start",
			Sequence: notSequenceStart,
			wantErr:  true, // error: "share sequence has 1 shares but needed 0 shares"
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.Sequence.validSequenceLen()
			if tc.wantErr {
				assert.Error(t, err)
				return
			}
			assert.NoError(t, err)
		})
	}
}
222 |
223 | func generateValidSequence(t *testing.T) Sequence {
224 | css := NewCompactShareSplitter(TxNamespace, ShareVersionZero)
225 | txs := generateRandomTxs(5, 200)
226 | for _, tx := range txs {
227 | err := css.WriteTx(tx)
228 | require.NoError(t, err)
229 | }
230 | shares, err := css.Export()
231 | require.NoError(t, err)
232 |
233 | return Sequence{
234 | Namespace: TxNamespace,
235 | Shares: shares,
236 | }
237 | }
238 |
// FuzzValidSequenceLen fuzzes validSequenceLen with arbitrary single-share
// sequences built from fuzzed share and namespace bytes.
//
// NOTE(review): this asserts NoError for every fuzzed input; a first share
// whose recorded sequence length requires more than one share would make
// validSequenceLen return an error (cf. the commented-out expectation below).
// Confirm that NewShare's validation rules out such inputs.
func FuzzValidSequenceLen(f *testing.F) {
	f.Fuzz(func(t *testing.T, rawData []byte, rawNamespace []byte) {
		share, err := NewShare(rawData)
		if err != nil {
			t.Skip()
		}

		ns, err := NewNamespaceFromBytes(rawNamespace)
		if err != nil {
			t.Skip()
		}

		Sequence := Sequence{
			Namespace: ns,
			Shares:    []Share{*share},
		}

		// want := fmt.Errorf("share sequence has 1 shares but needed 0 shares")
		err = Sequence.validSequenceLen()
		assert.NoError(t, err)
	})
}
261 |
// padShare returns a share padded with trailing zeros.
// It delegates to fillShare with a zero fill byte.
func padShare(share Share) (paddedShare Share) {
	return fillShare(share, 0)
}
266 |
--------------------------------------------------------------------------------
/share/share_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | "github.com/stretchr/testify/require"
9 | )
10 |
// TestSequenceLen verifies that Share.SequenceLen decodes the big-endian
// sequence length from first shares and returns 0 for continuation shares.
func TestSequenceLen(t *testing.T) {
	type testCase struct {
		name    string
		share   Share
		wantLen uint32
	}
	firstShare := append(bytes.Repeat([]byte{1}, NamespaceSize),
		[]byte{
			1,           // info byte
			0, 0, 0, 10, // sequence len
			1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data
		}...)
	// 0x00000143 big-endian == 323
	firstShareWithLongSequence := append(bytes.Repeat([]byte{1}, NamespaceSize),
		[]byte{
			1,          // info byte
			0, 0, 1, 67, // sequence len
		}...)
	continuationShare := append(bytes.Repeat([]byte{1}, NamespaceSize),
		[]byte{
			0, // info byte
		}...)
	compactShare := append(TxNamespace.Bytes(),
		[]byte{
			1,           // info byte
			0, 0, 0, 10, // sequence len
		}...)
	testCases := []testCase{
		{
			name:    "first share",
			share:   Share{data: firstShare},
			wantLen: 10,
		},
		{
			name:    "first share with long sequence",
			share:   Share{data: firstShareWithLongSequence},
			wantLen: 323,
		},
		{
			name:    "continuation share",
			share:   Share{data: continuationShare},
			wantLen: 0,
		},
		{
			name:    "compact share",
			share:   Share{data: compactShare},
			wantLen: 10,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			length := tc.share.SequenceLen()
			if tc.wantLen != length {
				t.Errorf("want %d, got %d", tc.wantLen, length)
			}
		})
	}
}
69 |
// TestRawData verifies that Share.RawData strips the namespace, info byte,
// sequence length, and reserved bytes, leaving only the payload.
func TestRawData(t *testing.T) {
	type testCase struct {
		name  string
		share Share
		want  []byte
	}
	sparseNamespaceID := MustNewV0Namespace(bytes.Repeat([]byte{0x1}, NamespaceVersionZeroIDSize))
	firstSparseShare := append(
		sparseNamespaceID.Bytes(),
		[]byte{
			1,           // info byte
			0, 0, 0, 10, // sequence len
			1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data
		}...)
	continuationSparseShare := append(
		sparseNamespaceID.Bytes(),
		[]byte{
			0,                             // info byte
			1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data
		}...)
	firstCompactShare := append(TxNamespace.Bytes(),
		[]byte{
			1,           // info byte
			0, 0, 0, 10, // sequence len
			0, 0, 0, 15, // reserved bytes
			1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data
		}...)
	continuationCompactShare := append(TxNamespace.Bytes(),
		[]byte{
			0,          // info byte
			0, 0, 0, 0, // reserved bytes
			1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data
		}...)
	testCases := []testCase{
		{
			name:  "first sparse share",
			share: Share{data: firstSparseShare},
			want:  []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
		},
		{
			name:  "continuation sparse share",
			share: Share{data: continuationSparseShare},
			want:  []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
		},
		{
			name:  "first compact share",
			share: Share{data: firstCompactShare},
			want:  []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
		},
		{
			name:  "continuation compact share",
			share: Share{data: continuationCompactShare},
			want:  []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.want, tc.share.RawData())
		})
	}
}
132 |
133 | func TestIsCompactShare(t *testing.T) {
134 | type testCase struct {
135 | name string
136 | share Share
137 | want bool
138 | }
139 |
140 | ns1 := MustNewV0Namespace(bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize))
141 | txShare, _ := zeroPadIfNecessary(TxNamespace.Bytes(), ShareSize)
142 | pfbTxShare, _ := zeroPadIfNecessary(PayForBlobNamespace.Bytes(), ShareSize)
143 | blobShare, _ := zeroPadIfNecessary(ns1.Bytes(), ShareSize)
144 |
145 | testCases := []testCase{
146 | {
147 | name: "tx share",
148 | share: Share{data: txShare},
149 | want: true,
150 | },
151 | {
152 | name: "pfb tx share",
153 | share: Share{data: pfbTxShare},
154 | want: true,
155 | },
156 | {
157 | name: "blob share",
158 | share: Share{data: blobShare},
159 | want: false,
160 | },
161 | }
162 |
163 | for _, tc := range testCases {
164 | assert.Equal(t, tc.want, tc.share.IsCompactShare())
165 | }
166 | }
167 |
// TestIsPadding verifies that IsPadding is true for namespace, tail, and
// reserved padding shares and false for a regular blob share.
func TestIsPadding(t *testing.T) {
	type testCase struct {
		name  string
		share Share
		want  bool
	}
	// NOTE(review): ns1 is not declared in this function; it is presumably a
	// package-level test fixture defined in another file of this package.
	blobShare, _ := zeroPadIfNecessary(
		append(
			ns1.Bytes(),
			[]byte{
				1,          // info byte
				0, 0, 0, 1, // sequence len
				0xff, // data
			}...,
		),
		ShareSize)

	nsPadding, err := NamespacePaddingShare(ns1, ShareVersionZero)
	require.NoError(t, err)

	testCases := []testCase{
		{
			name:  "blob share",
			share: Share{data: blobShare},
			want:  false,
		},
		{
			name:  "namespace padding",
			share: nsPadding,
			want:  true,
		},
		{
			name:  "tail padding",
			share: TailPaddingShare(),
			want:  true,
		},
		{
			name:  "reserved padding",
			share: ReservedPaddingShare(),
			want:  true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.want, tc.share.IsPadding())
		})
	}
}
217 |
218 | func TestUnsupportedShareVersion(t *testing.T) {
219 | unsupportedShareVersion := 5
220 | infoByte, _ := NewInfoByte(uint8(unsupportedShareVersion), true)
221 |
222 | rawShare := RandomNamespace().Bytes()
223 | rawShare = append(rawShare, byte(infoByte))
224 | rawShare = append(rawShare, bytes.Repeat([]byte{0}, ShareSize-len(rawShare))...)
225 | share, err := NewShare(rawShare)
226 | require.NoError(t, err)
227 | require.Error(t, share.CheckVersionSupported())
228 | }
229 |
230 | func TestShareToBytesAndFromBytes(t *testing.T) {
231 | blobs, err := GenerateV0Blobs([]int{580, 380, 1100}, true)
232 | require.NoError(t, err)
233 |
234 | SortBlobs(blobs)
235 | shares, err := splitBlobs(blobs...)
236 | require.NoError(t, err)
237 |
238 | shareBytes := ToBytes(shares)
239 | reconstructedShares, err := FromBytes(shareBytes)
240 | require.NoError(t, err)
241 | assert.Equal(t, shares, reconstructedShares)
242 | }
243 |
244 | func TestMarshalShare(t *testing.T) {
245 | sh, err := RandShares(1)
246 | require.NoError(t, err)
247 | b, err := sh[0].MarshalJSON()
248 | require.NoError(t, err)
249 |
250 | newShare := Share{}
251 | err = newShare.UnmarshalJSON(b)
252 | require.NoError(t, err)
253 |
254 | require.Equal(t, sh[0], newShare)
255 | }
256 |
--------------------------------------------------------------------------------
/share/sparse_shares_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | "github.com/stretchr/testify/require"
8 | )
9 |
// TestSparseShareContainsInfoByte verifies that the sparse share splitter
// marks only the first share of a blob as a sequence start in its info byte.
func TestSparseShareContainsInfoByte(t *testing.T) {
	blob := generateRandomBlobOfShareCount(4)

	sequenceStartInfoByte, err := NewInfoByte(ShareVersionZero, true)
	require.NoError(t, err)

	sequenceContinuationInfoByte, err := NewInfoByte(ShareVersionZero, false)
	require.NoError(t, err)

	type testCase struct {
		name       string
		shareIndex int
		expected   InfoByte
	}
	testCases := []testCase{
		{
			name:       "first share of blob",
			shareIndex: 0,
			expected:   sequenceStartInfoByte,
		},
		{
			name:       "second share of blob",
			shareIndex: 1,
			expected:   sequenceContinuationInfoByte,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			sss := NewSparseShareSplitter()
			err := sss.Write(blob)
			assert.NoError(t, err)
			shares := sss.Export()
			got := shares[tc.shareIndex].InfoByte()
			assert.Equal(t, tc.expected, got)
		})
	}
}
48 |
// TestSparseShareSplitterCount verifies that Count reports the number of
// shares produced for blobs spanning 1, 2, and 10 shares.
func TestSparseShareSplitterCount(t *testing.T) {
	type testCase struct {
		name     string
		blob     *Blob
		expected int
	}
	testCases := []testCase{
		{
			name:     "one share",
			blob:     generateRandomBlobOfShareCount(1),
			expected: 1,
		},
		{
			name:     "two shares",
			blob:     generateRandomBlobOfShareCount(2),
			expected: 2,
		},
		{
			name:     "ten shares",
			blob:     generateRandomBlobOfShareCount(10),
			expected: 10,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			sss := NewSparseShareSplitter()
			err := sss.Write(tc.blob)
			assert.NoError(t, err)
			got := sss.Count()
			assert.Equal(t, tc.expected, got)
		})
	}
}
83 |
// generateRandomBlobOfShareCount returns a blob that spans the given
// number of shares.
// NOTE(review): rawTxSize presumably converts a share-content capacity into a
// blob size that accounts for delimiter overhead — confirm its definition.
func generateRandomBlobOfShareCount(count int) *Blob {
	size := rawTxSize(FirstSparseShareContentSize * count)
	return generateRandomBlob(size)
}
90 |
--------------------------------------------------------------------------------
/share/split_compact_shares.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "crypto/sha256"
5 | "encoding/binary"
6 | "fmt"
7 | )
8 |
9 | // CompactShareSplitter will write raw data compactly across a progressively
10 | // increasing set of shares. It is used to lazily split block data such as
11 | // transactions or intermediate state roots into shares.
12 | type CompactShareSplitter struct {
13 | shares []Share
14 | // pendingShare Share
15 | shareBuilder *builder
16 | namespace Namespace
17 | done bool
18 | shareVersion uint8
19 | // shareRanges is a map from a transaction key to the range of shares it
20 | // occupies. The range assumes this compact share splitter is the only
21 | // thing in the data square (e.g. the range for the first tx starts at index
22 | // 0).
23 | shareRanges map[[sha256.Size]byte]Range
24 | }
25 |
26 | // NewCompactShareSplitter returns a CompactShareSplitter using the provided
27 | // namespace and shareVersion.
28 | func NewCompactShareSplitter(ns Namespace, shareVersion uint8) *CompactShareSplitter {
29 | sb, err := newBuilder(ns, shareVersion, true)
30 | if err != nil {
31 | panic(err)
32 | }
33 |
34 | return &CompactShareSplitter{
35 | shares: []Share{},
36 | namespace: ns,
37 | shareVersion: shareVersion,
38 | shareRanges: map[[sha256.Size]byte]Range{},
39 | shareBuilder: sb,
40 | }
41 | }
42 |
43 | // WriteTx adds the delimited data for the provided tx to the underlying compact
44 | // share splitter.
45 | func (css *CompactShareSplitter) WriteTx(tx []byte) error {
46 | rawData, err := MarshalDelimitedTx(tx)
47 | if err != nil {
48 | return fmt.Errorf("included Tx in mem-pool that can not be encoded %v", tx)
49 | }
50 |
51 | startShare := len(css.shares)
52 |
53 | if err := css.write(rawData); err != nil {
54 | return err
55 | }
56 | endShare := css.Count()
57 | css.shareRanges[sha256.Sum256(tx)] = NewRange(startShare, endShare)
58 |
59 | return nil
60 | }
61 |
// write adds the delimited data to the underlying compact shares.
func (css *CompactShareSplitter) write(rawData []byte) error {
	if css.done {
		// Export already ran; re-open the splitter so writing can continue.
		// remove the last element
		if !css.shareBuilder.IsEmptyShare() {
			css.shares = css.shares[:len(css.shares)-1]
		}
		css.done = false
	}

	// write reserved bytes into the current share if the builder still needs them
	if err := css.shareBuilder.MaybeWriteReservedBytes(); err != nil {
		return err
	}

	for {
		rawDataLeftOver := css.shareBuilder.AddData(rawData)
		if rawDataLeftOver == nil {
			// all remaining data fit in the current share
			break
		}
		// current share is full: finalize it and continue in a fresh share
		if err := css.stackPending(); err != nil {
			return err
		}

		rawData = rawDataLeftOver
	}

	// if the data filled the share exactly, finalize it now so the next write
	// starts a fresh share
	if css.shareBuilder.AvailableBytes() == 0 {
		if err := css.stackPending(); err != nil {
			return err
		}
	}
	return nil
}
95 |
96 | // stackPending will build & add the pending share to accumulated shares
97 | func (css *CompactShareSplitter) stackPending() error {
98 | pendingShare, err := css.shareBuilder.Build()
99 | if err != nil {
100 | return err
101 | }
102 | css.shares = append(css.shares, *pendingShare)
103 |
104 | // Now we need to create a new builder
105 | css.shareBuilder, err = newBuilder(css.namespace, css.shareVersion, false)
106 | return err
107 | }
108 |
// Export returns the underlying compact shares
func (css *CompactShareSplitter) Export() ([]Share, error) {
	if css.isEmpty() {
		return []Share{}, nil
	}

	// in case Export is called multiple times
	if css.done {
		return css.shares, nil
	}

	var bytesOfPadding int
	// add the pending share to the current shares before returning
	if !css.shareBuilder.IsEmptyShare() {
		bytesOfPadding = css.shareBuilder.ZeroPadIfNecessary()
		if err := css.stackPending(); err != nil {
			return []Share{}, err
		}
	}

	// the total sequence length is only known now that the final share has
	// been padded; write it back into the first share
	sequenceLen := css.sequenceLen(bytesOfPadding)
	if err := css.writeSequenceLen(sequenceLen); err != nil {
		return []Share{}, err
	}
	css.done = true
	return css.shares, nil
}
136 |
137 | // ShareRanges returns a map of share ranges to the corresponding tx keys. All
138 | // share ranges in the map of shareRanges will be offset (i.e. incremented) by
139 | // the shareRangeOffset provided. shareRangeOffset should be 0 for the first
140 | // compact share sequence in the data square (transactions) but should be some
141 | // non-zero number for subsequent compact share sequences (e.g. pfb txs).
142 | func (css *CompactShareSplitter) ShareRanges(shareRangeOffset int) map[[sha256.Size]byte]Range {
143 | // apply the shareRangeOffset to all share ranges
144 | shareRanges := make(map[[sha256.Size]byte]Range, len(css.shareRanges))
145 |
146 | for k, v := range css.shareRanges {
147 | shareRanges[k] = Range{
148 | Start: v.Start + shareRangeOffset,
149 | End: v.End + shareRangeOffset,
150 | }
151 | }
152 |
153 | return shareRanges
154 | }
155 |
// writeSequenceLen writes the sequence length to the first share.
func (css *CompactShareSplitter) writeSequenceLen(sequenceLen uint32) error {
	if css.isEmpty() {
		return nil
	}

	// We may find a more efficient way to write seqLen
	// rebuild the first share from its raw bytes so the sequence length can be
	// inserted into it
	b, err := newBuilder(css.namespace, css.shareVersion, true)
	if err != nil {
		return err
	}
	b.ImportRawShare(css.shares[0].ToBytes())
	if err := b.WriteSequenceLen(sequenceLen); err != nil {
		return err
	}

	firstShare, err := b.Build()
	if err != nil {
		return err
	}

	// replace existing first share with new first share
	css.shares[0] = *firstShare

	return nil
}
182 |
183 | // sequenceLen returns the total length in bytes of all units (transactions or
184 | // intermediate state roots) written to this splitter. sequenceLen does not
185 | // include the number of bytes occupied by the namespace ID, the share info
186 | // byte, or the reserved bytes. sequenceLen does include the unit length
187 | // delimiter prefixed to each unit.
188 | func (css *CompactShareSplitter) sequenceLen(bytesOfPadding int) uint32 {
189 | if len(css.shares) == 0 {
190 | return 0
191 | }
192 | if len(css.shares) == 1 {
193 | return uint32(FirstCompactShareContentSize) - uint32(bytesOfPadding)
194 | }
195 |
196 | continuationSharesCount := len(css.shares) - 1
197 | continuationSharesSequenceLen := continuationSharesCount * ContinuationCompactShareContentSize
198 | return uint32(FirstCompactShareContentSize + continuationSharesSequenceLen - bytesOfPadding)
199 | }
200 |
201 | // isEmpty returns whether this compact share splitter is empty.
202 | func (css *CompactShareSplitter) isEmpty() bool {
203 | return len(css.shares) == 0 && css.shareBuilder.IsEmptyShare()
204 | }
205 |
206 | // Count returns the number of shares that would be made if `Export` was invoked
207 | // on this compact share splitter.
208 | func (css *CompactShareSplitter) Count() int {
209 | if !css.shareBuilder.IsEmptyShare() && !css.done {
210 | // pending share is non-empty, so it will be zero padded and added to shares during export
211 | return len(css.shares) + 1
212 | }
213 | return len(css.shares)
214 | }
215 |
// MarshalDelimitedTx prefixes a transaction with the length of the transaction
// encoded as a varint.
func MarshalDelimitedTx(tx []byte) ([]byte, error) {
	delimiter := make([]byte, binary.MaxVarintLen64)
	written := binary.PutUvarint(delimiter, uint64(len(tx)))
	return append(delimiter[:written], tx...), nil
}
224 |
--------------------------------------------------------------------------------
/share/split_sparse_shares.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 |
7 | "golang.org/x/exp/slices"
8 | )
9 |
// SparseShareSplitter lazily splits blobs into shares that will eventually be
// included in a data square. It also has methods to help progressively count
// how many shares the blobs written take up.
type SparseShareSplitter struct {
	// shares holds all shares produced from the blobs written so far.
	shares []Share
}
16 |
17 | func NewSparseShareSplitter() *SparseShareSplitter {
18 | return &SparseShareSplitter{}
19 | }
20 |
// Write writes the provided blob to this sparse share splitter. It returns an
// error or nil if no error is encountered.
func (sss *SparseShareSplitter) Write(blob *Blob) error {
	if !slices.Contains(SupportedShareVersions, blob.ShareVersion()) {
		return fmt.Errorf("unsupported share version: %d", blob.ShareVersion())
	}

	rawData := blob.Data()
	blobNamespace := blob.Namespace()

	// the first share of a blob carries the sequence (blob) length,
	// hence isFirstShare=true
	b, err := newBuilder(blobNamespace, blob.ShareVersion(), true)
	if err != nil {
		return err
	}
	if err := b.WriteSequenceLen(uint32(len(rawData))); err != nil {
		return err
	}
	// add the signer to the first share for v1 share versions only
	if blob.ShareVersion() == ShareVersionOne {
		b.WriteSigner(blob.Signer())
	}

	for rawData != nil {
		rawDataLeftOver := b.AddData(rawData)
		if rawDataLeftOver == nil {
			// this is the blob's final share, pad it out to full width
			// Just call it on the latest share
			b.ZeroPadIfNecessary()
		}

		share, err := b.Build()
		if err != nil {
			return err
		}
		sss.shares = append(sss.shares, *share)

		// subsequent shares are continuation shares (isFirstShare=false)
		b, err = newBuilder(blobNamespace, blob.ShareVersion(), false)
		if err != nil {
			return err
		}
		rawData = rawDataLeftOver
	}

	return nil
}
65 |
66 | // WriteNamespacePaddingShares adds padding shares with the namespace of the
67 | // last written share. This is useful to follow the non-interactive default
68 | // rules. This function assumes that at least one share has already been
69 | // written.
70 | func (sss *SparseShareSplitter) WriteNamespacePaddingShares(count int) error {
71 | if count < 0 {
72 | return errors.New("cannot write negative namespaced shares")
73 | }
74 | if count == 0 {
75 | return nil
76 | }
77 | if len(sss.shares) == 0 {
78 | return errors.New("cannot write namespace padding shares on an empty SparseShareSplitter")
79 | }
80 | lastBlob := sss.shares[len(sss.shares)-1]
81 | lastBlobNs := lastBlob.Namespace()
82 | lastBlobInfo := lastBlob.InfoByte()
83 | nsPaddingShares, err := NamespacePaddingShares(lastBlobNs, lastBlobInfo.Version(), count)
84 | if err != nil {
85 | return err
86 | }
87 | sss.shares = append(sss.shares, nsPaddingShares...)
88 |
89 | return nil
90 | }
91 |
// Export finalizes and returns the underlying shares.
func (sss *SparseShareSplitter) Export() []Share {
	return sss.shares
}
96 |
// Count returns the current number of shares that will be made if exporting.
// Unlike CompactShareSplitter.Count, there is no pending share: every written
// blob is already fully split into shares.
func (sss *SparseShareSplitter) Count() int {
	return len(sss.shares)
}
101 |
--------------------------------------------------------------------------------
/share/split_sparse_shares_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | "github.com/stretchr/testify/require"
9 | )
10 |
// TestSparseShareSplitter tests that the spare share splitter can split blobs
// with different namespaces.
func TestSparseShareSplitter(t *testing.T) {
	ns1 := MustNewV0Namespace(bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize))
	ns2 := MustNewV0Namespace(bytes.Repeat([]byte{2}, NamespaceVersionZeroIDSize))
	signer := bytes.Repeat([]byte{1}, SignerSize)

	// one v0 blob (no signer) and one v1 blob (with signer)
	blob1, err := NewV0Blob(ns1, []byte("data1"))
	require.NoError(t, err)
	blob2, err := NewV1Blob(ns2, []byte("data2"), signer)
	require.NoError(t, err)
	sss := NewSparseShareSplitter()

	err = sss.Write(blob1)
	assert.NoError(t, err)

	err = sss.Write(blob2)
	assert.NoError(t, err)

	// each small blob fits in a single share
	got := sss.Export()
	assert.Len(t, got, 2)

	assert.Equal(t, ShareVersionZero, got[0].Version())
	assert.Equal(t, ShareVersionOne, got[1].Version())
	assert.Equal(t, signer, GetSigner(got[1]))
	assert.Nil(t, GetSigner(got[0])) // this is v0 so should not have any signer attached
}
38 |
39 | func TestWriteNamespacePaddingShares(t *testing.T) {
40 | ns1 := MustNewV0Namespace(bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize))
41 | blob1, err := NewV0Blob(ns1, []byte("data1"))
42 | require.NoError(t, err)
43 |
44 | sss := NewSparseShareSplitter()
45 |
46 | err = sss.Write(blob1)
47 | assert.NoError(t, err)
48 | err = sss.WriteNamespacePaddingShares(1)
49 | assert.NoError(t, err)
50 |
51 | // got is expected to be [blob1, padding]
52 | got := sss.Export()
53 | assert.Len(t, got, 2)
54 |
55 | // verify that the second share is padding
56 | assert.True(t, got[1].IsPadding())
57 |
58 | // verify that the padding share has the same share version as blob1
59 | version := got[1].Version()
60 | assert.Equal(t, version, ShareVersionZero)
61 | }
62 |
--------------------------------------------------------------------------------
/share/utils.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | )
7 |
// delimLen returns how many bytes the uvarint length delimiter for a unit of
// the given size occupies.
func delimLen(size uint64) int {
	var scratch [binary.MaxVarintLen64]byte
	return binary.PutUvarint(scratch[:], size)
}
13 |
// rawTxSize returns the raw tx size that can be used to construct a
// tx of desiredSize bytes. This function is useful in tests to account for
// the length delimiter that is prefixed to a tx when it is converted into
// a compact share.
func rawTxSize(desiredSize int) int {
	// subtract the width of the uvarint length delimiter for desiredSize
	scratch := make([]byte, binary.MaxVarintLen64)
	delimiterLen := binary.PutUvarint(scratch, uint64(desiredSize))
	return desiredSize - delimiterLen
}
21 |
// zeroPadIfNecessary pads the share with trailing zero bytes if the provided
// share has fewer bytes than width. Returns the share unmodified if the
// len(share) is greater than or equal to width.
func zeroPadIfNecessary(share []byte, width int) (padded []byte, bytesOfPadding int) {
	missingBytes := width - len(share)
	if missingBytes <= 0 {
		return share, 0
	}
	return append(share, make([]byte, missingBytes)...), missingBytes
}
36 |
// parseDelimiter attempts to parse a varint length delimiter from the input
// provided. It returns the input without the len delimiter bytes, the length
// parsed from the varint, and optionally an error. Unit length delimiters are
// used in compact shares where units (i.e. a transaction) are prefixed with a
// length delimiter that is encoded as a varint. Input should not contain the
// namespace ID or info byte of a share. An empty input parses as a zero-length
// unit.
func parseDelimiter(input []byte) (inputWithoutLenDelimiter []byte, unitLen uint64, err error) {
	if len(input) == 0 {
		return input, 0, nil
	}

	// Read the uvarint directly from the input. The previous implementation
	// zero-padded the input to MaxVarintLen64 before decoding and then
	// re-encoded the value to recover the delimiter width; that silently
	// mis-parsed truncated delimiters (the padding terminated them early) and
	// miscounted non-minimal encodings. Reading from the input itself surfaces
	// truncation as an error and reports the exact number of bytes consumed.
	r := bytes.NewReader(input)
	dataLen, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, 0, err
	}

	// bytes consumed by the delimiter = original length minus bytes remaining
	n := len(input) - r.Len()

	// return the input without the length delimiter
	return input[n:], dataLen, nil
}
69 |
70 | // AvailableBytesFromCompactShares returns the maximum amount of bytes that could fit in `n` compact shares.
71 | // Note that all compact shares are length prefixed. To account for this use `RawTxSize`.
72 | func AvailableBytesFromCompactShares(n int) int {
73 | if n <= 0 {
74 | return 0
75 | }
76 | if n == 1 {
77 | return FirstCompactShareContentSize
78 | }
79 | return (n-1)*ContinuationCompactShareContentSize + FirstCompactShareContentSize
80 | }
81 |
82 | // AvailableBytesFromSparseShares returns the maximum amount of bytes that could fit in `n` sparse shares
83 | func AvailableBytesFromSparseShares(n int) int {
84 | if n <= 0 {
85 | return 0
86 | }
87 | if n == 1 {
88 | return FirstSparseShareContentSize
89 | }
90 | return (n-1)*ContinuationSparseShareContentSize + FirstSparseShareContentSize
91 | }
92 |
--------------------------------------------------------------------------------
/share/utils_test.go:
--------------------------------------------------------------------------------
1 | package share
2 |
3 | import (
4 | "reflect"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | )
9 |
10 | func Test_zeroPadIfNecessary(t *testing.T) {
11 | type args struct {
12 | share []byte
13 | width int
14 | }
15 | tests := []struct {
16 | name string
17 | args args
18 | wantPadded []byte
19 | wantBytesOfPadding int
20 | }{
21 | {"pad", args{[]byte{1, 2, 3}, 6}, []byte{1, 2, 3, 0, 0, 0}, 3},
22 | {"not necessary (equal to shareSize)", args{[]byte{1, 2, 3}, 3}, []byte{1, 2, 3}, 0},
23 | {"not necessary (greater shareSize)", args{[]byte{1, 2, 3}, 2}, []byte{1, 2, 3}, 0},
24 | }
25 | for _, tt := range tests {
26 | t.Run(tt.name, func(t *testing.T) {
27 | gotPadded, gotBytesOfPadding := zeroPadIfNecessary(tt.args.share, tt.args.width)
28 | if !reflect.DeepEqual(gotPadded, tt.wantPadded) {
29 | t.Errorf("zeroPadIfNecessary gotPadded %v, wantPadded %v", gotPadded, tt.wantPadded)
30 | }
31 | if gotBytesOfPadding != tt.wantBytesOfPadding {
32 | t.Errorf("zeroPadIfNecessary gotBytesOfPadding %v, wantBytesOfPadding %v", gotBytesOfPadding, tt.wantBytesOfPadding)
33 | }
34 | })
35 | }
36 | }
37 |
38 | func TestParseDelimiter(t *testing.T) {
39 | for i := uint64(0); i < 100; i++ {
40 | tx := generateRandomTxs(1, int(i))[0]
41 | input, err := MarshalDelimitedTx(tx)
42 | if err != nil {
43 | panic(err)
44 | }
45 | res, txLen, err := parseDelimiter(input)
46 | if err != nil {
47 | panic(err)
48 | }
49 | assert.Equal(t, i, txLen)
50 | assert.Equal(t, tx, res)
51 | }
52 | }
53 |
// TestAvailableBytesFromCompactShares verifies the maximum number of bytes
// that fit in n compact shares, including the non-positive input case.
func TestAvailableBytesFromCompactShares(t *testing.T) {
	testCases := []struct {
		name          string
		numShares     int
		expectedBytes int
	}{
		{
			name:          "1 share",
			numShares:     1,
			expectedBytes: 474,
		},
		{
			name:          "10 shares",
			numShares:     10,
			expectedBytes: 4776,
		},
		{
			name:          "negative",
			numShares:     -1,
			expectedBytes: 0,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expectedBytes, AvailableBytesFromCompactShares(tc.numShares))
		})
	}
}
83 |
84 | func TestAvailableBytesFromSparseShares(t *testing.T) {
85 | testCases := []struct {
86 | name string
87 | numShares int
88 | expectedBytes int
89 | }{
90 | {
91 | name: "1 share",
92 | numShares: 1,
93 | expectedBytes: 478,
94 | },
95 | {
96 | name: "10 shares",
97 | numShares: 10,
98 | expectedBytes: 4816,
99 | },
100 | {
101 | name: "negative",
102 | numShares: -1,
103 | },
104 | }
105 |
106 | for _, tc := range testCases {
107 | t.Run(tc.name, func(t *testing.T) {
108 | assert.Equal(t, tc.expectedBytes, AvailableBytesFromSparseShares(tc.numShares))
109 | })
110 | }
111 | }
112 |
--------------------------------------------------------------------------------
/square.go:
--------------------------------------------------------------------------------
1 | // Package square implements the logic to construct the original data square
2 | // based on a list of transactions.
3 | package square
4 |
5 | import (
6 | "bytes"
7 | "fmt"
8 | "math"
9 |
10 | "github.com/celestiaorg/go-square/v2/share"
11 | "github.com/celestiaorg/go-square/v2/tx"
12 | "golang.org/x/exp/constraints"
13 | )
14 |
// Build takes an arbitrary long list of (prioritized) transactions and builds a square that is never
// greater than maxSquareSize. It also returns the ordered list of transactions that are present
// in the square and which have all PFBs trailing regular transactions. Note, this function does
// not check the underlying validity of the transactions.
// Errors should not occur and would reflect a violation in an invariant.
func Build(txs [][]byte, maxSquareSize, subtreeRootThreshold int) (Square, [][]byte, error) {
	builder, err := NewBuilder(maxSquareSize, subtreeRootThreshold)
	if err != nil {
		return nil, nil, err
	}
	normalTxs := make([][]byte, 0, len(txs))
	blobTxs := make([][]byte, 0, len(txs))
	for idx, txBytes := range txs {
		blobTx, isBlobTx, err := tx.UnmarshalBlobTx(txBytes)
		// only a malformed blob tx is fatal; for a non-blob tx (isBlobTx is
		// false) the unmarshal error is deliberately ignored
		if err != nil && isBlobTx {
			return nil, nil, fmt.Errorf("unmarshalling blob tx at index %d: %w", idx, err)
		}
		if isBlobTx {
			// Append* returns false when the tx does not fit; such txs are dropped
			if builder.AppendBlobTx(blobTx) {
				blobTxs = append(blobTxs, txBytes)
			}
		} else {
			if builder.AppendTx(txBytes) {
				normalTxs = append(normalTxs, txBytes)
			}
		}
	}
	square, err := builder.Export()
	// blob txs trail normal txs in the returned ordering
	return square, append(normalTxs, blobTxs...), err
}
45 |
46 | // Construct takes the exact list of ordered transactions and constructs a square, validating that
47 | // - all blobTxs are ordered after non-blob transactions
48 | // - the transactions don't collectively exceed the maxSquareSize.
49 | //
50 | // Note that this function does not check the underlying validity of
51 | // the transactions.
52 | func Construct(txs [][]byte, maxSquareSize, subtreeRootThreshold int) (Square, error) {
53 | builder, err := NewBuilder(maxSquareSize, subtreeRootThreshold, txs...)
54 | if err != nil {
55 | return nil, err
56 | }
57 | return builder.Export()
58 | }
59 |
60 | // Deconstruct takes a square and returns the ordered list of block
61 | // transactions that constructed that square
62 | //
63 | // This method uses the wrapped pfbs in the PFB namespace to identify and
64 | // decode the blobs. Data that may be included in the square but isn't
65 | // recognised by the square construction algorithm will be ignored
66 | func Deconstruct(s Square, decoder PFBDecoder) ([][]byte, error) {
67 | if s.IsEmpty() {
68 | return [][]byte{}, nil
69 | }
70 |
71 | // Work out which range of shares are non-pfb transactions
72 | // and which ones are pfb transactions
73 | txShareRange := share.GetShareRangeForNamespace(s, share.TxNamespace)
74 | if txShareRange.Start != 0 {
75 | return nil, fmt.Errorf("expected txs to start at index 0, but got %d", txShareRange.Start)
76 | }
77 |
78 | wpfbShareRange := share.GetShareRangeForNamespace(s[txShareRange.End:], share.PayForBlobNamespace)
79 | // If there are no pfb transactions, then we can just return the txs
80 | if wpfbShareRange.IsEmpty() {
81 | return share.ParseTxs(s[txShareRange.Start:txShareRange.End])
82 | }
83 |
84 | // We expect pfb transactions to come directly after non-pfb transactions
85 | if wpfbShareRange.Start != 0 {
86 | return nil, fmt.Errorf("expected PFBs to start directly after non PFBs at index %d, but got %d", txShareRange.End, wpfbShareRange.Start)
87 | }
88 | wpfbShareRange.Add(txShareRange.End)
89 |
90 | // Parse both txs
91 | txs, err := share.ParseTxs(s[txShareRange.Start:txShareRange.End])
92 | if err != nil {
93 | return nil, err
94 | }
95 |
96 | wpfbs, err := share.ParseTxs(s[wpfbShareRange.Start:wpfbShareRange.End])
97 | if err != nil {
98 | return nil, err
99 | }
100 |
101 | // loop through the wrapped pfbs and generate the original
102 | // blobTx that they derive from
103 | for i, wpfbBytes := range wpfbs {
104 | wpfb, isWpfb := tx.UnmarshalIndexWrapper(wpfbBytes)
105 | if !isWpfb {
106 | return nil, fmt.Errorf("expected wrapped PFB at index %d", i)
107 | }
108 | if len(wpfb.ShareIndexes) == 0 {
109 | return nil, fmt.Errorf("wrapped PFB %d has no blobs attached", i)
110 | }
111 | blobSizes, err := decoder(wpfb.Tx)
112 | if err != nil {
113 | return nil, err
114 | }
115 | if len(blobSizes) != len(wpfb.ShareIndexes) {
116 | return nil, fmt.Errorf("expected PFB to have %d blob sizes, but got %d", len(wpfb.ShareIndexes), len(blobSizes))
117 | }
118 |
119 | blobs := make([]*share.Blob, len(wpfb.ShareIndexes))
120 | for j, shareIndex := range wpfb.ShareIndexes {
121 | end := int(shareIndex) + share.SparseSharesNeeded(blobSizes[j])
122 | parsedBlobs, err := share.ParseBlobs(s[shareIndex:end])
123 | if err != nil {
124 | return nil, err
125 | }
126 | if len(parsedBlobs) != 1 {
127 | return nil, fmt.Errorf("expected to parse a single blob, but got %d", len(blobs))
128 | }
129 |
130 | blobs[j] = parsedBlobs[0]
131 | }
132 |
133 | txBytes, err := tx.MarshalBlobTx(wpfb.Tx, blobs...)
134 | if err != nil {
135 | return nil, err
136 | }
137 | txs = append(txs, txBytes)
138 | }
139 |
140 | return txs, nil
141 | }
142 |
143 | // TxShareRange returns the range of share indexes that the tx, specified by txIndex, occupies.
144 | // The range is end exclusive.
145 | func TxShareRange(txs [][]byte, txIndex, maxSquareSize, subtreeRootThreshold int) (share.Range, error) {
146 | builder, err := NewBuilder(maxSquareSize, subtreeRootThreshold, txs...)
147 | if err != nil {
148 | return share.Range{}, err
149 | }
150 |
151 | return builder.FindTxShareRange(txIndex)
152 | }
153 |
154 | // BlobShareRange returns the range of share indexes that the blob, identified by txIndex and blobIndex, occupies.
155 | // The range is end exclusive.
156 | func BlobShareRange(txs [][]byte, txIndex, blobIndex, maxSquareSize, subtreeRootThreshold int) (share.Range, error) {
157 | builder, err := NewBuilder(maxSquareSize, subtreeRootThreshold, txs...)
158 | if err != nil {
159 | return share.Range{}, err
160 | }
161 |
162 | start, err := builder.FindBlobStartingIndex(txIndex, blobIndex)
163 | if err != nil {
164 | return share.Range{}, err
165 | }
166 |
167 | blobLen, err := builder.BlobShareLength(txIndex, blobIndex)
168 | if err != nil {
169 | return share.Range{}, err
170 | }
171 | end := start + blobLen
172 |
173 | return share.NewRange(start, end), nil
174 | }
175 |
// Square is a 2D square of shares with symmetrical sides that are always a
// power of 2. It is stored as a flat slice of shares.
type Square []share.Share
178 |
// Size returns the size of the sides of a square, i.e. the number of shares
// per row or column.
func (s Square) Size() int {
	return Size(len(s))
}
183 |
// Size returns the size of the row or column in shares of a square. This
// function is currently a wrapper around the da packages equivalent function to
// avoid breaking the api. In future versions there will not be a copy of this
// code here.
func Size(length int) int {
	// the side is the smallest power of two >= ceil(sqrt(length))
	return RoundUpPowerOfTwo(int(math.Ceil(math.Sqrt(float64(length)))))
}
191 |
192 | // RoundUpPowerOfTwo returns the next power of two greater than or equal to input.
193 | func RoundUpPowerOfTwo[I constraints.Integer](input I) I {
194 | var result I = 1
195 | for result < input {
196 | result <<= 1
197 | }
198 | return result
199 | }
200 |
201 | // Equals returns true if two squares are equal
202 | func (s Square) Equals(other Square) bool {
203 | if len(s) != len(other) {
204 | return false
205 | }
206 | for i := range s {
207 | if !bytes.Equal(s[i].ToBytes(), other[i].ToBytes()) {
208 | return false
209 | }
210 | }
211 | return true
212 | }
213 |
214 | // WrappedPFBs returns the wrapped PFBs in a square
215 | func (s Square) WrappedPFBs() ([][]byte, error) {
216 | wpfbShareRange := share.GetShareRangeForNamespace(s, share.PayForBlobNamespace)
217 | if wpfbShareRange.IsEmpty() {
218 | return [][]byte{}, nil
219 | }
220 | return share.ParseTxs(s[wpfbShareRange.Start:wpfbShareRange.End])
221 | }
222 |
// IsEmpty reports whether the square is the canonical empty square, i.e. it
// equals EmptySquare().
func (s Square) IsEmpty() bool {
	return s.Equals(EmptySquare())
}
226 |
// EmptySquare returns a 1x1 square with a single tail padding share
func EmptySquare() Square {
	// share.MinShareCount shares of tail padding; presumably 1 — see share consts
	return share.TailPaddingShares(share.MinShareCount)
}
231 |
// WriteSquare assembles a full data square from the three share writers:
// compact tx shares first, then compact PFB shares, then reserved padding up
// to nonReservedStart, then sparse blob shares, and finally tail padding to
// fill the square. It errors if the writers' contents cannot fit.
func WriteSquare(
	txWriter, pfbWriter *share.CompactShareSplitter,
	blobWriter *share.SparseShareSplitter,
	nonReservedStart, squareSize int,
) (Square, error) {
	totalShares := squareSize * squareSize
	// layout: [txs | pfbs | reserved padding | blobs | tail padding]
	pfbStartIndex := txWriter.Count()
	paddingStartIndex := pfbStartIndex + pfbWriter.Count()
	if nonReservedStart < paddingStartIndex {
		return nil, fmt.Errorf("nonReservedStart %d is too small to fit all PFBs and txs", nonReservedStart)
	}
	padding := share.ReservedPaddingShares(nonReservedStart - paddingStartIndex)
	endOfLastBlob := nonReservedStart + blobWriter.Count()
	if totalShares < endOfLastBlob {
		return nil, fmt.Errorf("square size %d is too small to fit all blobs", totalShares)
	}

	txShares, err := txWriter.Export()
	if err != nil {
		return nil, fmt.Errorf("failed to export tx shares: %w", err)
	}

	pfbShares, err := pfbWriter.Export()
	if err != nil {
		return nil, fmt.Errorf("failed to export pfb shares: %w", err)
	}

	square := make([]share.Share, totalShares)
	copy(square, txShares)
	copy(square[pfbStartIndex:], pfbShares)
	// reserved padding and blobs are only written when there are blobs at all
	if blobWriter.Count() > 0 {
		copy(square[paddingStartIndex:], padding)
		copy(square[nonReservedStart:], blobWriter.Export())
	}
	// fill the remainder of the square with tail padding
	if totalShares > endOfLastBlob {
		copy(square[endOfLastBlob:], share.TailPaddingShares(totalShares-endOfLastBlob))
	}

	return square, nil
}
272 |
// PFBDecoder decodes a PFB transaction's bytes into the sizes of the blobs it
// commits to; used by Deconstruct to work out how many shares each blob spans.
type PFBDecoder func(txBytes []byte) ([]uint32, error)
274 |
--------------------------------------------------------------------------------
/square_benchmark_test.go:
--------------------------------------------------------------------------------
1 | package square_test
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/celestiaorg/go-square/v2"
8 | "github.com/stretchr/testify/require"
9 | )
10 |
// BenchmarkSquareConstruct measures square construction from pre-ordered
// transaction sets of increasing size.
func BenchmarkSquareConstruct(b *testing.B) {
	for _, txCount := range []int{10, 100, 1000} {
		b.Run(fmt.Sprintf("txCount=%d", txCount), func(b *testing.B) {
			b.ReportAllocs()
			// generate fixtures outside the timed region
			txs := generateOrderedTxs(txCount/2, txCount/2, 1, 1024)
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				_, err := square.Construct(txs, defaultMaxSquareSize, defaultSubtreeRootThreshold)
				require.NoError(b, err)
			}
		})
	}
}
24 |
// BenchmarkSquareBuild measures square building along two axes: the number of
// (mixed) transactions, and the blob size at a fixed transaction count.
func BenchmarkSquareBuild(b *testing.B) {
	for _, txCount := range []int{10, 100, 1000, 10000} {
		b.Run(fmt.Sprintf("txCount=%d", txCount), func(b *testing.B) {
			b.ReportAllocs()
			// generate fixtures outside the timed region
			txs := generateMixedTxs(txCount/2, txCount/2, 1, 1024)
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				_, _, err := square.Build(txs, defaultMaxSquareSize, defaultSubtreeRootThreshold)
				require.NoError(b, err)
			}
		})
	}
	const txCount = 10
	for _, blobSize := range []int{10, 100, 1000, 10000} {
		b.Run(fmt.Sprintf("blobSize=%d", blobSize), func(b *testing.B) {
			b.ReportAllocs()
			txs := generateMixedTxs(0, txCount, 1, blobSize)
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				_, _, err := square.Build(txs, defaultMaxSquareSize, defaultSubtreeRootThreshold)
				require.NoError(b, err)
			}
		})
	}
}
50 |
--------------------------------------------------------------------------------
/square_test.go:
--------------------------------------------------------------------------------
1 | package square_test
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "testing"
7 |
8 | "github.com/celestiaorg/go-square/v2"
9 | "github.com/celestiaorg/go-square/v2/internal/test"
10 | "github.com/celestiaorg/go-square/v2/share"
11 | "github.com/celestiaorg/go-square/v2/tx"
12 | "github.com/stretchr/testify/assert"
13 | "github.com/stretchr/testify/require"
14 | )
15 |
16 | const (
17 | mebibyte = 1_048_576 // one mebibyte in bytes
18 | defaultMaxSquareSize = 128
19 | defaultSubtreeRootThreshold = 64
20 | )
21 |
// TestSquareConstruction exercises failure modes of square.Construct:
// mis-ordered txs, undersized squares, and an oversized single blob.
func TestSquareConstruction(t *testing.T) {
	sendTxs := test.GenerateTxs(250, 250, 250)
	pfbTxs := test.GenerateBlobTxs(10_000, 1, 1024)
	t.Run("normal transactions after PFB transactions", func(t *testing.T) {
		txs := sendTxs[:5]
		// NOTE(review): this append writes through txs, which aliases
		// sendTxs' backing array, so it may clobber sendTxs[5:] used by the
		// next subtest — consider cloning; confirm intent.
		txs = append(txs, append(pfbTxs, txs...)...)
		_, err := square.Construct(txs, defaultMaxSquareSize, defaultSubtreeRootThreshold)
		require.Error(t, err)
	})
	t.Run("not enough space to append transactions", func(t *testing.T) {
		_, err := square.Construct(sendTxs, 2, defaultSubtreeRootThreshold)
		require.Error(t, err)
		_, err = square.Construct(pfbTxs, 2, defaultSubtreeRootThreshold)
		require.Error(t, err)
	})
	t.Run("construction should fail if a single PFB tx contains a blob that is too large to fit in the square", func(t *testing.T) {
		pfbTxs := test.GenerateBlobTxs(1, 1, 2*mebibyte)
		_, err := square.Construct(pfbTxs, 64, defaultSubtreeRootThreshold)
		require.Error(t, err)
	})
}
43 |
// TestSquareTxShareRange checks the (end-exclusive) share ranges that
// individual txs of various sizes occupy within a square.
func TestSquareTxShareRange(t *testing.T) {
	type test struct {
		name      string
		txs       [][]byte
		index     int
		wantStart int
		wantEnd   int
		expectErr bool
	}

	// txs sized to span one, two, and three compact shares respectively
	txOne := []byte{0x1}
	txTwo := bytes.Repeat([]byte{2}, 600)
	txThree := bytes.Repeat([]byte{3}, 1000)

	testCases := []test{
		{
			name:      "txOne occupies shares 0 to 0",
			txs:       [][]byte{txOne},
			index:     0,
			wantStart: 0,
			wantEnd:   1,
			expectErr: false,
		},
		{
			name:      "txTwo occupies shares 0 to 1",
			txs:       [][]byte{txTwo},
			index:     0,
			wantStart: 0,
			wantEnd:   2,
			expectErr: false,
		},
		{
			name:      "txThree occupies shares 0 to 2",
			txs:       [][]byte{txThree},
			index:     0,
			wantStart: 0,
			wantEnd:   3,
			expectErr: false,
		},
		{
			name:      "txThree occupies shares 1 to 3",
			txs:       [][]byte{txOne, txTwo, txThree},
			index:     2,
			wantStart: 1,
			wantEnd:   4,
			expectErr: false,
		},
		{
			name:      "invalid index",
			txs:       [][]byte{txOne, txTwo, txThree},
			index:     3,
			wantStart: 0,
			wantEnd:   0,
			expectErr: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			shareRange, err := square.TxShareRange(tc.txs, tc.index, 128, 64)
			if tc.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
			require.Equal(t, tc.wantStart, shareRange.Start)
			require.Equal(t, tc.wantEnd, shareRange.End)
		})
	}
}
114 |
115 | func TestSquareBlobShareRange(t *testing.T) {
116 | txs := test.GenerateBlobTxs(10, 1, 1024)
117 |
118 | builder, err := square.NewBuilder(defaultMaxSquareSize, defaultSubtreeRootThreshold, txs...)
119 | require.NoError(t, err)
120 |
121 | dataSquare, err := builder.Export()
122 | require.NoError(t, err)
123 |
124 | for pfbIdx, txBytes := range txs {
125 | blobTx, isBlobTx, err := tx.UnmarshalBlobTx(txBytes)
126 | require.NoError(t, err)
127 | require.True(t, isBlobTx)
128 | for blobIdx := range blobTx.Blobs {
129 | shareRange, err := square.BlobShareRange(txs, pfbIdx, blobIdx, defaultMaxSquareSize, defaultSubtreeRootThreshold)
130 | require.NoError(t, err)
131 | require.LessOrEqual(t, shareRange.End, len(dataSquare))
132 | blobShares := dataSquare[shareRange.Start:shareRange.End]
133 | blobSharesBytes, err := rawData(blobShares)
134 | require.NoError(t, err)
135 | require.True(t, bytes.Contains(blobSharesBytes, blobTx.Blobs[blobIdx].Data()))
136 | }
137 | }
138 |
139 | // error on out of bounds cases
140 | _, err = square.BlobShareRange(txs, -1, 0, defaultMaxSquareSize, defaultSubtreeRootThreshold)
141 | require.Error(t, err)
142 |
143 | _, err = square.BlobShareRange(txs, 0, -1, defaultMaxSquareSize, defaultSubtreeRootThreshold)
144 | require.Error(t, err)
145 |
146 | _, err = square.BlobShareRange(txs, 10, 0, defaultMaxSquareSize, defaultSubtreeRootThreshold)
147 | require.Error(t, err)
148 |
149 | _, err = square.BlobShareRange(txs, 0, 10, defaultMaxSquareSize, defaultSubtreeRootThreshold)
150 | require.Error(t, err)
151 | }
152 |
153 | func TestSquareDeconstruct(t *testing.T) {
154 | t.Run("ConstructDeconstructParity", func(t *testing.T) {
155 | // 8192 -> square size 128
156 | for _, numTxs := range []int{2, 128, 1024, 8192} {
157 | t.Run(fmt.Sprintf("%d", numTxs), func(t *testing.T) {
158 | txs := generateOrderedTxs(numTxs/2, numTxs/2, 1, 800)
159 | dataSquare, err := square.Construct(txs, defaultMaxSquareSize, defaultSubtreeRootThreshold)
160 | require.NoError(t, err)
161 | recomputedTxs, err := square.Deconstruct(dataSquare, test.DecodeMockPFB)
162 | require.NoError(t, err)
163 | require.Equal(t, txs, recomputedTxs)
164 | })
165 | }
166 | })
167 | t.Run("NoPFBs", func(t *testing.T) {
168 | const numTxs = 10
169 | txs := test.GenerateTxs(250, 250, numTxs)
170 | dataSquare, err := square.Construct(txs, defaultMaxSquareSize, defaultSubtreeRootThreshold)
171 | require.NoError(t, err)
172 | recomputedTxs, err := square.Deconstruct(dataSquare, test.DecodeMockPFB)
173 | require.NoError(t, err)
174 | require.Equal(t, txs, recomputedTxs)
175 | })
176 | t.Run("PFBsOnly", func(t *testing.T) {
177 | txs := test.GenerateBlobTxs(100, 1, 1024)
178 | dataSquare, err := square.Construct(txs, defaultMaxSquareSize, defaultSubtreeRootThreshold)
179 | require.NoError(t, err)
180 | recomputedTxs, err := square.Deconstruct(dataSquare, test.DecodeMockPFB)
181 | require.NoError(t, err)
182 | require.Equal(t, txs, recomputedTxs)
183 | })
184 | t.Run("EmptySquare", func(t *testing.T) {
185 | tx, err := square.Deconstruct(square.EmptySquare(), test.DecodeMockPFB)
186 | require.NoError(t, err)
187 | require.Equal(t, [][]byte{}, tx)
188 | })
189 | }
190 |
191 | func TestSize(t *testing.T) {
192 | type test struct {
193 | input int
194 | expect int
195 | }
196 | tests := []test{
197 | {input: 0, expect: share.MinSquareSize},
198 | {input: 1, expect: share.MinSquareSize},
199 | {input: 64, expect: 8},
200 | {input: 100, expect: 16},
201 | {input: 1000, expect: 32},
202 | {input: defaultMaxSquareSize * defaultMaxSquareSize, expect: defaultMaxSquareSize},
203 | {input: defaultMaxSquareSize*defaultMaxSquareSize + 1, expect: defaultMaxSquareSize * 2},
204 | }
205 | for i, tt := range tests {
206 | res := square.Size(tt.input)
207 | assert.Equal(t, tt.expect, res, i)
208 | assert.True(t, square.IsPowerOfTwo(res))
209 | }
210 | }
211 |
--------------------------------------------------------------------------------
/tx/blob_tx.go:
--------------------------------------------------------------------------------
1 | package tx
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 |
7 | v1 "github.com/celestiaorg/go-square/v2/proto/blob/v1"
8 | "github.com/celestiaorg/go-square/v2/share"
9 | "google.golang.org/protobuf/proto"
10 | )
11 |
const (
	// ProtoBlobTxTypeID is included in each encoded BlobTx to help prevent
	// decoding binaries that are not actually BlobTxs.
	ProtoBlobTxTypeID = "BLOB"
)

// BlobTx is the decoded form of a blob transaction: the original signed
// transaction bytes together with the blobs it commits to.
type BlobTx struct {
	Tx    []byte        // the raw payment-for-blob transaction
	Blobs []*share.Blob // the blobs referenced by the transaction
}
22 |
23 | // UnmarshalBlobTx attempts to unmarshal a transaction into blob transaction. It returns a boolean
24 | // If the bytes are of type BlobTx and an error if there is a problem with decoding
25 | func UnmarshalBlobTx(tx []byte) (*BlobTx, bool, error) {
26 | bTx := v1.BlobTx{}
27 | err := proto.Unmarshal(tx, &bTx)
28 | if err != nil {
29 | return nil, false, err
30 | }
31 | // perform some quick basic checks to prevent false positives
32 | if bTx.TypeId != ProtoBlobTxTypeID {
33 | return nil, false, errors.New("invalid type id")
34 | }
35 | if len(bTx.Blobs) == 0 {
36 | return nil, true, errors.New("no blobs provided")
37 | }
38 | blobs := make([]*share.Blob, len(bTx.Blobs))
39 | for i, b := range bTx.Blobs {
40 | blobs[i], err = share.NewBlobFromProto(b)
41 | if err != nil {
42 | return nil, true, err
43 | }
44 | }
45 | return &BlobTx{
46 | Tx: bTx.Tx,
47 | Blobs: blobs,
48 | }, true, nil
49 | }
50 |
51 | // MarshalBlobTx creates a BlobTx using a normal transaction and some number of
52 | // blobs.
53 | //
54 | // NOTE: Any checks on the blobs or the transaction must be performed in the
55 | // application
56 | func MarshalBlobTx(tx []byte, blobs ...*share.Blob) ([]byte, error) {
57 | if len(blobs) == 0 {
58 | return nil, errors.New("at least one blob must be provided")
59 | }
60 | // nil check
61 | for i, b := range blobs {
62 | if b == nil || b.IsEmpty() {
63 | return nil, fmt.Errorf("blob %d is nil", i)
64 | }
65 | }
66 | bTx := &v1.BlobTx{
67 | Tx: tx,
68 | Blobs: blobsToProto(blobs),
69 | TypeId: ProtoBlobTxTypeID,
70 | }
71 | return proto.Marshal(bTx)
72 | }
73 |
74 | func blobsToProto(blobs []*share.Blob) []*v1.BlobProto {
75 | pb := make([]*v1.BlobProto, len(blobs))
76 | for i, b := range blobs {
77 | pb[i] = &v1.BlobProto{
78 | NamespaceId: b.Namespace().ID(),
79 | NamespaceVersion: uint32(b.Namespace().Version()),
80 | ShareVersion: uint32(b.ShareVersion()),
81 | Signer: b.Signer(),
82 | Data: b.Data(),
83 | }
84 | }
85 | return pb
86 | }
87 |
--------------------------------------------------------------------------------
/tx/index_wrapper.go:
--------------------------------------------------------------------------------
1 | package tx
2 |
3 | import (
4 | "google.golang.org/protobuf/proto"
5 |
6 | "github.com/celestiaorg/go-square/v2/proto/blob/v1"
7 | )
8 |
const (
	// ProtoIndexWrapperTypeID is included in each encoded IndexWrapper to help prevent
	// decoding binaries that are not actually IndexWrappers.
	ProtoIndexWrapperTypeID = "INDX"
)
14 |
15 | // UnmarshalIndexWrapper attempts to unmarshal the provided transaction into an
16 | // IndexWrapper transaction. It returns true if the provided transaction is an
17 | // IndexWrapper transaction. An IndexWrapper transaction is a transaction that contains
18 | // a MsgPayForBlob that has been wrapped with a share index.
19 | //
20 | // NOTE: protobuf sometimes does not throw an error if the transaction passed is
21 | // not a IndexWrapper, since the protobuf definition for MsgPayForBlob is
22 | // kept in the app, we cannot perform further checks without creating an import
23 | // cycle.
24 | func UnmarshalIndexWrapper(tx []byte) (*v1.IndexWrapper, bool) {
25 | indexWrapper := v1.IndexWrapper{}
26 | // attempt to unmarshal into an IndexWrapper transaction
27 | err := proto.Unmarshal(tx, &indexWrapper)
28 | if err != nil {
29 | return &indexWrapper, false
30 | }
31 | if indexWrapper.TypeId != ProtoIndexWrapperTypeID {
32 | return &indexWrapper, false
33 | }
34 | return &indexWrapper, true
35 | }
36 |
37 | // MarshalIndexWrapper creates a wrapped Tx that includes the original transaction
38 | // and the share index of the start of its blob.
39 | //
40 | // NOTE: must be unwrapped to be a viable sdk.Tx
41 | func MarshalIndexWrapper(tx []byte, shareIndexes ...uint32) ([]byte, error) {
42 | wTx := NewIndexWrapper(tx, shareIndexes...)
43 | return proto.Marshal(wTx)
44 | }
45 |
46 | // NewIndexWrapper creates a new IndexWrapper transaction.
47 | func NewIndexWrapper(tx []byte, shareIndexes ...uint32) *v1.IndexWrapper {
48 | return &v1.IndexWrapper{
49 | Tx: tx,
50 | ShareIndexes: shareIndexes,
51 | TypeId: ProtoIndexWrapperTypeID,
52 | }
53 | }
54 |
--------------------------------------------------------------------------------