├── .github
│   ├── pull_request_template.md
│   └── workflows
│       └── codec.yml
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── build
│   ├── .golangci.yml
│   └── lint.go
├── encoding
│   ├── bitmap.go
│   ├── bitmap_test.go
│   ├── codecv0.go
│   ├── codecv0_test.go
│   ├── codecv0_types.go
│   ├── codecv1.go
│   ├── codecv1_test.go
│   ├── codecv1_types.go
│   ├── codecv2.go
│   ├── codecv2_test.go
│   ├── codecv3.go
│   ├── codecv3_test.go
│   ├── codecv3_types.go
│   ├── codecv4.go
│   ├── codecv4_test.go
│   ├── codecv5.go
│   ├── codecv6.go
│   ├── codecv7.go
│   ├── codecv7_test.go
│   ├── codecv7_types.go
│   ├── da.go
│   ├── da_test.go
│   ├── interfaces.go
│   ├── interfaces_test.go
│   ├── testdata
│   │   ├── blockTrace_02.json
│   │   ├── blockTrace_03.json
│   │   ├── blockTrace_04.json
│   │   ├── blockTrace_05.json
│   │   ├── blockTrace_06.json
│   │   └── blockTrace_07.json
│   └── zstd
│       ├── add_scroll_prefix_in_zstd_related_symbols.sh
│       ├── libscroll_zstd_darwin_arm64.a
│       ├── libscroll_zstd_darwin_arm64.go
│       ├── libscroll_zstd_linux_amd64.a
│       ├── libscroll_zstd_linux_amd64.go
│       ├── libscroll_zstd_linux_arm64.a
│       ├── libscroll_zstd_linux_arm64.go
│       └── zstd.go
├── go.mod
├── go.sum
└── libzstd
    ├── .gitignore
    ├── Cargo.lock
    ├── Cargo.toml
    ├── Makefile
    ├── encoder
    │   ├── Cargo.toml
    │   └── src
    │       └── lib.rs
    ├── rust-toolchain
    └── src
        └── lib.rs
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ### Purpose or design rationale of this PR
2 | 
3 | *Describe your change. Make sure to answer these three questions: What does this PR do? Why does it do it? How does it do it?*
4 | 
5 | 
6 | ### PR title
7 | 
8 | Your PR title must follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#summary) (as we are doing squash merge for each PR), so it must start with one of the following [types](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#type):
9 | 
10 | - [ ] build: Changes that affect the build system or external dependencies (example scopes: yarn, eslint, typescript)
11 | - [ ] ci: Changes to our CI configuration files and scripts (example scopes: vercel, github, cypress)
12 | - [ ] docs: Documentation-only changes
13 | - [ ] feat: A new feature
14 | - [ ] fix: A bug fix
15 | - [ ] perf: A code change that improves performance
16 | - [ ] refactor: A code change that neither fixes a bug, adds a feature, nor improves performance
17 | - [ ] style: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc)
18 | - [ ] test: Adding missing tests or correcting existing tests
19 | 
20 | 
21 | ### Breaking change label
22 | 
23 | Does this PR have the `breaking-change` label?
24 | 25 | - [ ] No, this PR is not a breaking change 26 | - [ ] Yes 27 | -------------------------------------------------------------------------------- /.github/workflows/codec.yml: -------------------------------------------------------------------------------- 1 | name: codec 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | types: 9 | - opened 10 | - reopened 11 | - synchronize 12 | - ready_for_review 13 | 14 | jobs: 15 | check: 16 | if: github.event.pull_request.draft == false 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Install Go 20 | uses: actions/setup-go@v2 21 | with: 22 | go-version: 1.21.x 23 | - name: Checkout code 24 | uses: actions/checkout@v2 25 | - name: Lint 26 | run: | 27 | rm -rf $HOME/.cache/golangci-lint 28 | make lint 29 | goimports-lint: 30 | if: github.event.pull_request.draft == false 31 | runs-on: ubuntu-latest 32 | steps: 33 | - name: Install Go 34 | uses: actions/setup-go@v2 35 | with: 36 | go-version: 1.21.x 37 | - name: Checkout code 38 | uses: actions/checkout@v2 39 | - name: Install goimports 40 | run: go install golang.org/x/tools/cmd/goimports@latest 41 | - run: goimports -local github.com/scroll-tech/da-codec/encoding/ -w . 42 | - run: go mod tidy 43 | # If there are any diffs from goimports or go mod tidy, fail. 44 | - name: Verify no changes from goimports and go mod tidy 45 | run: | 46 | if [ -n "$(git status --porcelain)" ]; then 47 | exit 1 48 | fi 49 | tests: 50 | if: github.event.pull_request.draft == false 51 | runs-on: ubuntu-latest 52 | steps: 53 | - name: Install Go 54 | uses: actions/setup-go@v2 55 | with: 56 | go-version: 1.21.x 57 | - name: Checkout code 58 | uses: actions/checkout@v2 59 | - name: Test codec packages 60 | working-directory: '.' 61 | run: | 62 | make test 63 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # If you prefer the allow list template instead of the deny list, see community template: 2 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore 3 | # 4 | # Binaries for programs and plugins 5 | *.exe 6 | *.exe~ 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Dependency directories (remove the comment below to include it) 18 | # vendor/ 19 | 20 | # Go workspace file 21 | go.work 22 | 23 | # Built binaries 24 | build/bin 25 | 26 | coverage.txt 27 | *.integration.txt 28 | 29 | # Visual Studio Code 30 | .vscode 31 | 32 | # IntelliJ 33 | .idea 34 | 35 | # MacOS 36 | .DS_Store 37 | 38 | # misc 39 | sftp-config.json 40 | *~ 41 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Scroll 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | 
copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: fmt lint test
2 | 
3 | lint:
4 | 	GOBIN=$(PWD)/build/bin go run ./build/lint.go
5 | 
6 | fmt:
7 | 	go mod tidy
8 | 	goimports -w .
9 | 	gofumpt -l -w .
10 | 
11 | test:
12 | 	go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
13 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # da-codec
2 | 
3 | Scroll's DA encoding/decoding libraries.
4 | 
5 | ## Running unit tests
6 | ```
7 | go test -v -race ./...
8 | ```
9 | 
10 | ## FAQ
11 | 
12 | **Q: Why does the repo contain `libscroll_zstd*.a` binary files?**
13 | 
14 | A: This simplifies package installation with `go get`: no additional steps are needed to build `libscroll_zstd*.a`.
15 | 
16 | **Q: Which platforms/architectures are supported?**
17 | 
18 | A: `linux/amd64`, `linux/arm64`, `darwin/arm64`. Pull requests for other platforms/architectures are accepted.
19 | 
20 | **Q: I don't trust the `libscroll_zstd*.a` binary files from the repo, or these files don't work on my OS/ARCH. How do I rebuild them?**
21 | 
22 | A: Just run `cd libzstd && make libzstd` if your OS/ARCH is supported.
23 | 
--------------------------------------------------------------------------------
/build/.golangci.yml:
--------------------------------------------------------------------------------
1 | # Source: https://github.com/golangci/golangci-lint/blob/master/.golangci.example.yml
2 | # options for analysis running
3 | run:
4 |   # default concurrency is the available CPU number
5 |   concurrency: 4
6 | 
7 |   # timeout for analysis, e.g. 30s, 5m, default is 1m
8 |   deadline: 5m
9 | 
10 |   # exit code when at least one issue was found, default is 1
11 |   issues-exit-code: 1
12 | 
13 |   # include test files or not, default is true
14 |   tests: true
15 | 
16 |   # list of build tags, all linters use it. Default is empty list.
17 |   #build-tags:
18 | 
19 |   # which dirs to skip: they won't be analyzed;
20 |   # can use regexp here: generated.*, regexp is applied on full path;
21 |   # default value is empty list, but next dirs are always skipped independently
22 |   # from this option's value:
23 |   #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
24 |   #skip-dirs:
25 | 
26 |   # which files to skip: they will be analyzed, but issues from them
27 |   # won't be reported. Default value is empty list, but there is
28 |   # no need to include all autogenerated files, we confidently recognize
29 |   # autogenerated files. If it's not, please let us know.
30 |   #skip-files:
31 | 
32 |   # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":
From "go help modules": 33 | # If invoked with -mod=readonly, the go command is disallowed from the implicit 34 | # automatic updating of go.mod described above. Instead, it fails when any changes 35 | # to go.mod are needed. This setting is most useful to check that go.mod does 36 | # not need updates, such as in a continuous integration and testing system. 37 | # If invoked with -mod=vendor, the go command assumes that the vendor 38 | # directory holds the correct copies of dependencies and ignores 39 | # the dependency descriptions in go.mod. 40 | #modules-download-mode: (release|readonly|vendor) 41 | 42 | 43 | # output configuration options 44 | output: 45 | # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" 46 | format: colored-line-number 47 | 48 | # print lines of code with issue, default is true 49 | print-issued-lines: true 50 | 51 | # print linter name in the end of issue text, default is true 52 | print-linter-name: true 53 | 54 | 55 | # all available settings of specific linters 56 | linters-settings: 57 | errcheck: 58 | # report about not checking of errors in type assetions: `a := b.(MyStruct)`; 59 | # default is false: such cases aren't reported by default. 60 | check-type-assertions: false 61 | 62 | # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; 63 | # default is false: such cases aren't reported by default. 64 | check-blank: false 65 | 66 | # [deprecated] comma-separated list of pairs of the form pkg:regex 67 | # the regex is used to ignore names within pkg. (default "fmt:.*"). 68 | # see https://github.com/kisielk/errcheck#the-deprecated-method for details 69 | ignore: fmt:.*,io/ioutil:^Read.* 70 | 71 | # path to a file containing a list of functions to exclude from checking 72 | # see https://github.com/kisielk/errcheck#excluding-functions for details 73 | #exclude: /path/to/file.txt 74 | govet: 75 | # report about shadowed variables 76 | check-shadowing: true 77 | 78 | gosec: 79 | disable: 80 | - G108 81 | 82 | golint: 83 | # minimal confidence for issues, default is 0.8 84 | min-confidence: 0.8 85 | gofmt: 86 | # simplify code: gofmt with `-s` option, true by default 87 | simplify: true 88 | goimports: 89 | # put imports beginning with prefix after 3rd-party packages; 90 | # it's a comma-separated list of prefixes 91 | #local-prefixes: github.com/org/project 92 | gocyclo: 93 | # minimal code complexity to report, 30 by default (but we recommend 10-20) 94 | min-complexity: 30 95 | maligned: 96 | # print struct with more effective memory layout or not, false by default 97 | suggest-new: true 98 | dupl: 99 | # tokens count to trigger issue, 150 by default 100 | threshold: 100 101 | goconst: 102 | # minimal length of string constant, 3 by default 103 | min-len: 3 104 | # minimal occurrences count to trigger, 3 by default 105 | min-occurrences: 3 106 | depguard: 107 | rules: 108 | main: 109 | files: 110 | - $all 111 | deny: 112 | - pkg: "github.com/davecgh/go-spew/spew" 113 | misspell: 114 | # Correct spellings using locale preferences for US or UK. 115 | # Default is to use a neutral variety of English. 116 | # Setting locale to US will correct the British spelling of 'colour' to 'color'. 117 | locale: US 118 | ignore-words: 119 | - gossamer 120 | lll: 121 | # max line length, lines longer will be reported. Default is 120. 122 | # '\t' is counted as 1 character by default, and can be changed with the tab-width option 123 | line-length: 120 124 | # tab width in spaces. 
125 |     tab-width: 1
126 |   unused:
127 |     # treat code as a program (not a library) and report unused exported identifiers; default is false.
128 |     # XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
129 |     # if it's called for subdir of a project it can't find funcs usages. All text editor integrations
130 |     # with golangci-lint call it on a directory with the changed file.
131 |     check-exported: false
132 |   unparam:
133 |     # Inspect exported functions, default is false. Set to true if no external program/library imports your code.
134 |     # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
135 |     # if it's called for subdir of a project it can't find external interfaces. All text editor integrations
136 |     # with golangci-lint call it on a directory with the changed file.
137 |     check-exported: false
138 |   nakedret:
139 |     # make an issue if func has more lines of code than this setting and it has naked returns; default is 30
140 |     max-func-lines: 30
141 |   prealloc:
142 |     # XXX: we don't recommend using this linter before doing performance profiling.
143 |     # For most programs usage of prealloc will be a premature optimization.
144 | 
145 |     # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
146 |     # True by default.
147 |     simple: true
148 |     range-loops: true # Report preallocation suggestions on range loops, true by default
149 |     for-loops: false # Report preallocation suggestions on for loops, false by default
150 |   gocritic:
151 |     # Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty
152 |     disabled-checks:
153 |       - regexpMust
154 | 
155 |     # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint run` to see all tags and checks.
156 |     # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags".
157 |     enabled-tags:
158 |       - performance
159 | 
160 |     settings: # settings passed to gocritic
161 |       captLocal: # must be valid enabled check name
162 |         paramsOnly: true
163 |       rangeValCopy:
164 |         sizeThreshold: 32
165 | 
166 | linters:
167 |   enable:
168 |     - megacheck
169 |     - govet
170 |     - gofmt
171 |     - goimports
172 |     - varcheck
173 |     - misspell
174 |     - ineffassign
175 |     - gosimple
176 |     - unconvert
177 |     - goconst
178 |     - errcheck
179 | 
180 |     - staticcheck
181 |     - gosec
182 |     - bodyclose
183 |     - goprintffuncname
184 |     - golint
185 |     - depguard
186 |     - gocyclo
187 |     - unparam
188 |     - exportloopref
189 |     - sqlclosecheck
190 |     - rowserrcheck
191 |     - durationcheck
192 |     - bidichk
193 |     - typecheck
194 |     - unused
195 |   enable-all: false
196 |   disable:
197 | 
198 |   disable-all: false
199 |   presets:
200 | 
201 |   fast: false
202 | 
203 | 
204 | issues:
205 |   # List of regexps of issue texts to exclude, empty list by default.
206 |   # But independently from this option we use default exclude patterns,
207 |   # it can be disabled by `exclude-use-default: false`. To list all
208 |   # excluded by default patterns execute `golangci-lint run --help`
209 |   #exclude:
210 | 
211 |   # Excluding configuration per-path, per-linter, per-text and per-source
212 |   exclude-rules:
213 |     # Exclude some linters from running on tests files.
214 | - path: _test\.go 215 | linters: 216 | - errcheck 217 | - gosec 218 | 219 | # Exclude abi files in bridge-history-api 220 | - path: backend_abi\.go 221 | linters: 222 | - errcheck 223 | - gosec 224 | - golint 225 | 226 | # Exclude some staticcheck messages 227 | - linters: 228 | - staticcheck 229 | text: "SA9003:" 230 | 231 | # Exclude lll issues for long lines with go:generate 232 | - linters: 233 | - lll 234 | source: "^//go:generate " 235 | text: "long-lines" 236 | 237 | # Exclude gosec issues for G108: Profiling endpoint is automatically exposed 238 | - linters: 239 | - gosec 240 | text: "G108" 241 | 242 | - linters: 243 | - wsl 244 | text: "return statements should not be cuddled if block has more than two lines" 245 | 246 | - linters: 247 | - wsl 248 | text: "branch statements should not be cuddled if block has more than two lines" 249 | 250 | - linters: 251 | - wsl 252 | text: "declarations should never be cuddled" 253 | 254 | - linters: 255 | - wsl 256 | text: "expressions should not be cuddled with declarations or returns" 257 | 258 | # Independently from option `exclude` we use default exclude patterns, 259 | # it can be disabled by this option. To list all 260 | # excluded by default patterns execute `golangci-lint run --help`. 261 | # Default value for this option is true. 262 | exclude-use-default: false 263 | 264 | # Maximum issues count per one linter. Set to 0 to disable. Default is 50. 265 | max-per-linter: 0 266 | 267 | # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. 268 | max-same-issues: 0 269 | 270 | # Show only new issues: if there are unstaged changes or untracked files, 271 | # only those changes are analyzed, else only changes in HEAD~ are analyzed. 272 | # It's a super-useful option for integration of golangci-lint into existing 273 | # large codebase. It's not practical to fix all existing issues at the moment 274 | # of integration: much better don't allow issues in new code. 275 | # Default is false. 276 | new: false 277 | -------------------------------------------------------------------------------- /build/lint.go: -------------------------------------------------------------------------------- 1 | //go:build none 2 | // +build none 3 | 4 | package main 5 | 6 | import ( 7 | "flag" 8 | "fmt" 9 | "log" 10 | "os" 11 | "os/exec" 12 | "path/filepath" 13 | "runtime" 14 | ) 15 | 16 | const ( 17 | // GolangCIVersion to be used for linting. 18 | GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.57.2" 19 | ) 20 | 21 | // GOBIN environment variable. 22 | func goBin() string { 23 | if os.Getenv("GOBIN") == "" { 24 | log.Fatal("GOBIN not set") 25 | } 26 | 27 | return os.Getenv("GOBIN") 28 | } 29 | 30 | func main() { 31 | log.SetFlags(log.Lshortfile) 32 | 33 | if _, err := os.Stat(filepath.Join("build", "lint.go")); os.IsNotExist(err) { 34 | log.Fatal("should run build from root dir") 35 | } 36 | 37 | lint() 38 | } 39 | 40 | //nolint:gosec 41 | func lint() { 42 | v := flag.Bool("v", false, "log verbosely") 43 | 44 | // Make sure GOLANGCI is downloaded and available. 45 | argsGet := []string{"install", GolangCIVersion} 46 | cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), argsGet...) 
47 | 
48 | 	out, err := cmd.CombinedOutput()
49 | 	if err != nil {
50 | 		log.Fatalf("could not install golangci-lint: %v\n%s", err, string(out))
51 | 	}
52 | 
53 | 	cmd = exec.Command(filepath.Join(goBin(), "golangci-lint"))
54 | 	cmd.Args = append(cmd.Args, "run", "--config", "build/.golangci.yml", "--timeout", "2m")
55 | 
56 | 	if *v {
57 | 		cmd.Args = append(cmd.Args, "-v")
58 | 	}
59 | 
60 | 	fmt.Println("Linting...")
61 | 	cmd.Stderr, cmd.Stdout = os.Stderr, os.Stdout
62 | 
63 | 	if err := cmd.Run(); err != nil {
64 | 		log.Fatal("Error: could not lint ", "error: ", err, ", cmd: ", cmd)
65 | 	}
66 | }
67 | 
--------------------------------------------------------------------------------
/encoding/bitmap.go:
--------------------------------------------------------------------------------
1 | package encoding
2 | 
3 | import (
4 | 	"fmt"
5 | 	"math/big"
6 | 
7 | 	"github.com/scroll-tech/go-ethereum/core/types"
8 | )
9 | 
10 | // constructSkippedBitmap constructs the skipped L1 message bitmap of the batch.
11 | func constructSkippedBitmap(batchIndex uint64, chunks []*Chunk, totalL1MessagePoppedBefore uint64) ([]byte, uint64, error) {
12 | 	// skipped L1 message bitmap, an array of 256-bit bitmaps
13 | 	var skippedBitmap []*big.Int
14 | 
15 | 	// the first queue index that belongs to this batch
16 | 	baseIndex := totalL1MessagePoppedBefore
17 | 
18 | 	// the next queue index that we need to process
19 | 	nextIndex := totalL1MessagePoppedBefore
20 | 
21 | 	for chunkID, chunk := range chunks {
22 | 		for blockID, block := range chunk.Blocks {
23 | 			for _, tx := range block.Transactions {
24 | 				if tx.Type != types.L1MessageTxType {
25 | 					continue
26 | 				}
27 | 
28 | 				currentIndex := tx.Nonce
29 | 
30 | 				if currentIndex < nextIndex {
31 | 					return nil, 0, fmt.Errorf("unexpected batch payload, expected queue index: %d, got: %d. Batch index: %d, chunk index in batch: %d, block index in chunk: %d, block hash: %v, transaction hash: %v", nextIndex, currentIndex, batchIndex, chunkID, blockID, block.Header.Hash(), tx.TxHash)
32 | 				}
33 | 
34 | 				// mark skipped messages
35 | 				for skippedIndex := nextIndex; skippedIndex < currentIndex; skippedIndex++ {
36 | 					quo := int((skippedIndex - baseIndex) / 256)
37 | 					rem := int((skippedIndex - baseIndex) % 256)
38 | 					for len(skippedBitmap) <= quo {
39 | 						bitmap := big.NewInt(0)
40 | 						skippedBitmap = append(skippedBitmap, bitmap)
41 | 					}
42 | 					skippedBitmap[quo].SetBit(skippedBitmap[quo], rem, 1)
43 | 				}
44 | 
45 | 				// process included message
46 | 				quo := int((currentIndex - baseIndex) / 256)
47 | 				for len(skippedBitmap) <= quo {
48 | 					bitmap := big.NewInt(0)
49 | 					skippedBitmap = append(skippedBitmap, bitmap)
50 | 				}
51 | 
52 | 				nextIndex = currentIndex + 1
53 | 			}
54 | 		}
55 | 	}
56 | 
57 | 	skippedL1MessageBitmap := make([]byte, len(skippedBitmap)*skippedL1MessageBitmapByteSize)
58 | 	for ii, num := range skippedBitmap {
59 | 		bytes := num.Bytes()
60 | 		padding := skippedL1MessageBitmapByteSize - len(bytes)
61 | 		copy(skippedL1MessageBitmap[skippedL1MessageBitmapByteSize*ii+padding:], bytes)
62 | 	}
63 | 
64 | 	return skippedL1MessageBitmap, nextIndex, nil
65 | }
66 | 
67 | // DecodeBitmap decodes the skipped L1 message bitmap of the batch from bytes to big.Int's.
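// Each 32-byte word is big-endian; bit (i mod 256) of word (i / 256) marks
// whether the i-th message counted from the batch's base queue index was
// skipped. A usage sketch (the hex value is the one exercised in
// bitmap_test.go):
//
//	raw, _ := hex.DecodeString("0000000000000000000000000000000000000000000000000000001ffffffbff")
//	skipped, err := DecodeBitmap(raw, 42)
//	if err != nil {
//		// handle malformed bitmap
//	}
//	_ = IsL1MessageSkipped(skipped, 9)  // true: bit 9 is set
//	_ = IsL1MessageSkipped(skipped, 10) // false: bit 10 is clear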
68 | func DecodeBitmap(skippedL1MessageBitmap []byte, totalL1MessagePopped int) ([]*big.Int, error) {
69 | 	length := len(skippedL1MessageBitmap)
70 | 	if length%skippedL1MessageBitmapByteSize != 0 {
71 | 		return nil, fmt.Errorf("invalid skippedL1MessageBitmap length: must be a multiple of %v, got: %v", skippedL1MessageBitmapByteSize, length)
72 | 	}
73 | 	if length*8 < totalL1MessagePopped {
74 | 		return nil, fmt.Errorf("skippedL1MessageBitmap length is too small: must be at least %v, got: %v", (totalL1MessagePopped+7)/8, length)
75 | 	}
76 | 	var skippedBitmap []*big.Int
77 | 	for index := 0; index < length/skippedL1MessageBitmapByteSize; index++ {
78 | 		bitmap := big.NewInt(0).SetBytes(skippedL1MessageBitmap[index*skippedL1MessageBitmapByteSize : index*skippedL1MessageBitmapByteSize+skippedL1MessageBitmapByteSize])
79 | 		skippedBitmap = append(skippedBitmap, bitmap)
80 | 	}
81 | 	return skippedBitmap, nil
82 | }
83 | 
84 | // IsL1MessageSkipped checks if the L1 message at the given index is skipped.
85 | func IsL1MessageSkipped(skippedBitmap []*big.Int, index uint64) bool {
86 | 	if index >= uint64(len(skippedBitmap))*256 {
87 | 		return false
88 | 	}
89 | 	quo := index / 256
90 | 	rem := index % 256
91 | 	return skippedBitmap[quo].Bit(int(rem)) == 1
92 | }
93 | 
--------------------------------------------------------------------------------
/encoding/bitmap_test.go:
--------------------------------------------------------------------------------
1 | package encoding
2 | 
3 | import (
4 | 	"encoding/hex"
5 | 	"testing"
6 | 
7 | 	"github.com/stretchr/testify/assert"
8 | )
9 | 
10 | func TestDecodeBitmap(t *testing.T) {
11 | 	bitmapHex := "0000000000000000000000000000000000000000000000000000001ffffffbff"
12 | 	skippedL1MessageBitmap, err := hex.DecodeString(bitmapHex)
13 | 	assert.NoError(t, err)
14 | 
15 | 	decodedBitmap, err := DecodeBitmap(skippedL1MessageBitmap, 42)
16 | 	assert.NoError(t, err)
17 | 
18 | 	assert.True(t, IsL1MessageSkipped(decodedBitmap, 0))
19 | 	assert.True(t, IsL1MessageSkipped(decodedBitmap, 9))
20 | 	assert.False(t, IsL1MessageSkipped(decodedBitmap, 10))
21 | 	assert.True(t, IsL1MessageSkipped(decodedBitmap, 11))
22 | 	assert.True(t, IsL1MessageSkipped(decodedBitmap, 36))
23 | 	assert.False(t, IsL1MessageSkipped(decodedBitmap, 37))
24 | 	assert.False(t, IsL1MessageSkipped(decodedBitmap, 38))
25 | 	assert.False(t, IsL1MessageSkipped(decodedBitmap, 39))
26 | 	assert.False(t, IsL1MessageSkipped(decodedBitmap, 40))
27 | 	assert.False(t, IsL1MessageSkipped(decodedBitmap, 41))
28 | 
29 | 	_, err = DecodeBitmap([]byte{0x00}, 8)
30 | 	assert.Error(t, err)
31 | 
32 | 	_, err = DecodeBitmap([]byte{0x00, 0x00, 0x00, 0x00}, 33)
33 | 	assert.Error(t, err)
34 | }
35 | 
--------------------------------------------------------------------------------
/encoding/codecv0.go:
--------------------------------------------------------------------------------
1 | package encoding
2 | 
3 | import (
4 | 	"encoding/binary"
5 | 	"errors"
6 | 	"fmt"
7 | 	"math"
8 | 
9 | 	"github.com/scroll-tech/go-ethereum/common"
10 | 	"github.com/scroll-tech/go-ethereum/core/types"
11 | 	"github.com/scroll-tech/go-ethereum/crypto"
12 | 	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
13 | )
14 | 
15 | type DACodecV0 struct{}
16 | 
17 | // codecv0MaxNumChunks is the maximum number of chunks that a batch can contain.
18 | const codecv0MaxNumChunks = 15
19 | 
20 | // Version returns the codec version.
21 | func (d *DACodecV0) Version() CodecVersion { 22 | return CodecV0 23 | } 24 | 25 | // MaxNumChunksPerBatch returns the maximum number of chunks per batch. 26 | func (d *DACodecV0) MaxNumChunksPerBatch() int { 27 | return codecv0MaxNumChunks 28 | } 29 | 30 | // NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before. 31 | func (d *DACodecV0) NewDABlock(block *Block, totalL1MessagePoppedBefore uint64) (DABlock, error) { 32 | if !block.Header.Number.IsUint64() { 33 | return nil, errors.New("block number is not uint64") 34 | } 35 | 36 | // note: numL1Messages includes skipped messages 37 | numL1Messages := block.NumL1Messages(totalL1MessagePoppedBefore) 38 | if numL1Messages > math.MaxUint16 { 39 | return nil, errors.New("number of L1 messages exceeds max uint16") 40 | } 41 | 42 | // note: numTransactions includes skipped messages 43 | numL2Transactions := block.NumL2Transactions() 44 | numTransactions := numL1Messages + numL2Transactions 45 | if numTransactions > math.MaxUint16 { 46 | return nil, errors.New("number of transactions exceeds max uint16") 47 | } 48 | 49 | daBlock := newDABlockV0( 50 | block.Header.Number.Uint64(), // number 51 | block.Header.Time, // timestamp 52 | block.Header.BaseFee, // baseFee 53 | block.Header.GasLimit, // gasLimit 54 | uint16(numTransactions), // numTransactions 55 | uint16(numL1Messages), // numL1Messages 56 | ) 57 | 58 | return daBlock, nil 59 | } 60 | 61 | // NewDAChunk creates a new DAChunk from the given Chunk and the total number of L1 messages popped before. 62 | func (d *DACodecV0) NewDAChunk(chunk *Chunk, totalL1MessagePoppedBefore uint64) (DAChunk, error) { 63 | if chunk == nil { 64 | return nil, errors.New("chunk is nil") 65 | } 66 | 67 | if len(chunk.Blocks) == 0 { 68 | return nil, errors.New("number of blocks is 0") 69 | } 70 | 71 | if len(chunk.Blocks) > math.MaxUint8 { 72 | return nil, fmt.Errorf("number of blocks (%d) exceeds maximum allowed (%d)", len(chunk.Blocks), math.MaxUint8) 73 | } 74 | 75 | blocks := make([]DABlock, 0, len(chunk.Blocks)) 76 | txs := make([][]*types.TransactionData, 0, len(chunk.Blocks)) 77 | 78 | for _, block := range chunk.Blocks { 79 | b, err := d.NewDABlock(block, totalL1MessagePoppedBefore) 80 | if err != nil { 81 | return nil, err 82 | } 83 | blocks = append(blocks, b) 84 | totalL1MessagePoppedBefore += block.NumL1Messages(totalL1MessagePoppedBefore) 85 | txs = append(txs, block.Transactions) 86 | } 87 | 88 | if len(blocks) != len(txs) { 89 | return nil, fmt.Errorf("number of blocks (%d) does not match number of transactions (%d)", len(blocks), len(txs)) 90 | } 91 | 92 | return &daChunkV0{ 93 | blocks: blocks, 94 | transactions: txs, 95 | }, nil 96 | } 97 | 98 | // DecodeDAChunksRawTx takes a byte slice and decodes it into a []*DAChunkRawTx. 
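// The expected input layout per chunk matches daChunkV0.Encode (sizes per the
// offsets in codecv0_types.go: a block context is 60 bytes):
//
//	[numBlocks:1][blockContext:60] * numBlocks,
//	then [txLen:4][RLP(tx):txLen] for each L2 transaction, in block order.
//
// L1 message transactions are not part of the chunk bytes.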
99 | func (d *DACodecV0) DecodeDAChunksRawTx(chunkBytes [][]byte) ([]*DAChunkRawTx, error) {
100 | 	chunks := make([]*DAChunkRawTx, 0, len(chunkBytes))
101 | 	for _, chunk := range chunkBytes {
102 | 		if len(chunk) < 1 {
103 | 			return nil, fmt.Errorf("invalid chunk, length is less than 1")
104 | 		}
105 | 
106 | 		numBlocks := int(chunk[0])
107 | 		if len(chunk) < 1+numBlocks*blockContextByteSize {
108 | 			return nil, fmt.Errorf("chunk size doesn't match with numBlocks, byte length of chunk: %v, expected length: %v", len(chunk), 1+numBlocks*blockContextByteSize)
109 | 		}
110 | 
111 | 		blocks := make([]DABlock, numBlocks)
112 | 		for i := 0; i < numBlocks; i++ {
113 | 			startIdx := 1 + i*blockContextByteSize // add 1 to skip numBlocks byte
114 | 			endIdx := startIdx + blockContextByteSize
115 | 			blocks[i] = &daBlockV0{}
116 | 			err := blocks[i].Decode(chunk[startIdx:endIdx])
117 | 			if err != nil {
118 | 				return nil, err
119 | 			}
120 | 		}
121 | 
122 | 		var transactions []types.Transactions
123 | 		currentIndex := 1 + numBlocks*blockContextByteSize
124 | 		for _, block := range blocks {
125 | 			var blockTransactions types.Transactions
126 | 			// ignore L1 msg transactions from the block, consider only L2 transactions
127 | 			txNum := int(block.NumTransactions()) - int(block.NumL1Messages())
128 | 			if txNum < 0 {
129 | 				return nil, fmt.Errorf("invalid transaction count: NumL1Messages (%d) exceeds NumTransactions (%d)", block.NumL1Messages(), block.NumTransactions())
130 | 			}
131 | 			for i := 0; i < txNum; i++ {
132 | 				if len(chunk) < currentIndex+txLenByteSize {
133 | 					return nil, fmt.Errorf("chunk size doesn't match, next tx size is less than 4, byte length of chunk: %v, expected minimum length: %v, txNum without l1 msgs: %d", len(chunk), currentIndex+txLenByteSize, i)
134 | 				}
135 | 				txLen := int(binary.BigEndian.Uint32(chunk[currentIndex : currentIndex+txLenByteSize]))
136 | 				if len(chunk) < currentIndex+txLenByteSize+txLen {
137 | 					return nil, fmt.Errorf("chunk size doesn't match with next tx length, byte length of chunk: %v, expected minimum length: %v, txNum without l1 msgs: %d", len(chunk), currentIndex+txLenByteSize+txLen, i)
138 | 				}
139 | 				txData := chunk[currentIndex+txLenByteSize : currentIndex+txLenByteSize+txLen]
140 | 				tx := &types.Transaction{}
141 | 				err := tx.UnmarshalBinary(txData)
142 | 				if err != nil {
143 | 					return nil, fmt.Errorf("failed to unmarshal tx, pos of tx in chunk bytes: %d. tx num without l1 msgs: %d, err: %w", currentIndex, i, err)
144 | 				}
145 | 				blockTransactions = append(blockTransactions, tx)
146 | 				currentIndex += txLenByteSize + txLen
147 | 			}
148 | 			transactions = append(transactions, blockTransactions)
149 | 		}
150 | 
151 | 		chunks = append(chunks, &DAChunkRawTx{
152 | 			Blocks:       blocks,
153 | 			Transactions: transactions,
154 | 		})
155 | 	}
156 | 	return chunks, nil
157 | }
158 | 
159 | // DecodeTxsFromBlob decodes txs from blob bytes and writes to chunks
160 | func (d *DACodecV0) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error {
161 | 	return nil
162 | }
163 | 
164 | func (d *DACodecV0) DecodeBlob(blob *kzg4844.Blob) (DABlobPayload, error) {
165 | 	return nil, nil
166 | }
167 | 
168 | // NewDABatch creates a DABatch from the provided Batch.
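// A minimal usage sketch (the *Batch value is assumed to be populated by the
// caller, e.g. from block traces):
//
//	codec := &DACodecV0{}
//	daBatch, err := codec.NewDABatch(batch)
//	if err != nil {
//		// handle error
//	}
//	_ = daBatch.Hash()   // batch hash, used to identify the batch in the contracts
//	_ = daBatch.Encode() // serialized batch header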
169 | func (d *DACodecV0) NewDABatch(batch *Batch) (DABatch, error) { 170 | // this encoding can only support a fixed number of chunks per batch 171 | if len(batch.Chunks) > d.MaxNumChunksPerBatch() { 172 | return nil, fmt.Errorf("too many chunks in batch: got %d, maximum allowed is %d", len(batch.Chunks), d.MaxNumChunksPerBatch()) 173 | } 174 | 175 | if len(batch.Chunks) == 0 { 176 | return nil, errors.New("batch must contain at least one chunk") 177 | } 178 | 179 | // compute batch data hash 180 | dataHash, err := d.computeBatchDataHash(batch.Chunks, batch.TotalL1MessagePoppedBefore) 181 | if err != nil { 182 | return nil, fmt.Errorf("failed to compute batch data hash, index: %d, err: %w", batch.Index, err) 183 | } 184 | 185 | // skipped L1 messages bitmap 186 | skippedL1MessageBitmap, totalL1MessagePoppedAfter, err := constructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore) 187 | if err != nil { 188 | return nil, fmt.Errorf("failed to construct skipped bitmap, index: %d, err: %w", batch.Index, err) 189 | } 190 | 191 | if totalL1MessagePoppedAfter < batch.TotalL1MessagePoppedBefore { 192 | return nil, fmt.Errorf("batch index: %d, totalL1MessagePoppedAfter (%d) is less than batch.TotalL1MessagePoppedBefore (%d)", batch.Index, totalL1MessagePoppedAfter, batch.TotalL1MessagePoppedBefore) 193 | } 194 | l1MessagePopped := totalL1MessagePoppedAfter - batch.TotalL1MessagePoppedBefore 195 | 196 | daBatch := newDABatchV0( 197 | CodecV0, // version 198 | batch.Index, // batchIndex 199 | l1MessagePopped, // l1MessagePopped 200 | totalL1MessagePoppedAfter, // totalL1MessagePopped 201 | dataHash, // dataHash 202 | batch.ParentBatchHash, // parentBatchHash 203 | skippedL1MessageBitmap, // skippedL1MessageBitmap 204 | ) 205 | 206 | return daBatch, nil 207 | } 208 | 209 | // NewDABatchFromBytes decodes the given byte slice into a DABatch. 210 | func (d *DACodecV0) NewDABatchFromBytes(data []byte) (DABatch, error) { 211 | if len(data) < daBatchV0EncodedMinLength { 212 | return nil, fmt.Errorf("insufficient data for DABatch, expected at least %d bytes but got %d", daBatchV0EncodedMinLength, len(data)) 213 | } 214 | 215 | if CodecVersion(data[daBatchOffsetVersion]) != CodecV0 { 216 | return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV0, data[daBatchOffsetVersion]) 217 | } 218 | 219 | return newDABatchV0( 220 | CodecVersion(data[daBatchOffsetVersion]), // version 221 | binary.BigEndian.Uint64(data[daBatchOffsetBatchIndex:daBatchV0OffsetL1MessagePopped]), // batchIndex 222 | binary.BigEndian.Uint64(data[daBatchV0OffsetL1MessagePopped:daBatchV0OffsetTotalL1MessagePopped]), // l1MessagePopped 223 | binary.BigEndian.Uint64(data[daBatchV0OffsetTotalL1MessagePopped:daBatchOffsetDataHash]), // totalL1MessagePopped 224 | common.BytesToHash(data[daBatchOffsetDataHash:daBatchV0OffsetParentBatchHash]), // dataHash 225 | common.BytesToHash(data[daBatchV0OffsetParentBatchHash:daBatchV0OffsetSkippedL1MessageBitmap]), // parentBatchHash 226 | data[daBatchV0OffsetSkippedL1MessageBitmap:], // skippedL1MessageBitmap 227 | ), nil 228 | } 229 | 230 | func (d *DACodecV0) NewDABatchFromParams(_ uint64, _, _ common.Hash) (DABatch, error) { 231 | return nil, nil 232 | } 233 | 234 | // EstimateBlockL1CommitCalldataSize calculates the calldata size in l1 commit for this block approximately. 
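// Equivalently (mirroring the loop below), for a block with L2 transactions
// tx_1..tx_n:
//
//	size = blockContextByteSize + Σ (payloadLengthBytes + len(RLP(tx_i)))
//
// L1 message transactions contribute nothing, since they are not posted as
// calldata.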
235 | func (d *DACodecV0) EstimateBlockL1CommitCalldataSize(b *Block) (uint64, error) { 236 | var size uint64 237 | for _, txData := range b.Transactions { 238 | if txData.Type == types.L1MessageTxType { 239 | continue 240 | } 241 | size += payloadLengthBytes 242 | txPayloadLength, err := getTxPayloadLength(txData) 243 | if err != nil { 244 | return 0, err 245 | } 246 | size += txPayloadLength 247 | } 248 | size += blockContextByteSize 249 | return size, nil 250 | } 251 | 252 | // EstimateBlockL1CommitGas calculates the total L1 commit gas for this block approximately. 253 | func (d *DACodecV0) EstimateBlockL1CommitGas(b *Block) (uint64, error) { 254 | var total uint64 255 | var numL1Messages uint64 256 | for _, txData := range b.Transactions { 257 | if txData.Type == types.L1MessageTxType { 258 | numL1Messages++ 259 | continue 260 | } 261 | 262 | txPayloadLength, err := getTxPayloadLength(txData) 263 | if err != nil { 264 | return 0, err 265 | } 266 | total += calldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero 267 | total += calldataNonZeroByteGas * 4 // 4 bytes payload length 268 | total += getKeccak256Gas(txPayloadLength) // l2 tx hash 269 | } 270 | 271 | total += calldataNonZeroByteGas * blockContextByteSize 272 | 273 | // sload 274 | total += coldSloadGas * numL1Messages // numL1Messages times cold sload in L1MessageQueue 275 | 276 | // staticcall 277 | total += warmAddressAccessGas * numL1Messages // numL1Messages times call to L1MessageQueue 278 | total += warmAddressAccessGas * numL1Messages // numL1Messages times warm address access to L1MessageQueue 279 | 280 | total += getMemoryExpansionCost(functionSignatureBytes+defaultParameterBytes) * numL1Messages // staticcall to proxy 281 | total += warmAddressAccessGas * numL1Messages // read admin in proxy 282 | total += warmAddressAccessGas * numL1Messages // read impl in proxy 283 | total += warmAddressAccessGas * numL1Messages // access impl 284 | total += getMemoryExpansionCost(functionSignatureBytes+defaultParameterBytes) * numL1Messages // delegatecall to impl 285 | 286 | return total, nil 287 | } 288 | 289 | // EstimateChunkL1CommitCalldataSize calculates the calldata size needed for committing a chunk to L1 approximately. 290 | func (d *DACodecV0) EstimateChunkL1CommitCalldataSize(c *Chunk) (uint64, error) { 291 | var totalL1CommitCalldataSize uint64 292 | for _, block := range c.Blocks { 293 | blockL1CommitCalldataSize, err := d.EstimateBlockL1CommitCalldataSize(block) 294 | if err != nil { 295 | return 0, err 296 | } 297 | totalL1CommitCalldataSize += blockL1CommitCalldataSize 298 | } 299 | return totalL1CommitCalldataSize, nil 300 | } 301 | 302 | // EstimateChunkL1CommitGas calculates the total L1 commit gas for this chunk approximately. 
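// Beyond the per-block estimates, the chunk-level terms below add one warm
// sload per block, one calldata byte for the numBlocks field, and the keccak
// cost of hashing every block context plus one 32-byte hash per transaction.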
303 | func (d *DACodecV0) EstimateChunkL1CommitGas(c *Chunk) (uint64, error) { 304 | var totalTxNum uint64 305 | var totalL1CommitGas uint64 306 | for _, block := range c.Blocks { 307 | totalTxNum += uint64(len(block.Transactions)) 308 | blockL1CommitGas, err := d.EstimateBlockL1CommitGas(block) 309 | if err != nil { 310 | return 0, err 311 | } 312 | totalL1CommitGas += blockL1CommitGas 313 | } 314 | 315 | numBlocks := uint64(len(c.Blocks)) 316 | totalL1CommitGas += warmSloadGas * numBlocks // numBlocks times warm sload 317 | totalL1CommitGas += calldataNonZeroByteGas // numBlocks field of chunk encoding in calldata 318 | 319 | totalL1CommitGas += getKeccak256Gas(blockContextBytesForHashing*numBlocks + common.HashLength*totalTxNum) // chunk hash 320 | return totalL1CommitGas, nil 321 | } 322 | 323 | // EstimateBatchL1CommitGas calculates the total L1 commit gas for this batch approximately. 324 | func (d *DACodecV0) EstimateBatchL1CommitGas(b *Batch) (uint64, error) { 325 | var totalL1CommitGas uint64 326 | 327 | // Add extra gas costs 328 | totalL1CommitGas += extraGasCost // constant to account for ops like _getAdmin, _implementation, _requireNotPaused, etc 329 | totalL1CommitGas += 4 * coldSloadGas // 4 one-time cold sload for commitBatch 330 | totalL1CommitGas += sstoreGas // 1 time sstore 331 | totalL1CommitGas += baseTxGas // base gas for tx 332 | totalL1CommitGas += calldataNonZeroByteGas // version in calldata 333 | 334 | // adjusting gas: 335 | // add 1 time cold sload (2100 gas) for L1MessageQueue 336 | // add 1 time cold address access (2600 gas) for L1MessageQueue 337 | // minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas) 338 | totalL1CommitGas += (coldSloadGas + coldAddressAccessGas - warmSloadGas - warmAddressAccessGas) 339 | totalL1CommitGas += getKeccak256Gas(daBatchV0EncodedMinLength + skippedL1MessageBitmapByteSize) // parent batch header hash, length is estimated as (constant part) + (1 skippedL1MessageBitmap) 340 | totalL1CommitGas += calldataNonZeroByteGas * (daBatchV0EncodedMinLength + skippedL1MessageBitmapByteSize) // parent batch header in calldata 341 | 342 | // adjust batch data hash gas cost 343 | totalL1CommitGas += getKeccak256Gas(uint64(common.HashLength * len(b.Chunks))) 344 | 345 | totalL1MessagePoppedBefore := b.TotalL1MessagePoppedBefore 346 | 347 | for _, chunk := range b.Chunks { 348 | chunkL1CommitGas, err := d.EstimateChunkL1CommitGas(chunk) 349 | if err != nil { 350 | return 0, err 351 | } 352 | totalL1CommitGas += chunkL1CommitGas 353 | 354 | totalL1MessagePoppedInChunk := chunk.NumL1Messages(totalL1MessagePoppedBefore) 355 | totalL1MessagePoppedBefore += totalL1MessagePoppedInChunk 356 | 357 | totalL1CommitGas += calldataNonZeroByteGas * (skippedL1MessageBitmapByteSize * (totalL1MessagePoppedInChunk + 255) / 256) 358 | totalL1CommitGas += getKeccak256Gas(daBatchV0EncodedMinLength + skippedL1MessageBitmapByteSize*(totalL1MessagePoppedInChunk+255)/256) 359 | 360 | chunkL1CommitCalldataSize, err := d.EstimateChunkL1CommitCalldataSize(chunk) 361 | if err != nil { 362 | return 0, err 363 | } 364 | totalL1CommitGas += getMemoryExpansionCost(chunkL1CommitCalldataSize) 365 | } 366 | 367 | return totalL1CommitGas, nil 368 | } 369 | 370 | // EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately. 
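// The batch-level calldata estimate is just the sum over its chunks:
//
//	size(batch) = Σ size(chunk_j) for j = 1..len(b.Chunks)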
371 | func (d *DACodecV0) EstimateBatchL1CommitCalldataSize(b *Batch) (uint64, error) {
372 | 	var totalL1CommitCalldataSize uint64
373 | 	for _, chunk := range b.Chunks {
374 | 		chunkL1CommitCalldataSize, err := d.EstimateChunkL1CommitCalldataSize(chunk)
375 | 		if err != nil {
376 | 			return 0, err
377 | 		}
378 | 		totalL1CommitCalldataSize += chunkL1CommitCalldataSize
379 | 	}
380 | 	return totalL1CommitCalldataSize, nil
381 | }
382 | 
383 | // CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
384 | func (d *DACodecV0) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) {
385 | 	return true, nil
386 | }
387 | 
388 | // CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
389 | func (d *DACodecV0) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
390 | 	return true, nil
391 | }
392 | 
393 | // EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a single chunk.
394 | func (d *DACodecV0) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64, uint64, error) {
395 | 	return 0, 0, nil
396 | }
397 | 
398 | // EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a batch.
399 | func (d *DACodecV0) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64, uint64, error) {
400 | 	return 0, 0, nil
401 | }
402 | 
403 | // JSONFromBytes for CodecV0 returns empty values.
404 | func (d *DACodecV0) JSONFromBytes(data []byte) ([]byte, error) {
405 | 	// DACodecV0 doesn't need this, so just return empty values
406 | 	return nil, nil
407 | }
408 | 
409 | // computeBatchDataHash computes the data hash of the batch.
410 | // Note: The batch hash and batch data hash are two different hashes,
411 | // the former is used for identifying a batch in the contracts,
412 | // the latter is used in the public input to the provers.
413 | func (d *DACodecV0) computeBatchDataHash(chunks []*Chunk, totalL1MessagePoppedBefore uint64) (common.Hash, error) {
414 | 	dataBytes := make([]byte, 0, len(chunks)*common.HashLength)
415 | 	totalL1MessagePoppedBeforeChunk := totalL1MessagePoppedBefore
416 | 
417 | 	for _, chunk := range chunks {
418 | 		daChunk, err := d.NewDAChunk(chunk, totalL1MessagePoppedBeforeChunk)
419 | 		if err != nil {
420 | 			return common.Hash{}, err
421 | 		}
422 | 		totalL1MessagePoppedBeforeChunk += chunk.NumL1Messages(totalL1MessagePoppedBeforeChunk)
423 | 		chunkHash, err := daChunk.Hash()
424 | 		if err != nil {
425 | 			return common.Hash{}, err
426 | 		}
427 | 		dataBytes = append(dataBytes, chunkHash.Bytes()...)
428 | } 429 | 430 | dataHash := crypto.Keccak256Hash(dataBytes) 431 | return dataHash, nil 432 | } 433 | -------------------------------------------------------------------------------- /encoding/codecv0_types.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "fmt" 7 | "math" 8 | "math/big" 9 | 10 | "github.com/scroll-tech/go-ethereum/common" 11 | "github.com/scroll-tech/go-ethereum/core/types" 12 | "github.com/scroll-tech/go-ethereum/crypto" 13 | "github.com/scroll-tech/go-ethereum/crypto/kzg4844" 14 | ) 15 | 16 | const ( 17 | numberOffset = 0 18 | timestampOffset = numberOffset + 8 19 | baseFeeOffset = timestampOffset + 8 20 | gasLimitOffset = baseFeeOffset + 32 21 | numTransactionsOffset = gasLimitOffset + 8 22 | numL1MessagesOffset = numTransactionsOffset + 2 23 | ) 24 | 25 | // daBlockV0 represents a Data Availability Block. 26 | type daBlockV0 struct { 27 | number uint64 28 | timestamp uint64 29 | baseFee *big.Int 30 | gasLimit uint64 31 | numTransactions uint16 32 | numL1Messages uint16 33 | } 34 | 35 | // newDABlockV0 is a constructor function for daBlockV0 that initializes the internal fields. 36 | func newDABlockV0(number uint64, timestamp uint64, baseFee *big.Int, gasLimit uint64, numTransactions uint16, numL1Messages uint16) *daBlockV0 { 37 | return &daBlockV0{ 38 | number: number, 39 | timestamp: timestamp, 40 | baseFee: baseFee, 41 | gasLimit: gasLimit, 42 | numTransactions: numTransactions, 43 | numL1Messages: numL1Messages, 44 | } 45 | } 46 | 47 | // Encode serializes the DABlock into a slice of bytes. 48 | func (b *daBlockV0) Encode() []byte { 49 | bytes := make([]byte, blockContextByteSize) 50 | binary.BigEndian.PutUint64(bytes[numberOffset:timestampOffset], b.number) 51 | binary.BigEndian.PutUint64(bytes[timestampOffset:baseFeeOffset], b.timestamp) 52 | if b.baseFee != nil { 53 | b.baseFee.FillBytes(bytes[baseFeeOffset:gasLimitOffset]) 54 | } 55 | binary.BigEndian.PutUint64(bytes[gasLimitOffset:numTransactionsOffset], b.gasLimit) 56 | binary.BigEndian.PutUint16(bytes[numTransactionsOffset:numL1MessagesOffset], b.numTransactions) 57 | binary.BigEndian.PutUint16(bytes[numL1MessagesOffset:], b.numL1Messages) 58 | return bytes 59 | } 60 | 61 | // Decode populates the fields of a DABlock from a byte slice. 62 | func (b *daBlockV0) Decode(bytes []byte) error { 63 | if len(bytes) != blockContextByteSize { 64 | return errors.New("block encoding is not blockContextByteSize bytes long") 65 | } 66 | 67 | b.number = binary.BigEndian.Uint64(bytes[numberOffset:timestampOffset]) 68 | b.timestamp = binary.BigEndian.Uint64(bytes[timestampOffset:baseFeeOffset]) 69 | b.baseFee = new(big.Int).SetBytes(bytes[baseFeeOffset:gasLimitOffset]) 70 | b.gasLimit = binary.BigEndian.Uint64(bytes[gasLimitOffset:numTransactionsOffset]) 71 | b.numTransactions = binary.BigEndian.Uint16(bytes[numTransactionsOffset:numL1MessagesOffset]) 72 | b.numL1Messages = binary.BigEndian.Uint16(bytes[numL1MessagesOffset:]) 73 | 74 | return nil 75 | } 76 | 77 | // Number returns the block number. 78 | func (b *daBlockV0) Number() uint64 { 79 | return b.number 80 | } 81 | 82 | // Timestamp returns the block timestamp. 83 | func (b *daBlockV0) Timestamp() uint64 { 84 | return b.timestamp 85 | } 86 | 87 | // BaseFee returns the block base fee. 88 | func (b *daBlockV0) BaseFee() *big.Int { 89 | return b.baseFee 90 | } 91 | 92 | // GasLimit returns the block gas limit. 
93 | func (b *daBlockV0) GasLimit() uint64 { 94 | return b.gasLimit 95 | } 96 | 97 | // NumTransactions returns the number of transactions in the block. 98 | func (b *daBlockV0) NumTransactions() uint16 { 99 | return b.numTransactions 100 | } 101 | 102 | // NumL1Messages returns the number of L1 messages in the block. 103 | func (b *daBlockV0) NumL1Messages() uint16 { 104 | return b.numL1Messages 105 | } 106 | 107 | // DAChunkRawTx groups consecutive DABlocks with their L2 transactions, L1 msgs are loaded in another place. 108 | type DAChunkRawTx struct { 109 | Blocks []DABlock 110 | Transactions []types.Transactions 111 | } 112 | 113 | // daChunkV0 groups consecutive DABlocks with their transactions. 114 | type daChunkV0 struct { 115 | blocks []DABlock 116 | transactions [][]*types.TransactionData 117 | } 118 | 119 | // Encode serializes the DAChunk into a slice of bytes. 120 | func (c *daChunkV0) Encode() ([]byte, error) { 121 | if len(c.blocks) == 0 { 122 | return nil, errors.New("number of blocks is 0") 123 | } 124 | 125 | if len(c.blocks) > math.MaxUint8 { 126 | return nil, fmt.Errorf("number of blocks (%d) exceeds maximum allowed (%d)", len(c.blocks), math.MaxUint8) 127 | } 128 | 129 | var chunkBytes []byte 130 | chunkBytes = append(chunkBytes, byte(len(c.blocks))) 131 | 132 | var l2TxDataBytes []byte 133 | 134 | for _, block := range c.blocks { 135 | chunkBytes = append(chunkBytes, block.Encode()...) 136 | } 137 | 138 | for _, blockTxs := range c.transactions { 139 | for _, txData := range blockTxs { 140 | if txData.Type == types.L1MessageTxType { 141 | continue 142 | } 143 | 144 | var txLen [4]byte 145 | rlpTxData, err := convertTxDataToRLPEncoding(txData) 146 | if err != nil { 147 | return nil, fmt.Errorf("failed to convert txData to RLP encoding: %w", err) 148 | } 149 | binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData))) 150 | l2TxDataBytes = append(l2TxDataBytes, txLen[:]...) 151 | l2TxDataBytes = append(l2TxDataBytes, rlpTxData...) 152 | } 153 | } 154 | 155 | chunkBytes = append(chunkBytes, l2TxDataBytes...) 156 | return chunkBytes, nil 157 | } 158 | 159 | // Hash computes the hash of the DAChunk data. 160 | func (c *daChunkV0) Hash() (common.Hash, error) { 161 | chunkBytes, err := c.Encode() 162 | if err != nil { 163 | return common.Hash{}, fmt.Errorf("failed to encode DAChunk: %w", err) 164 | } 165 | 166 | if len(chunkBytes) == 0 { 167 | return common.Hash{}, errors.New("chunk data is empty and cannot be processed") 168 | } 169 | numBlocks := chunkBytes[0] 170 | 171 | // concatenate block contexts 172 | var dataBytes []byte 173 | for i := 0; i < int(numBlocks); i++ { 174 | start := 1 + blockContextByteSize*i 175 | end := start + blockContextBytesForHashing 176 | if end > len(chunkBytes) { 177 | return common.Hash{}, fmt.Errorf("unexpected end index: %d, chunkBytes length: %d", end, len(chunkBytes)) 178 | } 179 | dataBytes = append(dataBytes, chunkBytes[start:end]...) 180 | } 181 | 182 | // concatenate l1 and l2 tx hashes 183 | for _, blockTxs := range c.transactions { 184 | var l1TxHashes []byte 185 | var l2TxHashes []byte 186 | for _, txData := range blockTxs { 187 | hashBytes := common.FromHex(txData.TxHash) 188 | if len(hashBytes) != common.HashLength { 189 | return common.Hash{}, fmt.Errorf("unexpected hash: %s", txData.TxHash) 190 | } 191 | if txData.Type == types.L1MessageTxType { 192 | l1TxHashes = append(l1TxHashes, hashBytes...) 193 | } else { 194 | l2TxHashes = append(l2TxHashes, hashBytes...) 
195 | } 196 | } 197 | dataBytes = append(dataBytes, l1TxHashes...) 198 | dataBytes = append(dataBytes, l2TxHashes...) 199 | } 200 | 201 | hash := crypto.Keccak256Hash(dataBytes) 202 | return hash, nil 203 | } 204 | 205 | // BlockRange returns the block range of the DAChunk. 206 | func (c *daChunkV0) BlockRange() (uint64, uint64, error) { 207 | if len(c.blocks) == 0 { 208 | return 0, 0, errors.New("number of blocks is 0") 209 | } 210 | 211 | return c.blocks[0].Number(), c.blocks[len(c.blocks)-1].Number(), nil 212 | } 213 | 214 | // daBatchV0 contains metadata about a batch of DAChunks. 215 | type daBatchV0 struct { 216 | version CodecVersion 217 | batchIndex uint64 218 | l1MessagePopped uint64 219 | totalL1MessagePopped uint64 220 | dataHash common.Hash 221 | parentBatchHash common.Hash 222 | skippedL1MessageBitmap []byte 223 | } 224 | 225 | // newDABatchV0 is a constructor for daBatchV0. 226 | func newDABatchV0(version CodecVersion, batchIndex, l1MessagePopped, totalL1MessagePopped uint64, dataHash, parentBatchHash common.Hash, skippedL1MessageBitmap []byte) *daBatchV0 { 227 | return &daBatchV0{ 228 | version: version, 229 | batchIndex: batchIndex, 230 | l1MessagePopped: l1MessagePopped, 231 | totalL1MessagePopped: totalL1MessagePopped, 232 | dataHash: dataHash, 233 | parentBatchHash: parentBatchHash, 234 | skippedL1MessageBitmap: skippedL1MessageBitmap, 235 | } 236 | } 237 | 238 | // Encode serializes the DABatchV0 into bytes. 239 | func (b *daBatchV0) Encode() []byte { 240 | batchBytes := make([]byte, daBatchV0EncodedMinLength+len(b.skippedL1MessageBitmap)) 241 | batchBytes[daBatchOffsetVersion] = byte(b.version) 242 | binary.BigEndian.PutUint64(batchBytes[daBatchOffsetBatchIndex:daBatchV0OffsetL1MessagePopped], b.batchIndex) 243 | binary.BigEndian.PutUint64(batchBytes[daBatchV0OffsetL1MessagePopped:daBatchV0OffsetTotalL1MessagePopped], b.l1MessagePopped) 244 | binary.BigEndian.PutUint64(batchBytes[daBatchV0OffsetTotalL1MessagePopped:daBatchOffsetDataHash], b.totalL1MessagePopped) 245 | copy(batchBytes[daBatchOffsetDataHash:daBatchV0OffsetParentBatchHash], b.dataHash[:]) 246 | copy(batchBytes[daBatchV0OffsetParentBatchHash:daBatchV0OffsetSkippedL1MessageBitmap], b.parentBatchHash[:]) 247 | copy(batchBytes[daBatchV0OffsetSkippedL1MessageBitmap:], b.skippedL1MessageBitmap[:]) 248 | return batchBytes 249 | } 250 | 251 | // Hash computes the hash of the serialized DABatch. 252 | func (b *daBatchV0) Hash() common.Hash { 253 | bytes := b.Encode() 254 | return crypto.Keccak256Hash(bytes) 255 | } 256 | 257 | // Blob returns the blob of the batch. 258 | func (b *daBatchV0) Blob() *kzg4844.Blob { 259 | return nil 260 | } 261 | 262 | // BlobBytes returns the blob bytes of the batch. 263 | func (b *daBatchV0) BlobBytes() []byte { 264 | return nil 265 | } 266 | 267 | // BlobDataProofForPointEvaluation computes the abi-encoded blob verification data. 268 | func (b *daBatchV0) BlobDataProofForPointEvaluation() ([]byte, error) { 269 | return nil, nil 270 | } 271 | 272 | // Version returns the version of the DABatch. 273 | func (b *daBatchV0) Version() CodecVersion { 274 | return b.version 275 | } 276 | 277 | // SkippedL1MessageBitmap returns the skipped L1 message bitmap of the DABatch. 278 | func (b *daBatchV0) SkippedL1MessageBitmap() []byte { 279 | return b.skippedL1MessageBitmap 280 | } 281 | 282 | // DataHash returns the data hash of the DABatch. 
283 | func (b *daBatchV0) DataHash() common.Hash {
284 | 	return b.dataHash
285 | }
286 | 
287 | // ChallengeDigest returns the challenge digest of the DABatch.
288 | func (b *daBatchV0) ChallengeDigest() common.Hash {
289 | 	return common.Hash{}
290 | }
291 | 
--------------------------------------------------------------------------------
/encoding/codecv1.go:
--------------------------------------------------------------------------------
1 | package encoding
2 | 
3 | import (
4 | 	"crypto/sha256"
5 | 	"encoding/binary"
6 | 	"errors"
7 | 	"fmt"
8 | 	"math"
9 | 	"math/big"
10 | 
11 | 	"github.com/scroll-tech/go-ethereum/common"
12 | 	"github.com/scroll-tech/go-ethereum/core/types"
13 | 	"github.com/scroll-tech/go-ethereum/crypto"
14 | 	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
15 | )
16 | 
17 | type DACodecV1 struct {
18 | 	DACodecV0
19 | }
20 | 
21 | // Version returns the codec version.
22 | func (d *DACodecV1) Version() CodecVersion {
23 | 	return CodecV1
24 | }
25 | 
26 | // NewDAChunk creates a new DAChunk from the given Chunk and the total number of L1 messages popped before.
27 | func (d *DACodecV1) NewDAChunk(chunk *Chunk, totalL1MessagePoppedBefore uint64) (DAChunk, error) {
28 | 	if chunk == nil {
29 | 		return nil, errors.New("chunk is nil")
30 | 	}
31 | 
32 | 	if len(chunk.Blocks) == 0 {
33 | 		return nil, errors.New("number of blocks is 0")
34 | 	}
35 | 
36 | 	if len(chunk.Blocks) > math.MaxUint8 {
37 | 		return nil, fmt.Errorf("number of blocks (%d) exceeds maximum allowed (%d)", len(chunk.Blocks), math.MaxUint8)
38 | 	}
39 | 
40 | 	blocks := make([]DABlock, 0, len(chunk.Blocks))
41 | 	txs := make([][]*types.TransactionData, 0, len(chunk.Blocks))
42 | 
43 | 	for _, block := range chunk.Blocks {
44 | 		b, err := d.NewDABlock(block, totalL1MessagePoppedBefore)
45 | 		if err != nil {
46 | 			return nil, err
47 | 		}
48 | 		blocks = append(blocks, b)
49 | 		totalL1MessagePoppedBefore += block.NumL1Messages(totalL1MessagePoppedBefore)
50 | 		txs = append(txs, block.Transactions)
51 | 	}
52 | 
53 | 	daChunk := newDAChunkV1(
54 | 		blocks, // blocks
55 | 		txs,    // transactions
56 | 	)
57 | 
58 | 	return daChunk, nil
59 | }
60 | 
61 | 
62 | // DecodeDAChunksRawTx takes chunk byte slices and decodes them into a []*DAChunkRawTx.
63 | // Beginning with codecv1, tx data is posted in blobs rather than in the chunk bytes in calldata.
64 | func (d *DACodecV1) DecodeDAChunksRawTx(chunkBytes [][]byte) ([]*DAChunkRawTx, error) {
65 | 	chunks := make([]*DAChunkRawTx, 0, len(chunkBytes))
66 | 	for _, chunk := range chunkBytes {
67 | 		if len(chunk) < 1 {
68 | 			return nil, fmt.Errorf("invalid chunk, length is less than 1")
69 | 		}
70 | 
71 | 		numBlocks := int(chunk[0])
72 | 		if len(chunk) < 1+numBlocks*blockContextByteSize {
73 | 			return nil, fmt.Errorf("chunk size doesn't match with numBlocks, byte length of chunk: %v, expected length: %v", len(chunk), 1+numBlocks*blockContextByteSize)
74 | 		}
75 | 
76 | 		blocks := make([]DABlock, numBlocks)
77 | 		for i := 0; i < numBlocks; i++ {
78 | 			startIdx := 1 + i*blockContextByteSize // add 1 to skip numBlocks byte
79 | 			endIdx := startIdx + blockContextByteSize
80 | 			blocks[i] = &daBlockV0{}
81 | 			err := blocks[i].Decode(chunk[startIdx:endIdx])
82 | 			if err != nil {
83 | 				return nil, err
84 | 			}
85 | 		}
86 | 
87 | 		chunks = append(chunks, &DAChunkRawTx{
88 | 			Blocks: blocks, Transactions: nil, // transactions are left empty at this stage; beginning with codecv1 they are carried in the blob and filled in by DecodeTxsFromBlob.
89 | }) 90 | } 91 | return chunks, nil 92 | } 93 | 94 | // DecodeTxsFromBlob decodes txs from blob bytes and writes to chunks 95 | func (d *DACodecV1) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error { 96 | batchBytes := bytesFromBlobCanonical(blob) 97 | return decodeTxsFromBytes(batchBytes[:], chunks, d.MaxNumChunksPerBatch()) 98 | } 99 | 100 | // NewDABatch creates a DABatch from the provided Batch. 101 | func (d *DACodecV1) NewDABatch(batch *Batch) (DABatch, error) { 102 | // this encoding can only support a fixed number of chunks per batch 103 | if len(batch.Chunks) > d.MaxNumChunksPerBatch() { 104 | return nil, fmt.Errorf("too many chunks in batch: got %d, maximum allowed is %d", len(batch.Chunks), d.MaxNumChunksPerBatch()) 105 | } 106 | 107 | if len(batch.Chunks) == 0 { 108 | return nil, errors.New("batch must contain at least one chunk") 109 | } 110 | 111 | // batch data hash 112 | dataHash, err := d.computeBatchDataHash(batch.Chunks, batch.TotalL1MessagePoppedBefore) 113 | if err != nil { 114 | return nil, fmt.Errorf("failed to compute batch data hash, index: %d, err: %w", batch.Index, err) 115 | } 116 | 117 | // skipped L1 messages bitmap 118 | skippedL1MessageBitmap, totalL1MessagePoppedAfter, err := constructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore) 119 | if err != nil { 120 | return nil, fmt.Errorf("failed to construct skipped bitmap, index: %d, err: %w", batch.Index, err) 121 | } 122 | 123 | // blob payload 124 | blob, blobVersionedHash, z, err := d.constructBlobPayload(batch.Chunks, d.MaxNumChunksPerBatch()) 125 | if err != nil { 126 | return nil, fmt.Errorf("failed to construct blob payload, index: %d, err: %w", batch.Index, err) 127 | } 128 | 129 | if totalL1MessagePoppedAfter < batch.TotalL1MessagePoppedBefore { 130 | return nil, fmt.Errorf("batch index: %d, totalL1MessagePoppedAfter (%d) is less than batch.TotalL1MessagePoppedBefore (%d)", batch.Index, totalL1MessagePoppedAfter, batch.TotalL1MessagePoppedBefore) 131 | } 132 | l1MessagePopped := totalL1MessagePoppedAfter - batch.TotalL1MessagePoppedBefore 133 | 134 | daBatch := newDABatchV1( 135 | CodecV1, // version 136 | batch.Index, // batchIndex 137 | l1MessagePopped, // l1MessagePopped 138 | totalL1MessagePoppedAfter, // totalL1MessagePopped 139 | dataHash, // dataHash 140 | blobVersionedHash, // blobVersionedHash 141 | batch.ParentBatchHash, // parentBatchHash 142 | skippedL1MessageBitmap, // skippedL1MessageBitmap 143 | blob, // blob 144 | z, // z 145 | ) 146 | 147 | return daBatch, nil 148 | } 149 | 150 | // constructBlobPayload constructs the 4844 blob payload. 
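The payload that `constructBlobPayload` builds below starts with a metadata section of `2 + 4*maxNumChunksPerBatch` bytes: a big-endian uint16 chunk count followed by one big-endian uint32 size per chunk slot, with the RLP-encoded L2 transactions packed immediately after. As a hedged illustration (the 15-slot maximum here is an assumption for the example, not the codec's actual constant), a toy parser looks like this:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const maxChunks = 15 // illustrative slot count only
	meta := make([]byte, 2+4*maxChunks)
	binary.BigEndian.PutUint16(meta[0:], 2)     // num_chunks = 2
	binary.BigEndian.PutUint32(meta[2:], 100)   // chunk 0 size
	binary.BigEndian.PutUint32(meta[2+4:], 250) // chunk 1 size

	numChunks := int(binary.BigEndian.Uint16(meta[0:2]))
	offset := len(meta) // chunk data begins right after the metadata section
	for i := 0; i < numChunks; i++ {
		size := binary.BigEndian.Uint32(meta[2+4*i : 2+4*(i+1)])
		fmt.Printf("chunk %d: %d bytes at payload offset %d\n", i, size, offset)
		offset += int(size)
	}
}
```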
151 | func (d *DACodecV1) constructBlobPayload(chunks []*Chunk, maxNumChunksPerBatch int) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) { 152 | // metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk) 153 | metadataLength := 2 + maxNumChunksPerBatch*4 154 | 155 | // the raw (un-padded) blob payload 156 | blobBytes := make([]byte, metadataLength) 157 | 158 | // challenge digest preimage 159 | // 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash 160 | challengePreimage := make([]byte, (1+maxNumChunksPerBatch+1)*common.HashLength) 161 | 162 | // the chunk data hash used for calculating the challenge preimage 163 | var chunkDataHash common.Hash 164 | 165 | // blob metadata: num_chunks 166 | binary.BigEndian.PutUint16(blobBytes[0:], uint16(len(chunks))) 167 | 168 | // encode blob metadata and L2 transactions, 169 | // and simultaneously also build challenge preimage 170 | for chunkID, chunk := range chunks { 171 | currentChunkStartIndex := len(blobBytes) 172 | 173 | for _, block := range chunk.Blocks { 174 | for _, tx := range block.Transactions { 175 | if tx.Type == types.L1MessageTxType { 176 | continue 177 | } 178 | 179 | // encode L2 txs into blob payload 180 | rlpTxData, err := convertTxDataToRLPEncoding(tx) 181 | if err != nil { 182 | return nil, common.Hash{}, nil, fmt.Errorf("failed to convert txData to RLP encoding: %w", err) 183 | } 184 | blobBytes = append(blobBytes, rlpTxData...) 185 | } 186 | } 187 | 188 | // blob metadata: chunki_size 189 | chunkSize := len(blobBytes) - currentChunkStartIndex 190 | binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(chunkSize)) 191 | 192 | // challenge: compute chunk data hash 193 | chunkDataHash = crypto.Keccak256Hash(blobBytes[currentChunkStartIndex:]) 194 | copy(challengePreimage[common.HashLength+chunkID*common.HashLength:], chunkDataHash[:]) 195 | } 196 | 197 | // if we have fewer than maxNumChunksPerBatch chunks, the rest 198 | // of the blob metadata is correctly initialized to 0, 199 | // but we need to add padding to the challenge preimage 200 | for chunkID := len(chunks); chunkID < maxNumChunksPerBatch; chunkID++ { 201 | // use the last chunk's data hash as padding 202 | copy(challengePreimage[common.HashLength+chunkID*common.HashLength:], chunkDataHash[:]) 203 | } 204 | 205 | // challenge: compute metadata hash 206 | hash := crypto.Keccak256Hash(blobBytes[0:metadataLength]) 207 | copy(challengePreimage[0:], hash[:]) 208 | 209 | // convert raw data to BLSFieldElements 210 | blob, err := makeBlobCanonical(blobBytes) 211 | if err != nil { 212 | return nil, common.Hash{}, nil, fmt.Errorf("failed to convert blobBytes to canonical form: %w", err) 213 | } 214 | 215 | // compute blob versioned hash 216 | c, err := kzg4844.BlobToCommitment(blob) 217 | if err != nil { 218 | return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment: %w", err) 219 | } 220 | blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c) 221 | 222 | // challenge: append blob versioned hash 223 | copy(challengePreimage[(1+maxNumChunksPerBatch)*common.HashLength:], blobVersionedHash[:]) 224 | 225 | // compute z = challenge_digest % BLS_MODULUS 226 | challengeDigest := crypto.Keccak256Hash(challengePreimage) 227 | pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), blsModulus) 228 | pointBytes := pointBigInt.Bytes() 229 | 230 | // the challenge point z 231 | var z kzg4844.Point 232 | if len(pointBytes) > kzgPointByteSize { 233 | return nil, common.Hash{}, nil, 
fmt.Errorf("pointBytes length exceeds %d bytes, got %d bytes", kzgPointByteSize, len(pointBytes)) 234 | } 235 | start := kzgPointByteSize - len(pointBytes) 236 | copy(z[start:], pointBytes) 237 | 238 | return blob, blobVersionedHash, &z, nil 239 | } 240 | 241 | // NewDABatchFromBytes decodes the given byte slice into a DABatch. 242 | // Note: This function only populates the batch header, it leaves the blob-related fields empty. 243 | func (d *DACodecV1) NewDABatchFromBytes(data []byte) (DABatch, error) { 244 | if len(data) < daBatchV1EncodedMinLength { 245 | return nil, fmt.Errorf("insufficient data for DABatch, expected at least %d bytes but got %d", daBatchV1EncodedMinLength, len(data)) 246 | } 247 | 248 | if CodecVersion(data[daBatchOffsetVersion]) != CodecV1 { 249 | return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV1, data[daBatchOffsetVersion]) 250 | } 251 | 252 | return newDABatchV1( 253 | CodecVersion(data[daBatchOffsetVersion]), // version 254 | binary.BigEndian.Uint64(data[daBatchOffsetBatchIndex:daBatchV1OffsetL1MessagePopped]), // batchIndex 255 | binary.BigEndian.Uint64(data[daBatchV1OffsetL1MessagePopped:daBatchV1OffsetTotalL1MessagePopped]), // l1MessagePopped 256 | binary.BigEndian.Uint64(data[daBatchV1OffsetTotalL1MessagePopped:daBatchOffsetDataHash]), // totalL1MessagePopped 257 | common.BytesToHash(data[daBatchOffsetDataHash:daBatchV1OffsetBlobVersionedHash]), // dataHash 258 | common.BytesToHash(data[daBatchV1OffsetBlobVersionedHash:daBatchV1OffsetParentBatchHash]), // blobVersionedHash 259 | common.BytesToHash(data[daBatchV1OffsetParentBatchHash:daBatchV1OffsetSkippedL1MessageBitmap]), // parentBatchHash 260 | data[daBatchV1OffsetSkippedL1MessageBitmap:], // skippedL1MessageBitmap 261 | nil, // blob 262 | nil, // z 263 | ), nil 264 | } 265 | 266 | func (d *DACodecV1) chunkL1CommitBlobDataSize(c *Chunk) (uint64, error) { 267 | var dataSize uint64 268 | for _, block := range c.Blocks { 269 | for _, tx := range block.Transactions { 270 | if tx.Type == types.L1MessageTxType { 271 | continue 272 | } 273 | 274 | rlpTxData, err := convertTxDataToRLPEncoding(tx) 275 | if err != nil { 276 | return 0, fmt.Errorf("failed to convert txData to RLP encoding: %w", err) 277 | } 278 | dataSize += uint64(len(rlpTxData)) 279 | } 280 | } 281 | return dataSize, nil 282 | } 283 | 284 | // EstimateBlockL1CommitGas calculates the total L1 commit gas for this block approximately. 
285 | func (d *DACodecV1) EstimateBlockL1CommitGas(b *Block) (uint64, error) { 286 | var total uint64 287 | var numL1Messages uint64 288 | for _, txData := range b.Transactions { 289 | if txData.Type == types.L1MessageTxType { 290 | numL1Messages++ 291 | continue 292 | } 293 | } 294 | 295 | total += calldataNonZeroByteGas * blockContextByteSize 296 | 297 | // sload 298 | total += coldSloadGas * numL1Messages // numL1Messages times cold sload in L1MessageQueue 299 | 300 | // staticcall 301 | total += warmAddressAccessGas * numL1Messages // numL1Messages times call to L1MessageQueue 302 | total += warmAddressAccessGas * numL1Messages // numL1Messages times warm address access to L1MessageQueue 303 | 304 | total += getMemoryExpansionCost(functionSignatureBytes+defaultParameterBytes) * numL1Messages // staticcall to proxy 305 | total += warmAddressAccessGas * numL1Messages // read admin in proxy 306 | total += warmAddressAccessGas * numL1Messages // read impl in proxy 307 | total += warmAddressAccessGas * numL1Messages // access impl 308 | total += getMemoryExpansionCost(functionSignatureBytes+defaultParameterBytes) * numL1Messages // delegatecall to impl 309 | 310 | return total, nil 311 | } 312 | 313 | // EstimateChunkL1CommitGas calculates the total L1 commit gas for this chunk approximately. 314 | func (d *DACodecV1) EstimateChunkL1CommitGas(c *Chunk) (uint64, error) { 315 | var totalNonSkippedL1Messages uint64 316 | var totalL1CommitGas uint64 317 | for _, block := range c.Blocks { 318 | transactions := uint64(len(block.Transactions)) 319 | l2Transactions := block.NumL2Transactions() 320 | if transactions < l2Transactions { 321 | return 0, fmt.Errorf("number of L2 transactions (%d) exceeds total transactions (%d)", l2Transactions, transactions) 322 | } 323 | totalNonSkippedL1Messages += transactions - l2Transactions 324 | blockL1CommitGas, err := d.EstimateBlockL1CommitGas(block) 325 | if err != nil { 326 | return 0, err 327 | } 328 | totalL1CommitGas += blockL1CommitGas 329 | } 330 | 331 | numBlocks := uint64(len(c.Blocks)) 332 | totalL1CommitGas += warmSloadGas * numBlocks // numBlocks times warm sload 333 | totalL1CommitGas += calldataNonZeroByteGas // numBlocks field of chunk encoding in calldata 334 | 335 | totalL1CommitGas += getKeccak256Gas(58*numBlocks + common.HashLength*totalNonSkippedL1Messages) // chunk hash 336 | return totalL1CommitGas, nil 337 | } 338 | 339 | // EstimateBatchL1CommitGas calculates the total L1 commit gas for this batch approximately. 
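The block and chunk estimators above are linear models: a constant cost per block-context byte plus constant storage and call costs per L1 message. A hedged back-of-the-envelope version, using the standard EVM constants (16 gas per non-zero calldata byte, 2100 for a cold sload and 100 for a warm access per EIP-2929) and an assumed block-context size, since the repo's named constants are defined elsewhere:

```go
package main

import "fmt"

func main() {
	const (
		calldataNonZeroByteGas = 16   // standard EVM calldata cost
		coldSloadGas           = 2100 // EIP-2929
		warmAddressAccessGas   = 100  // EIP-2929
		blockContextByteSize   = 60   // assumption for illustration only
	)
	numL1Messages := uint64(3)

	total := uint64(calldataNonZeroByteGas * blockContextByteSize)
	total += coldSloadGas * numL1Messages             // cold sload in L1MessageQueue
	total += 2 * warmAddressAccessGas * numL1Messages // queue call + warm address access
	// The real estimator adds further proxy-related terms (admin/impl reads,
	// memory expansion for the staticcall and delegatecall).
	fmt.Println("approx block commit gas:", total)
}
```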
340 | func (d *DACodecV1) EstimateBatchL1CommitGas(b *Batch) (uint64, error) { 341 | var totalL1CommitGas uint64 342 | 343 | // Add extra gas costs 344 | totalL1CommitGas += extraGasCost // constant to account for ops like _getAdmin, _implementation, _requireNotPaused, etc 345 | totalL1CommitGas += 4 * coldSloadGas // 4 one-time cold sload for commitBatch 346 | totalL1CommitGas += sstoreGas // 1 time sstore 347 | totalL1CommitGas += baseTxGas // base gas for tx 348 | totalL1CommitGas += calldataNonZeroByteGas // version in calldata 349 | 350 | // adjusting gas: 351 | // add 1 time cold sload (2100 gas) for L1MessageQueue 352 | // add 1 time cold address access (2600 gas) for L1MessageQueue 353 | // minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas) 354 | totalL1CommitGas += (coldSloadGas + coldAddressAccessGas - warmSloadGas - warmAddressAccessGas) 355 | totalL1CommitGas += getKeccak256Gas(daBatchV0EncodedMinLength + skippedL1MessageBitmapByteSize) // parent batch header hash, length is estimated as (constant part) + (1 skippedL1MessageBitmap) 356 | totalL1CommitGas += calldataNonZeroByteGas * (daBatchV0EncodedMinLength + skippedL1MessageBitmapByteSize) // parent batch header in calldata 357 | 358 | // adjust batch data hash gas cost 359 | totalL1CommitGas += getKeccak256Gas(uint64(common.HashLength * len(b.Chunks))) 360 | 361 | totalL1MessagePoppedBefore := b.TotalL1MessagePoppedBefore 362 | 363 | for _, chunk := range b.Chunks { 364 | chunkL1CommitGas, err := d.EstimateChunkL1CommitGas(chunk) 365 | if err != nil { 366 | return 0, err 367 | } 368 | totalL1CommitGas += chunkL1CommitGas 369 | 370 | totalL1MessagePoppedInChunk := chunk.NumL1Messages(totalL1MessagePoppedBefore) 371 | totalL1MessagePoppedBefore += totalL1MessagePoppedInChunk 372 | 373 | totalL1CommitGas += calldataNonZeroByteGas * (skippedL1MessageBitmapByteSize * (totalL1MessagePoppedInChunk + 255) / 256) 374 | totalL1CommitGas += getKeccak256Gas(daBatchV3OffsetParentBatchHash + skippedL1MessageBitmapByteSize*(totalL1MessagePoppedInChunk+255)/256) 375 | 376 | chunkL1CommitCalldataSize, err := d.EstimateChunkL1CommitCalldataSize(chunk) 377 | if err != nil { 378 | return 0, err 379 | } 380 | totalL1CommitGas += getMemoryExpansionCost(chunkL1CommitCalldataSize) 381 | } 382 | 383 | return totalL1CommitGas, nil 384 | } 385 | 386 | // EstimateBlockL1CommitCalldataSize calculates the calldata size in l1 commit for this block approximately. 387 | func (d *DACodecV1) EstimateBlockL1CommitCalldataSize(_ *Block) (uint64, error) { 388 | return blockContextByteSize, nil 389 | } 390 | 391 | // EstimateChunkL1CommitCalldataSize calculates the calldata size needed for committing a chunk to L1 approximately. 392 | func (d *DACodecV1) EstimateChunkL1CommitCalldataSize(c *Chunk) (uint64, error) { 393 | return uint64(blockContextByteSize * len(c.Blocks)), nil 394 | } 395 | 396 | // EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately. 
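`EstimateBatchL1CommitGas` above sizes the skipped-L1-message bitmap with the integer ceiling `(n + 255) / 256`: one 256-bit word per 256 messages, rounded up. A quick check of that arithmetic:

```go
package main

import "fmt"

func main() {
	for _, n := range []uint64{0, 1, 256, 257} {
		words := (n + 255) / 256
		fmt.Printf("%d L1 messages -> %d bitmap word(s), %d bytes\n", n, words, words*32)
	}
	// 0, 1, and 256 messages need 0, 1, and 1 words; 257 spills into a second word.
}
```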
397 | func (d *DACodecV1) EstimateBatchL1CommitCalldataSize(b *Batch) (uint64, error) { 398 | var totalL1CommitCalldataSize uint64 399 | for _, chunk := range b.Chunks { 400 | chunkL1CommitCalldataSize, err := d.EstimateChunkL1CommitCalldataSize(chunk) 401 | if err != nil { 402 | return 0, err 403 | } 404 | totalL1CommitCalldataSize += chunkL1CommitCalldataSize 405 | } 406 | return totalL1CommitCalldataSize, nil 407 | } 408 | 409 | // EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a single chunk. 410 | func (d *DACodecV1) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64, uint64, error) { 411 | metadataSize := uint64(2 + 4*d.MaxNumChunksPerBatch()) 412 | batchDataSize, err := d.chunkL1CommitBlobDataSize(c) 413 | if err != nil { 414 | return 0, 0, err 415 | } 416 | blobSize := calculatePaddedBlobSize(metadataSize + batchDataSize) 417 | return metadataSize + batchDataSize, blobSize, nil 418 | } 419 | 420 | // EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a batch. 421 | func (d *DACodecV1) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64, uint64, error) { 422 | metadataSize := uint64(2 + 4*d.MaxNumChunksPerBatch()) 423 | var batchDataSize uint64 424 | for _, c := range b.Chunks { 425 | chunkDataSize, err := d.chunkL1CommitBlobDataSize(c) 426 | if err != nil { 427 | return 0, 0, err 428 | } 429 | batchDataSize += chunkDataSize 430 | } 431 | blobSize := calculatePaddedBlobSize(metadataSize + batchDataSize) 432 | return metadataSize + batchDataSize, blobSize, nil 433 | } 434 | 435 | // computeBatchDataHash computes the data hash of the batch. 436 | // Note: The batch hash and batch data hash are two different hashes, 437 | // the former is used for identifying a batch in the contracts, 438 | // the latter is used in the public input to the provers. 439 | func (d *DACodecV1) computeBatchDataHash(chunks []*Chunk, totalL1MessagePoppedBefore uint64) (common.Hash, error) { 440 | dataBytes := make([]byte, 0, len(chunks)*common.HashLength) 441 | totalL1MessagePoppedBeforeChunk := totalL1MessagePoppedBefore 442 | 443 | for _, chunk := range chunks { 444 | daChunk, err := d.NewDAChunk(chunk, totalL1MessagePoppedBeforeChunk) 445 | if err != nil { 446 | return common.Hash{}, err 447 | } 448 | totalL1MessagePoppedBeforeChunk += chunk.NumL1Messages(totalL1MessagePoppedBeforeChunk) 449 | chunkHash, err := daChunk.Hash() 450 | if err != nil { 451 | return common.Hash{}, err 452 | } 453 | dataBytes = append(dataBytes, chunkHash.Bytes()...) 454 | } 455 | 456 | dataHash := crypto.Keccak256Hash(dataBytes) 457 | return dataHash, nil 458 | } 459 | -------------------------------------------------------------------------------- /encoding/codecv1_types.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "encoding/binary" 5 | "encoding/hex" 6 | "errors" 7 | "fmt" 8 | 9 | "github.com/scroll-tech/go-ethereum/common" 10 | "github.com/scroll-tech/go-ethereum/core/types" 11 | "github.com/scroll-tech/go-ethereum/crypto" 12 | "github.com/scroll-tech/go-ethereum/crypto/kzg4844" 13 | ) 14 | 15 | // daChunkV1 groups consecutive DABlocks with their transactions. 16 | type daChunkV1 daChunkV0 17 | 18 | // newDAChunkV1 is a constructor for daChunkV1, initializing with blocks and transactions. 
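`computeBatchDataHash` at the end of codecv1.go above reduces a batch to `keccak256(chunkHash_0 || chunkHash_1 || ...)`. A minimal sketch of the same shape with stand-in chunk hashes, using the go-ethereum crypto package the codec already imports:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	chunkHashes := [][]byte{
		crypto.Keccak256([]byte("chunk-0")), // stand-ins for real DAChunk hashes
		crypto.Keccak256([]byte("chunk-1")),
	}
	var dataBytes []byte
	for _, h := range chunkHashes {
		dataBytes = append(dataBytes, h...)
	}
	fmt.Println("batch data hash:", crypto.Keccak256Hash(dataBytes).Hex())
}
```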
19 | func newDAChunkV1(blocks []DABlock, transactions [][]*types.TransactionData) *daChunkV1 { 20 | return &daChunkV1{ 21 | blocks: blocks, 22 | transactions: transactions, 23 | } 24 | } 25 | 26 | // Encode serializes the DAChunk into a slice of bytes. 27 | func (c *daChunkV1) Encode() ([]byte, error) { 28 | var chunkBytes []byte 29 | chunkBytes = append(chunkBytes, byte(len(c.blocks))) 30 | 31 | for _, block := range c.blocks { 32 | blockBytes := block.Encode() 33 | chunkBytes = append(chunkBytes, blockBytes...) 34 | } 35 | 36 | return chunkBytes, nil 37 | } 38 | 39 | // Hash computes the hash of the DAChunk data. 40 | func (c *daChunkV1) Hash() (common.Hash, error) { 41 | var dataBytes []byte 42 | 43 | // concatenate block contexts 44 | for _, block := range c.blocks { 45 | encodedBlock := block.Encode() 46 | dataBytes = append(dataBytes, encodedBlock[:blockContextBytesForHashing]...) 47 | } 48 | 49 | // concatenate l1 tx hashes 50 | for _, blockTxs := range c.transactions { 51 | for _, txData := range blockTxs { 52 | if txData.Type != types.L1MessageTxType { 53 | continue 54 | } 55 | 56 | hashBytes := common.FromHex(txData.TxHash) 57 | if len(hashBytes) != common.HashLength { 58 | return common.Hash{}, fmt.Errorf("unexpected hash: %s", txData.TxHash) 59 | } 60 | dataBytes = append(dataBytes, hashBytes...) 61 | } 62 | } 63 | 64 | hash := crypto.Keccak256Hash(dataBytes) 65 | return hash, nil 66 | } 67 | 68 | // BlockRange returns the block range of the DAChunk. 69 | func (c *daChunkV1) BlockRange() (uint64, uint64, error) { 70 | if len(c.blocks) == 0 { 71 | return 0, 0, errors.New("number of blocks is 0") 72 | } 73 | 74 | return c.blocks[0].Number(), c.blocks[len(c.blocks)-1].Number(), nil 75 | } 76 | 77 | // daBatchV1 contains metadata about a batch of DAChunks. 78 | type daBatchV1 struct { 79 | daBatchV0 80 | 81 | blobVersionedHash common.Hash 82 | blob *kzg4844.Blob 83 | z *kzg4844.Point 84 | } 85 | 86 | // newDABatchV1 is a constructor for daBatchV1. 87 | func newDABatchV1(version CodecVersion, batchIndex, l1MessagePopped, totalL1MessagePopped uint64, dataHash, blobVersionedHash, parentBatchHash common.Hash, skippedL1MessageBitmap []byte, blob *kzg4844.Blob, z *kzg4844.Point) *daBatchV1 { 88 | return &daBatchV1{ 89 | daBatchV0: daBatchV0{ 90 | version: version, 91 | batchIndex: batchIndex, 92 | l1MessagePopped: l1MessagePopped, 93 | totalL1MessagePopped: totalL1MessagePopped, 94 | dataHash: dataHash, 95 | parentBatchHash: parentBatchHash, 96 | skippedL1MessageBitmap: skippedL1MessageBitmap, 97 | }, 98 | blobVersionedHash: blobVersionedHash, 99 | blob: blob, 100 | z: z, 101 | } 102 | } 103 | 104 | // Encode serializes the DABatchV1 into bytes. 
105 | func (b *daBatchV1) Encode() []byte { 106 | batchBytes := make([]byte, daBatchV1EncodedMinLength+len(b.skippedL1MessageBitmap)) 107 | batchBytes[daBatchOffsetVersion] = byte(b.version) 108 | binary.BigEndian.PutUint64(batchBytes[daBatchOffsetBatchIndex:daBatchV1OffsetL1MessagePopped], b.batchIndex) 109 | binary.BigEndian.PutUint64(batchBytes[daBatchV1OffsetL1MessagePopped:daBatchV1OffsetTotalL1MessagePopped], b.l1MessagePopped) 110 | binary.BigEndian.PutUint64(batchBytes[daBatchV1OffsetTotalL1MessagePopped:daBatchOffsetDataHash], b.totalL1MessagePopped) 111 | copy(batchBytes[daBatchOffsetDataHash:daBatchV1OffsetBlobVersionedHash], b.dataHash[:]) 112 | copy(batchBytes[daBatchV1OffsetBlobVersionedHash:daBatchV1OffsetParentBatchHash], b.blobVersionedHash[:]) 113 | copy(batchBytes[daBatchV1OffsetParentBatchHash:daBatchV1OffsetSkippedL1MessageBitmap], b.parentBatchHash[:]) 114 | copy(batchBytes[daBatchV1OffsetSkippedL1MessageBitmap:], b.skippedL1MessageBitmap[:]) 115 | return batchBytes 116 | } 117 | 118 | // Hash computes the hash of the serialized DABatch. 119 | func (b *daBatchV1) Hash() common.Hash { 120 | bytes := b.Encode() 121 | return crypto.Keccak256Hash(bytes) 122 | } 123 | 124 | // Blob returns the blob of the batch. 125 | func (b *daBatchV1) Blob() *kzg4844.Blob { 126 | return b.blob 127 | } 128 | 129 | // BlobBytes returns the blob bytes of the batch. 130 | func (b *daBatchV1) BlobBytes() []byte { 131 | return nil 132 | } 133 | 134 | // BlobDataProofForPointEvaluation computes the abi-encoded blob verification data. 135 | func (b *daBatchV1) BlobDataProofForPointEvaluation() ([]byte, error) { 136 | if b.blob == nil { 137 | return nil, errors.New("called BlobDataProofForPointEvaluation with empty blob") 138 | } 139 | if b.z == nil { 140 | return nil, errors.New("called BlobDataProofForPointEvaluation with empty z") 141 | } 142 | 143 | commitment, err := kzg4844.BlobToCommitment(b.blob) 144 | if err != nil { 145 | return nil, fmt.Errorf("failed to create blob commitment: %w", err) 146 | } 147 | 148 | proof, y, err := kzg4844.ComputeProof(b.blob, *b.z) 149 | if err != nil { 150 | return nil, fmt.Errorf("failed to create KZG proof at point, err: %w, z: %v", err, hex.EncodeToString(b.z[:])) 151 | } 152 | 153 | return blobDataProofFromValues(*b.z, y, commitment, proof), nil 154 | } 155 | 156 | // Version returns the version of the DABatch. 157 | func (b *daBatchV1) Version() CodecVersion { 158 | return b.version 159 | } 160 | 161 | // SkippedL1MessageBitmap returns the skipped L1 message bitmap of the DABatch. 162 | func (b *daBatchV1) SkippedL1MessageBitmap() []byte { 163 | return b.skippedL1MessageBitmap 164 | } 165 | 166 | // DataHash returns the data hash of the DABatch. 
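`BlobDataProofForPointEvaluation` above produces input for the EIP-4844 point-evaluation precompile, which verifies that the committed blob polynomial evaluates to y at the challenge point z. Assuming `blobDataProofFromValues` (defined elsewhere in the package) packs the values as `z || y || commitment || proof`, i.e. 32 + 32 + 48 + 48 = 160 bytes, the shape is:

```go
package main

import "fmt"

func main() {
	var (
		z, y       [32]byte // challenge point and claimed evaluation
		commitment [48]byte // KZG commitment to the blob
		proof      [48]byte // KZG opening proof at z
	)
	packed := make([]byte, 0, 160)
	packed = append(packed, z[:]...)
	packed = append(packed, y[:]...)
	packed = append(packed, commitment[:]...)
	packed = append(packed, proof[:]...)
	fmt.Println("proof bytes:", len(packed)) // 160
}
```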
167 | func (b *daBatchV1) DataHash() common.Hash { 168 | return b.dataHash 169 | } 170 | -------------------------------------------------------------------------------- /encoding/codecv2.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/binary" 6 | "encoding/hex" 7 | "errors" 8 | "fmt" 9 | "math/big" 10 | 11 | "github.com/scroll-tech/go-ethereum/common" 12 | "github.com/scroll-tech/go-ethereum/core/types" 13 | "github.com/scroll-tech/go-ethereum/crypto" 14 | "github.com/scroll-tech/go-ethereum/crypto/kzg4844" 15 | "github.com/scroll-tech/go-ethereum/log" 16 | 17 | "github.com/scroll-tech/da-codec/encoding/zstd" 18 | ) 19 | 20 | type DACodecV2 struct { 21 | DACodecV1 22 | } 23 | 24 | // codecv2MaxNumChunks is the maximum number of chunks that a batch can contain. 25 | const codecv2MaxNumChunks = 45 26 | 27 | // Version returns the codec version. 28 | func (d *DACodecV2) Version() CodecVersion { 29 | return CodecV2 30 | } 31 | 32 | // MaxNumChunksPerBatch returns the maximum number of chunks per batch. 33 | func (d *DACodecV2) MaxNumChunksPerBatch() int { 34 | return codecv2MaxNumChunks 35 | } 36 | 37 | // DecodeTxsFromBlob decodes txs from blob bytes and writes to chunks 38 | func (d *DACodecV2) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error { 39 | compressedBytes := bytesFromBlobCanonical(blob) 40 | batchBytes, err := decompressScrollBlobToBatch(append(zstdMagicNumber, compressedBytes[:]...)) 41 | if err != nil { 42 | return err 43 | } 44 | return decodeTxsFromBytes(batchBytes, chunks, d.MaxNumChunksPerBatch()) 45 | } 46 | 47 | // NewDABatch creates a DABatch from the provided Batch. 48 | func (d *DACodecV2) NewDABatch(batch *Batch) (DABatch, error) { 49 | // this encoding can only support a fixed number of chunks per batch 50 | if len(batch.Chunks) > d.MaxNumChunksPerBatch() { 51 | return nil, fmt.Errorf("too many chunks in batch: got %d, maximum allowed is %d", len(batch.Chunks), d.MaxNumChunksPerBatch()) 52 | } 53 | 54 | if len(batch.Chunks) == 0 { 55 | return nil, errors.New("batch must contain at least one chunk") 56 | } 57 | 58 | // batch data hash 59 | dataHash, err := d.computeBatchDataHash(batch.Chunks, batch.TotalL1MessagePoppedBefore) 60 | if err != nil { 61 | return nil, fmt.Errorf("failed to compute batch data hash, index: %d, err: %w", batch.Index, err) 62 | } 63 | 64 | // skipped L1 messages bitmap 65 | skippedL1MessageBitmap, totalL1MessagePoppedAfter, err := constructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore) 66 | if err != nil { 67 | return nil, fmt.Errorf("failed to construct skipped bitmap, index: %d, err: %w", batch.Index, err) 68 | } 69 | 70 | // blob payload 71 | blob, blobVersionedHash, z, _, _, err := d.constructBlobPayload(batch.Chunks, d.MaxNumChunksPerBatch()) 72 | if err != nil { 73 | return nil, fmt.Errorf("failed to construct blob payload, index: %d, err: %w", batch.Index, err) 74 | } 75 | 76 | if totalL1MessagePoppedAfter < batch.TotalL1MessagePoppedBefore { 77 | return nil, fmt.Errorf("batch index: %d, totalL1MessagePoppedAfter (%d) is less than batch.TotalL1MessagePoppedBefore (%d)", batch.Index, totalL1MessagePoppedAfter, batch.TotalL1MessagePoppedBefore) 78 | } 79 | l1MessagePopped := totalL1MessagePoppedAfter - batch.TotalL1MessagePoppedBefore 80 | 81 | daBatch := newDABatchV1( 82 | CodecV2, // version 83 | batch.Index, // batchIndex 84 | l1MessagePopped, // l1MessagePopped 85 | 
totalL1MessagePoppedAfter, // totalL1MessagePopped 86 | dataHash, // dataHash 87 | blobVersionedHash, // blobVersionedHash 88 | batch.ParentBatchHash, // parentBatchHash 89 | skippedL1MessageBitmap, // skippedL1MessageBitmap 90 | blob, // blob 91 | z, // z 92 | ) 93 | 94 | return daBatch, nil 95 | } 96 | 97 | // constructBlobPayload constructs the 4844 blob payload. 98 | func (d *DACodecV2) constructBlobPayload(chunks []*Chunk, maxNumChunksPerBatch int) (*kzg4844.Blob, common.Hash, *kzg4844.Point, []byte, common.Hash, error) { 99 | // metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk) 100 | metadataLength := 2 + maxNumChunksPerBatch*4 101 | 102 | // batchBytes represents the raw (un-compressed and un-padded) blob payload 103 | batchBytes := make([]byte, metadataLength) 104 | 105 | // challenge digest preimage 106 | // 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash 107 | challengePreimage := make([]byte, (1+maxNumChunksPerBatch+1)*common.HashLength) 108 | 109 | // the chunk data hash used for calculating the challenge preimage 110 | var chunkDataHash common.Hash 111 | 112 | // blob metadata: num_chunks 113 | binary.BigEndian.PutUint16(batchBytes[0:], uint16(len(chunks))) 114 | 115 | // encode blob metadata and L2 transactions, 116 | // and simultaneously also build challenge preimage 117 | for chunkID, chunk := range chunks { 118 | currentChunkStartIndex := len(batchBytes) 119 | 120 | for _, block := range chunk.Blocks { 121 | for _, tx := range block.Transactions { 122 | if tx.Type == types.L1MessageTxType { 123 | continue 124 | } 125 | 126 | // encode L2 txs into blob payload 127 | rlpTxData, err := convertTxDataToRLPEncoding(tx) 128 | if err != nil { 129 | return nil, common.Hash{}, nil, nil, common.Hash{}, fmt.Errorf("failed to convert txData to RLP encoding: %w", err) 130 | } 131 | batchBytes = append(batchBytes, rlpTxData...) 132 | } 133 | } 134 | 135 | // blob metadata: chunki_size 136 | chunkSize := len(batchBytes) - currentChunkStartIndex 137 | binary.BigEndian.PutUint32(batchBytes[2+4*chunkID:], uint32(chunkSize)) 138 | 139 | // challenge: compute chunk data hash 140 | chunkDataHash = crypto.Keccak256Hash(batchBytes[currentChunkStartIndex:]) 141 | copy(challengePreimage[common.HashLength+chunkID*common.HashLength:], chunkDataHash[:]) 142 | } 143 | 144 | // if we have fewer than maxNumChunksPerBatch chunks, the rest 145 | // of the blob metadata is correctly initialized to 0, 146 | // but we need to add padding to the challenge preimage 147 | for chunkID := len(chunks); chunkID < maxNumChunksPerBatch; chunkID++ { 148 | // use the last chunk's data hash as padding 149 | copy(challengePreimage[common.HashLength+chunkID*common.HashLength:], chunkDataHash[:]) 150 | } 151 | 152 | // challenge: compute metadata hash 153 | hash := crypto.Keccak256Hash(batchBytes[0:metadataLength]) 154 | copy(challengePreimage[0:], hash[:]) 155 | 156 | // blobBytes represents the compressed blob payload (batchBytes) 157 | blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes) 158 | if err != nil { 159 | return nil, common.Hash{}, nil, nil, common.Hash{}, err 160 | } 161 | 162 | // Only apply this check when the uncompressed batch data has exceeded 128 KiB. 163 | if len(batchBytes) > minCompressedDataCheckSize { 164 | // Check compressed data compatibility. 
165 | if err = checkCompressedDataCompatibility(blobBytes); err != nil { 166 | log.Error("constructBlobPayload: compressed data compatibility check failed", "err", err, "batchBytes", hex.EncodeToString(batchBytes), "blobBytes", hex.EncodeToString(blobBytes)) 167 | return nil, common.Hash{}, nil, nil, common.Hash{}, err 168 | } 169 | } 170 | 171 | if len(blobBytes) > maxEffectiveBlobBytes { 172 | log.Error("constructBlobPayload: Blob payload exceeds maximum size", "size", len(blobBytes), "blobBytes", hex.EncodeToString(blobBytes)) 173 | return nil, common.Hash{}, nil, nil, common.Hash{}, errors.New("Blob payload exceeds maximum size") 174 | } 175 | 176 | // convert raw data to BLSFieldElements 177 | blob, err := makeBlobCanonical(blobBytes) 178 | if err != nil { 179 | return nil, common.Hash{}, nil, nil, common.Hash{}, fmt.Errorf("failed to convert blobBytes to canonical form: %w", err) 180 | } 181 | 182 | // compute blob versioned hash 183 | c, err := kzg4844.BlobToCommitment(blob) 184 | if err != nil { 185 | return nil, common.Hash{}, nil, nil, common.Hash{}, fmt.Errorf("failed to create blob commitment: %w", err) 186 | } 187 | blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c) 188 | 189 | // challenge: append blob versioned hash 190 | copy(challengePreimage[(1+maxNumChunksPerBatch)*common.HashLength:], blobVersionedHash[:]) 191 | 192 | // compute z = challenge_digest % BLS_MODULUS 193 | challengeDigest := crypto.Keccak256Hash(challengePreimage) 194 | pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), blsModulus) 195 | pointBytes := pointBigInt.Bytes() 196 | 197 | // the challenge point z 198 | var z kzg4844.Point 199 | if len(pointBytes) > kzgPointByteSize { 200 | return nil, common.Hash{}, nil, nil, common.Hash{}, fmt.Errorf("pointBytes length exceeds %d bytes, got %d bytes", kzgPointByteSize, len(pointBytes)) 201 | } 202 | start := kzgPointByteSize - len(pointBytes) 203 | copy(z[start:], pointBytes) 204 | 205 | return blob, blobVersionedHash, &z, blobBytes, challengeDigest, nil 206 | } 207 | 208 | // NewDABatchFromBytes decodes the given byte slice into a DABatch. 209 | // Note: This function only populates the batch header, it leaves the blob-related fields empty. 
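As `DecodeTxsFromBlob` earlier in this file shows, a codecv2 blob carries a zstd frame with its magic number stripped, so decoding re-prepends `zstdMagicNumber` before decompressing. A hedged sketch of that reconstruction, assuming the standard zstd frame magic bytes `0x28 0xB5 0x2F 0xFD`:

```go
package main

import "fmt"

func main() {
	zstdMagic := []byte{0x28, 0xb5, 0x2f, 0xfd} // standard zstd frame magic
	compressedBody := []byte{ /* frame bytes read from the blob */ }
	frame := append(append([]byte{}, zstdMagic...), compressedBody...)
	fmt.Printf("reconstructed frame header: % x\n", frame[:4])
}
```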
210 | func (d *DACodecV2) NewDABatchFromBytes(data []byte) (DABatch, error) { 211 | if len(data) < daBatchV1EncodedMinLength { 212 | return nil, fmt.Errorf("insufficient data for DABatch, expected at least %d bytes but got %d", daBatchV1EncodedMinLength, len(data)) 213 | } 214 | 215 | if CodecVersion(data[daBatchOffsetVersion]) != CodecV2 { 216 | return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV2, data[daBatchOffsetVersion]) 217 | } 218 | 219 | return newDABatchV1( 220 | CodecVersion(data[daBatchOffsetVersion]), // version 221 | binary.BigEndian.Uint64(data[daBatchOffsetBatchIndex:daBatchV1OffsetL1MessagePopped]), // batchIndex 222 | binary.BigEndian.Uint64(data[daBatchV1OffsetL1MessagePopped:daBatchV1OffsetTotalL1MessagePopped]), // l1MessagePopped 223 | binary.BigEndian.Uint64(data[daBatchV1OffsetTotalL1MessagePopped:daBatchOffsetDataHash]), // totalL1MessagePopped 224 | common.BytesToHash(data[daBatchOffsetDataHash:daBatchV1OffsetBlobVersionedHash]), // dataHash 225 | common.BytesToHash(data[daBatchV1OffsetBlobVersionedHash:daBatchV1OffsetParentBatchHash]), // blobVersionedHash 226 | common.BytesToHash(data[daBatchV1OffsetParentBatchHash:daBatchV1OffsetSkippedL1MessageBitmap]), // parentBatchHash 227 | data[daBatchV1OffsetSkippedL1MessageBitmap:], // skippedL1MessageBitmap 228 | nil, // blob 229 | nil, // z 230 | ), nil 231 | } 232 | 233 | // EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a single chunk. 234 | func (d *DACodecV2) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64, uint64, error) { 235 | batchBytes, err := constructBatchPayloadInBlob([]*Chunk{c}, d) 236 | if err != nil { 237 | return 0, 0, fmt.Errorf("failed to construct batch payload in blob: %w", err) 238 | } 239 | blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes) 240 | if err != nil { 241 | return 0, 0, fmt.Errorf("failed to compress scroll batch bytes: %w", err) 242 | } 243 | return uint64(len(batchBytes)), calculatePaddedBlobSize(uint64(len(blobBytes))), nil 244 | } 245 | 246 | // EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a batch. 247 | func (d *DACodecV2) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64, uint64, error) { 248 | batchBytes, err := constructBatchPayloadInBlob(b.Chunks, d) 249 | if err != nil { 250 | return 0, 0, err 251 | } 252 | blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes) 253 | if err != nil { 254 | return 0, 0, err 255 | } 256 | return uint64(len(batchBytes)), calculatePaddedBlobSize(uint64(len(blobBytes))), nil 257 | } 258 | 259 | // checkCompressedDataCompatibility checks the compressed data compatibility for a batch's chunks. 260 | // It constructs a batch payload, compresses the data, and checks the compressed data compatibility. 261 | func (d *DACodecV2) checkCompressedDataCompatibility(chunks []*Chunk) (bool, error) { 262 | batchBytes, err := constructBatchPayloadInBlob(chunks, d) 263 | if err != nil { 264 | return false, fmt.Errorf("failed to construct batch payload in blob: %w", err) 265 | } 266 | blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes) 267 | if err != nil { 268 | return false, fmt.Errorf("failed to compress scroll batch bytes: %w", err) 269 | } 270 | // Only apply this check when the uncompressed batch data has exceeded 128 KiB. 
271 | if len(batchBytes) <= minCompressedDataCheckSize { 272 | return true, nil 273 | } 274 | if err = checkCompressedDataCompatibility(blobBytes); err != nil { 275 | log.Warn("Compressed data compatibility check failed", "err", err, "batchBytes", hex.EncodeToString(batchBytes), "blobBytes", hex.EncodeToString(blobBytes)) 276 | return false, nil 277 | } 278 | return true, nil 279 | } 280 | 281 | // CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk. 282 | // It constructs a batch payload, compresses the data, and checks the compressed data compatibility if the uncompressed data exceeds 128 KiB. 283 | func (d *DACodecV2) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) { 284 | return d.checkCompressedDataCompatibility([]*Chunk{c}) 285 | } 286 | 287 | // CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch. 288 | // It constructs a batch payload, compresses the data, and checks the compressed data compatibility if the uncompressed data exceeds 128 KiB. 289 | func (d *DACodecV2) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) { 290 | return d.checkCompressedDataCompatibility(b.Chunks) 291 | } 292 | -------------------------------------------------------------------------------- /encoding/codecv3.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "encoding/binary" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | 9 | "github.com/scroll-tech/go-ethereum/common" 10 | ) 11 | 12 | type DACodecV3 struct { 13 | DACodecV2 14 | } 15 | 16 | // Version returns the codec version. 17 | func (d *DACodecV3) Version() CodecVersion { 18 | return CodecV3 19 | } 20 | 21 | // NewDABatch creates a DABatch from the provided Batch. 
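Callers treat the compatibility checks above as a gate: a `false` result means the compressed payload would not pass the check (so the batch should be split or, from codecv4 on, posted uncompressed), while a non-nil error means construction or compression itself failed. A hedged usage sketch against the exported V2 types:

```go
package sketch

import "github.com/scroll-tech/da-codec/encoding"

// canCompress is an illustrative helper, not part of the repository.
func canCompress(codec *encoding.DACodecV2, batch *encoding.Batch) (bool, error) {
	ok, err := codec.CheckBatchCompressedDataCompatibility(batch)
	if err != nil {
		return false, err // payload construction or compression failed
	}
	return ok, nil // false means: fall back instead of compressing
}
```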
22 | func (d *DACodecV3) NewDABatch(batch *Batch) (DABatch, error) { 23 | // this encoding can only support a fixed number of chunks per batch 24 | if len(batch.Chunks) > d.MaxNumChunksPerBatch() { 25 | return nil, fmt.Errorf("too many chunks in batch: got %d, maximum allowed is %d", len(batch.Chunks), d.MaxNumChunksPerBatch()) 26 | } 27 | 28 | if len(batch.Chunks) == 0 { 29 | return nil, errors.New("batch must contain at least one chunk") 30 | } 31 | 32 | if len(batch.Chunks[len(batch.Chunks)-1].Blocks) == 0 { 33 | return nil, errors.New("too few blocks in last chunk of the batch") 34 | } 35 | 36 | // batch data hash 37 | dataHash, err := d.computeBatchDataHash(batch.Chunks, batch.TotalL1MessagePoppedBefore) 38 | if err != nil { 39 | return nil, err 40 | } 41 | 42 | // skipped L1 messages bitmap 43 | skippedL1MessageBitmap, totalL1MessagePoppedAfter, err := constructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore) 44 | if err != nil { 45 | return nil, err 46 | } 47 | 48 | // blob payload 49 | blob, blobVersionedHash, z, blobBytes, challengeDigest, err := d.constructBlobPayload(batch.Chunks, d.MaxNumChunksPerBatch()) 50 | if err != nil { 51 | return nil, err 52 | } 53 | 54 | lastChunk := batch.Chunks[len(batch.Chunks)-1] 55 | lastBlock := lastChunk.Blocks[len(lastChunk.Blocks)-1] 56 | 57 | if totalL1MessagePoppedAfter < batch.TotalL1MessagePoppedBefore { 58 | return nil, fmt.Errorf("batch index: %d, totalL1MessagePoppedAfter (%d) is less than batch.TotalL1MessagePoppedBefore (%d)", batch.Index, totalL1MessagePoppedAfter, batch.TotalL1MessagePoppedBefore) 59 | } 60 | l1MessagePopped := totalL1MessagePoppedAfter - batch.TotalL1MessagePoppedBefore 61 | 62 | return newDABatchV3( 63 | CodecV3, // version 64 | batch.Index, // batchIndex 65 | l1MessagePopped, // l1MessagePopped 66 | totalL1MessagePoppedAfter, // totalL1MessagePopped 67 | lastBlock.Header.Time, // lastBlockTimestamp 68 | dataHash, // dataHash 69 | batch.ParentBatchHash, // parentBatchHash 70 | blobVersionedHash, // blobVersionedHash 71 | skippedL1MessageBitmap, // skippedL1MessageBitmap 72 | blob, // blob 73 | z, // z 74 | blobBytes, // blobBytes 75 | challengeDigest, // challengeDigest 76 | ) 77 | } 78 | 79 | // NewDABatchFromBytes decodes the given byte slice into a DABatch. 80 | // Note: This function only populates the batch header, it leaves the blob-related fields and skipped L1 message bitmap empty. 
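Unlike the v0/v1 headers, the v3 header has no trailing bitmap and is strictly fixed length, which is why the decoder below checks `len(data) != daBatchV3EncodedLength` rather than a minimum. Summing the fields written by `daBatchV3.Encode` (shown later in codecv3_types.go) gives the expected size:

```go
package main

import "fmt"

func main() {
	size := 1 + // version
		8 + 8 + 8 + // batchIndex, l1MessagePopped, totalL1MessagePopped
		32 + 32 + 32 + // dataHash, blobVersionedHash, parentBatchHash
		8 + // lastBlockTimestamp
		2*32 // blobDataProof: z and y, one 32-byte word each
	fmt.Println("daBatchV3 encoded length:", size) // 193
}
```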
81 | func (d *DACodecV3) NewDABatchFromBytes(data []byte) (DABatch, error) { 82 | if len(data) != daBatchV3EncodedLength { 83 | return nil, fmt.Errorf("invalid data length for DABatch, expected %d bytes but got %d", daBatchV3EncodedLength, len(data)) 84 | } 85 | 86 | if CodecVersion(data[daBatchOffsetVersion]) != CodecV3 { 87 | return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV3, data[daBatchOffsetVersion]) 88 | } 89 | 90 | return newDABatchV3WithProof( 91 | CodecVersion(data[daBatchOffsetVersion]), // version 92 | binary.BigEndian.Uint64(data[daBatchOffsetBatchIndex:daBatchV3OffsetL1MessagePopped]), // batchIndex 93 | binary.BigEndian.Uint64(data[daBatchV3OffsetL1MessagePopped:daBatchV3OffsetTotalL1MessagePopped]), // l1MessagePopped 94 | binary.BigEndian.Uint64(data[daBatchV3OffsetTotalL1MessagePopped:daBatchOffsetDataHash]), // totalL1MessagePopped 95 | binary.BigEndian.Uint64(data[daBatchV3OffsetLastBlockTimestamp:daBatchV3OffsetBlobDataProof]), // lastBlockTimestamp 96 | common.BytesToHash(data[daBatchOffsetDataHash:daBatchV3OffsetBlobVersionedHash]), // dataHash 97 | common.BytesToHash(data[daBatchV3OffsetParentBatchHash:daBatchV3OffsetLastBlockTimestamp]), // parentBatchHash 98 | common.BytesToHash(data[daBatchV3OffsetBlobVersionedHash:daBatchV3OffsetParentBatchHash]), // blobVersionedHash 99 | nil, // skippedL1MessageBitmap 100 | nil, // blob 101 | nil, // z 102 | nil, // blobBytes 103 | common.Hash{}, // challengeDigest 104 | [2]common.Hash{ // blobDataProof 105 | common.BytesToHash(data[daBatchV3OffsetBlobDataProof : daBatchV3OffsetBlobDataProof+kzgPointByteSize]), 106 | common.BytesToHash(data[daBatchV3OffsetBlobDataProof+kzgPointByteSize : daBatchV3EncodedLength]), 107 | }, 108 | ), nil 109 | } 110 | 111 | // EstimateChunkL1CommitGas calculates the total L1 commit gas for this chunk approximately. 112 | func (d *DACodecV3) EstimateChunkL1CommitGas(c *Chunk) (uint64, error) { 113 | // Reuse the V2 implementation; the actual gas cost differs slightly, but it is sufficient for estimation in practice, 114 | // since extraGasCost already over-estimates the total. 115 | totalL1CommitGas, err := d.DACodecV2.EstimateChunkL1CommitGas(c) 116 | if err != nil { 117 | return 0, fmt.Errorf("failed to estimate L1 commit gas for chunk: %w", err) 118 | } 119 | totalL1CommitGas += blobTxPointEvaluationPrecompileGas // plus gas cost for the point-evaluation precompile call. 120 | return totalL1CommitGas, nil 121 | } 122 | 123 | // EstimateBatchL1CommitGas calculates the total L1 commit gas for this batch approximately. 124 | func (d *DACodecV3) EstimateBatchL1CommitGas(b *Batch) (uint64, error) { 125 | // Reuse the V2 implementation; the actual gas cost differs slightly, but it is sufficient for estimation in practice, 126 | // since extraGasCost already over-estimates the total. 127 | totalL1CommitGas, err := d.DACodecV2.EstimateBatchL1CommitGas(b) 128 | if err != nil { 129 | return 0, fmt.Errorf("failed to estimate L1 commit gas for batch: %w", err) 130 | } 131 | totalL1CommitGas += blobTxPointEvaluationPrecompileGas // plus gas cost for the point-evaluation precompile call. 132 | return totalL1CommitGas, nil 133 | } 134 | 135 | // JSONFromBytes converts the bytes to a daBatchV3 and then marshals it to JSON. 
136 | func (d *DACodecV3) JSONFromBytes(data []byte) ([]byte, error) { 137 | batch, err := d.NewDABatchFromBytes(data) 138 | if err != nil { 139 | return nil, fmt.Errorf("failed to decode DABatch from bytes: %w", err) 140 | } 141 | 142 | jsonBytes, err := json.Marshal(batch) 143 | if err != nil { 144 | return nil, fmt.Errorf("failed to marshal DABatch to JSON, version %d, hash %s: %w", batch.Version(), batch.Hash(), err) 145 | } 146 | 147 | return jsonBytes, nil 148 | } 149 | -------------------------------------------------------------------------------- /encoding/codecv3_types.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "encoding/binary" 5 | "encoding/hex" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | 10 | "github.com/scroll-tech/go-ethereum/common" 11 | "github.com/scroll-tech/go-ethereum/crypto" 12 | "github.com/scroll-tech/go-ethereum/crypto/kzg4844" 13 | ) 14 | 15 | // daBatchV3 contains metadata about a batch of DAChunks. 16 | type daBatchV3 struct { 17 | daBatchV0 18 | 19 | blobVersionedHash common.Hash 20 | lastBlockTimestamp uint64 21 | blobDataProof [2]common.Hash 22 | blob *kzg4844.Blob 23 | z *kzg4844.Point 24 | blobBytes []byte 25 | challengeDigest common.Hash 26 | } 27 | 28 | // newDABatchV3 is a constructor for daBatchV3 that calls blobDataProofForPICircuit internally. 29 | func newDABatchV3(version CodecVersion, batchIndex, l1MessagePopped, totalL1MessagePopped, lastBlockTimestamp uint64, 30 | dataHash, parentBatchHash, blobVersionedHash common.Hash, skippedL1MessageBitmap []byte, blob *kzg4844.Blob, 31 | z *kzg4844.Point, blobBytes []byte, challengeDigest common.Hash, 32 | ) (*daBatchV3, error) { 33 | daBatch := &daBatchV3{ 34 | daBatchV0: daBatchV0{ 35 | version: version, 36 | batchIndex: batchIndex, 37 | l1MessagePopped: l1MessagePopped, 38 | totalL1MessagePopped: totalL1MessagePopped, 39 | dataHash: dataHash, 40 | parentBatchHash: parentBatchHash, 41 | skippedL1MessageBitmap: skippedL1MessageBitmap, 42 | }, 43 | blobVersionedHash: blobVersionedHash, 44 | lastBlockTimestamp: lastBlockTimestamp, 45 | blob: blob, 46 | z: z, 47 | blobBytes: blobBytes, 48 | challengeDigest: challengeDigest, 49 | } 50 | 51 | proof, err := daBatch.blobDataProofForPICircuit() 52 | if err != nil { 53 | return nil, err 54 | } 55 | 56 | daBatch.blobDataProof = proof 57 | 58 | return daBatch, nil 59 | } 60 | 61 | // newDABatchV3WithProof is a constructor for daBatchV3 that allows directly passing blobDataProof. 62 | func newDABatchV3WithProof(version CodecVersion, batchIndex, l1MessagePopped, totalL1MessagePopped, lastBlockTimestamp uint64, 63 | dataHash, parentBatchHash, blobVersionedHash common.Hash, skippedL1MessageBitmap []byte, 64 | blob *kzg4844.Blob, z *kzg4844.Point, blobBytes []byte, challengeDigest common.Hash, blobDataProof [2]common.Hash, 65 | ) *daBatchV3 { 66 | return &daBatchV3{ 67 | daBatchV0: daBatchV0{ 68 | version: version, 69 | batchIndex: batchIndex, 70 | l1MessagePopped: l1MessagePopped, 71 | totalL1MessagePopped: totalL1MessagePopped, 72 | dataHash: dataHash, 73 | parentBatchHash: parentBatchHash, 74 | skippedL1MessageBitmap: skippedL1MessageBitmap, 75 | }, 76 | blobVersionedHash: blobVersionedHash, 77 | lastBlockTimestamp: lastBlockTimestamp, 78 | blob: blob, 79 | z: z, 80 | blobBytes: blobBytes, 81 | challengeDigest: challengeDigest, 82 | blobDataProof: blobDataProof, // Set blobDataProof directly 83 | } 84 | } 85 | 86 | // Encode serializes the DABatchV3 into bytes. 
87 | func (b *daBatchV3) Encode() []byte { 88 | batchBytes := make([]byte, daBatchV3EncodedLength) 89 | batchBytes[daBatchOffsetVersion] = byte(b.version) 90 | binary.BigEndian.PutUint64(batchBytes[daBatchOffsetBatchIndex:daBatchV3OffsetL1MessagePopped], b.batchIndex) 91 | binary.BigEndian.PutUint64(batchBytes[daBatchV3OffsetL1MessagePopped:daBatchV3OffsetTotalL1MessagePopped], b.l1MessagePopped) 92 | binary.BigEndian.PutUint64(batchBytes[daBatchV3OffsetTotalL1MessagePopped:daBatchOffsetDataHash], b.totalL1MessagePopped) 93 | copy(batchBytes[daBatchOffsetDataHash:daBatchV3OffsetBlobVersionedHash], b.dataHash[:]) 94 | copy(batchBytes[daBatchV3OffsetBlobVersionedHash:daBatchV3OffsetParentBatchHash], b.blobVersionedHash[:]) 95 | copy(batchBytes[daBatchV3OffsetParentBatchHash:daBatchV3OffsetLastBlockTimestamp], b.parentBatchHash[:]) 96 | binary.BigEndian.PutUint64(batchBytes[daBatchV3OffsetLastBlockTimestamp:daBatchV3OffsetBlobDataProof], b.lastBlockTimestamp) 97 | copy(batchBytes[daBatchV3OffsetBlobDataProof:daBatchV3OffsetBlobDataProof+kzgPointByteSize], b.blobDataProof[0].Bytes()) 98 | copy(batchBytes[daBatchV3OffsetBlobDataProof+kzgPointByteSize:daBatchV3EncodedLength], b.blobDataProof[1].Bytes()) 99 | return batchBytes 100 | } 101 | 102 | // Hash computes the hash of the serialized DABatch. 103 | func (b *daBatchV3) Hash() common.Hash { 104 | bytes := b.Encode() 105 | return crypto.Keccak256Hash(bytes) 106 | } 107 | 108 | // blobDataProofForPICircuit computes the abi-encoded blob verification data. 109 | func (b *daBatchV3) blobDataProofForPICircuit() ([2]common.Hash, error) { 110 | if b.blob == nil { 111 | return [2]common.Hash{}, errors.New("called blobDataProofForPICircuit with empty blob") 112 | } 113 | if b.z == nil { 114 | return [2]common.Hash{}, errors.New("called blobDataProofForPICircuit with empty z") 115 | } 116 | 117 | _, y, err := kzg4844.ComputeProof(b.blob, *b.z) 118 | if err != nil { 119 | return [2]common.Hash{}, fmt.Errorf("failed to create KZG proof at point, err: %w, z: %v", err, hex.EncodeToString(b.z[:])) 120 | } 121 | 122 | // Memory layout of result: 123 | // | z | y | 124 | // |---------|---------| 125 | // | bytes32 | bytes32 | 126 | var result [2]common.Hash 127 | result[0] = common.BytesToHash(b.z[:]) 128 | result[1] = common.BytesToHash(y[:]) 129 | 130 | return result, nil 131 | } 132 | 133 | // BlobDataProofForPointEvaluation computes the abi-encoded blob verification data. 134 | func (b *daBatchV3) BlobDataProofForPointEvaluation() ([]byte, error) { 135 | if b.blob == nil { 136 | return nil, errors.New("called BlobDataProofForPointEvaluation with empty blob") 137 | } 138 | if b.z == nil { 139 | return nil, errors.New("called BlobDataProofForPointEvaluation with empty z") 140 | } 141 | 142 | commitment, err := kzg4844.BlobToCommitment(b.blob) 143 | if err != nil { 144 | return nil, fmt.Errorf("failed to create blob commitment: %w", err) 145 | } 146 | 147 | proof, y, err := kzg4844.ComputeProof(b.blob, *b.z) 148 | if err != nil { 149 | return nil, fmt.Errorf("failed to create KZG proof at point, err: %w, z: %v", err, hex.EncodeToString(b.z[:])) 150 | } 151 | 152 | return blobDataProofFromValues(*b.z, y, commitment, proof), nil 153 | } 154 | 155 | // Blob returns the blob of the batch. 156 | func (b *daBatchV3) Blob() *kzg4844.Blob { 157 | return b.blob 158 | } 159 | 160 | // BlobBytes returns the blob bytes of the batch. 
161 | func (b *daBatchV3) BlobBytes() []byte { 162 | return b.blobBytes 163 | } 164 | 165 | // MarshalJSON implements the custom JSON serialization for daBatchV3. 166 | // This method is designed to provide prover with batch info in snake_case format. 167 | func (b *daBatchV3) MarshalJSON() ([]byte, error) { 168 | type daBatchV3JSON struct { 169 | Version CodecVersion `json:"version"` 170 | BatchIndex uint64 `json:"batch_index"` 171 | L1MessagePopped uint64 `json:"l1_message_popped"` 172 | TotalL1MessagePopped uint64 `json:"total_l1_message_popped"` 173 | DataHash string `json:"data_hash"` 174 | ParentBatchHash string `json:"parent_batch_hash"` 175 | BlobVersionedHash string `json:"blob_versioned_hash"` 176 | LastBlockTimestamp uint64 `json:"last_block_timestamp"` 177 | BlobDataProof [2]string `json:"blob_data_proof"` 178 | } 179 | 180 | return json.Marshal(&daBatchV3JSON{ 181 | Version: b.version, 182 | BatchIndex: b.batchIndex, 183 | L1MessagePopped: b.l1MessagePopped, 184 | TotalL1MessagePopped: b.totalL1MessagePopped, 185 | DataHash: b.dataHash.Hex(), 186 | ParentBatchHash: b.parentBatchHash.Hex(), 187 | BlobVersionedHash: b.blobVersionedHash.Hex(), 188 | LastBlockTimestamp: b.lastBlockTimestamp, 189 | BlobDataProof: [2]string{ 190 | b.blobDataProof[0].Hex(), 191 | b.blobDataProof[1].Hex(), 192 | }, 193 | }) 194 | } 195 | 196 | // Version returns the version of the DABatch. 197 | func (b *daBatchV3) Version() CodecVersion { 198 | return b.version 199 | } 200 | 201 | // SkippedL1MessageBitmap returns the skipped L1 message bitmap of the DABatch. 202 | func (b *daBatchV3) SkippedL1MessageBitmap() []byte { 203 | return b.skippedL1MessageBitmap 204 | } 205 | 206 | // DataHash returns the data hash of the DABatch. 207 | func (b *daBatchV3) DataHash() common.Hash { 208 | return b.dataHash 209 | } 210 | 211 | // ChallengeDigest returns the challenge digest of the DABatch. 212 | func (b *daBatchV3) ChallengeDigest() common.Hash { 213 | return b.challengeDigest 214 | } 215 | -------------------------------------------------------------------------------- /encoding/codecv4.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/binary" 6 | "encoding/hex" 7 | "encoding/json" 8 | "errors" 9 | "fmt" 10 | "math/big" 11 | 12 | "github.com/scroll-tech/go-ethereum/common" 13 | "github.com/scroll-tech/go-ethereum/core/types" 14 | "github.com/scroll-tech/go-ethereum/crypto" 15 | "github.com/scroll-tech/go-ethereum/crypto/kzg4844" 16 | "github.com/scroll-tech/go-ethereum/log" 17 | 18 | "github.com/scroll-tech/da-codec/encoding/zstd" 19 | ) 20 | 21 | type DACodecV4 struct { 22 | DACodecV3 23 | forcedVersion *CodecVersion 24 | } 25 | 26 | // Version returns the codec version. 
27 | func (d *DACodecV4) Version() CodecVersion { 28 | if d.forcedVersion != nil { 29 | return *d.forcedVersion 30 | } 31 | return CodecV4 32 | } 33 | 34 | // DecodeTxsFromBlob decodes txs from blob bytes and writes them to chunks. 35 | func (d *DACodecV4) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error { 36 | rawBytes := bytesFromBlobCanonical(blob) 37 | 38 | // the first byte is a compression flag: 1 means the payload is zstd-compressed, 0 means it is raw 39 | if rawBytes[0] == 0x1 { 40 | batchBytes, err := decompressScrollBlobToBatch(append(zstdMagicNumber, rawBytes[1:]...)) 41 | if err != nil { 42 | return err 43 | } 44 | return decodeTxsFromBytes(batchBytes, chunks, d.MaxNumChunksPerBatch()) 45 | } else { 46 | return decodeTxsFromBytes(rawBytes[1:], chunks, d.MaxNumChunksPerBatch()) 47 | } 48 | } 49 | 50 | // NewDABatch creates a DABatch from the provided Batch. 51 | func (d *DACodecV4) NewDABatch(batch *Batch) (DABatch, error) { 52 | // this encoding can only support a fixed number of chunks per batch 53 | if len(batch.Chunks) > d.MaxNumChunksPerBatch() { 54 | return nil, fmt.Errorf("too many chunks in batch: got %d, maximum allowed is %d", len(batch.Chunks), d.MaxNumChunksPerBatch()) 55 | } 56 | 57 | if len(batch.Chunks) == 0 { 58 | return nil, errors.New("batch must contain at least one chunk") 59 | } 60 | 61 | if len(batch.Chunks[len(batch.Chunks)-1].Blocks) == 0 { 62 | return nil, errors.New("too few blocks in last chunk of the batch") 63 | } 64 | 65 | // batch data hash 66 | dataHash, err := d.computeBatchDataHash(batch.Chunks, batch.TotalL1MessagePoppedBefore) 67 | if err != nil { 68 | return nil, err 69 | } 70 | 71 | // skipped L1 messages bitmap 72 | skippedL1MessageBitmap, totalL1MessagePoppedAfter, err := constructSkippedBitmap(batch.Index, batch.Chunks, batch.TotalL1MessagePoppedBefore) 73 | if err != nil { 74 | return nil, err 75 | } 76 | 77 | enableCompression, err := d.CheckBatchCompressedDataCompatibility(batch) 78 | if err != nil { 79 | return nil, err 80 | } 81 | 82 | // blob payload 83 | blob, blobVersionedHash, z, blobBytes, challengeDigest, err := d.constructBlobPayload(batch.Chunks, d.MaxNumChunksPerBatch(), enableCompression) 84 | if err != nil { 85 | return nil, err 86 | } 87 | 88 | lastChunk := batch.Chunks[len(batch.Chunks)-1] 89 | lastBlock := lastChunk.Blocks[len(lastChunk.Blocks)-1] 90 | 91 | if totalL1MessagePoppedAfter < batch.TotalL1MessagePoppedBefore { 92 | return nil, fmt.Errorf("batch index: %d, totalL1MessagePoppedAfter (%d) is less than batch.TotalL1MessagePoppedBefore (%d)", batch.Index, totalL1MessagePoppedAfter, batch.TotalL1MessagePoppedBefore) 93 | } 94 | l1MessagePopped := totalL1MessagePoppedAfter - batch.TotalL1MessagePoppedBefore 95 | 96 | return newDABatchV3( 97 | d.Version(), // version 98 | batch.Index, // batchIndex 99 | l1MessagePopped, // l1MessagePopped 100 | totalL1MessagePoppedAfter, // totalL1MessagePopped 101 | lastBlock.Header.Time, // lastBlockTimestamp 102 | dataHash, // dataHash 103 | batch.ParentBatchHash, // parentBatchHash 104 | blobVersionedHash, // blobVersionedHash 105 | skippedL1MessageBitmap, // skippedL1MessageBitmap 106 | blob, // blob 107 | z, // z 108 | blobBytes, // blobBytes 109 | challengeDigest, // challengeDigest 110 | ) 111 | } 112 | 113 | // NewDABatchFromBytes decodes the given byte slice into a DABatch. 114 | // Note: This function only populates the batch header, it leaves the blob-related fields and skipped L1 message bitmap empty. 
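`DecodeTxsFromBlob` above reads a one-byte envelope flag that `constructBlobPayload` writes further below: 0x01 means the rest of the payload is a zstd body (magic stripped), 0x00 means raw batch bytes. A minimal round-trip sketch of that envelope, with illustrative payloads:

```go
package main

import "fmt"

func main() {
	// constructBlobPayload prepends the flag; DecodeTxsFromBlob switches on it.
	envelope := append([]byte{0x00}, []byte("raw batch bytes")...)
	switch envelope[0] {
	case 0x01:
		fmt.Println("compressed payload:", len(envelope)-1, "bytes")
	case 0x00:
		fmt.Println("raw payload:", string(envelope[1:]))
	default:
		fmt.Println("unknown compression flag:", envelope[0])
	}
}
```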
115 | func (d *DACodecV4) NewDABatchFromBytes(data []byte) (DABatch, error) { 116 | if len(data) != daBatchV3EncodedLength { 117 | return nil, fmt.Errorf("invalid data length for DABatch, expected %d bytes but got %d", daBatchV3EncodedLength, len(data)) 118 | } 119 | 120 | if CodecVersion(data[daBatchOffsetVersion]) != d.Version() { 121 | return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", d.Version(), data[daBatchOffsetVersion]) 122 | } 123 | 124 | return newDABatchV3WithProof( 125 | CodecVersion(data[daBatchOffsetVersion]), // version 126 | binary.BigEndian.Uint64(data[daBatchOffsetBatchIndex:daBatchV3OffsetL1MessagePopped]), // batchIndex 127 | binary.BigEndian.Uint64(data[daBatchV3OffsetL1MessagePopped:daBatchV3OffsetTotalL1MessagePopped]), // l1MessagePopped 128 | binary.BigEndian.Uint64(data[daBatchV3OffsetTotalL1MessagePopped:daBatchOffsetDataHash]), // totalL1MessagePopped 129 | binary.BigEndian.Uint64(data[daBatchV3OffsetLastBlockTimestamp:daBatchV3OffsetBlobDataProof]), // lastBlockTimestamp 130 | common.BytesToHash(data[daBatchOffsetDataHash:daBatchV3OffsetBlobVersionedHash]), // dataHash 131 | common.BytesToHash(data[daBatchV3OffsetParentBatchHash:daBatchV3OffsetLastBlockTimestamp]), // parentBatchHash 132 | common.BytesToHash(data[daBatchV3OffsetBlobVersionedHash:daBatchV3OffsetParentBatchHash]), // blobVersionedHash 133 | nil, // skippedL1MessageBitmap 134 | nil, // blob 135 | nil, // z 136 | nil, // blobBytes 137 | common.Hash{}, // challengeDigest 138 | [2]common.Hash{ // blobDataProof 139 | common.BytesToHash(data[daBatchV3OffsetBlobDataProof : daBatchV3OffsetBlobDataProof+kzgPointByteSize]), 140 | common.BytesToHash(data[daBatchV3OffsetBlobDataProof+kzgPointByteSize : daBatchV3EncodedLength]), 141 | }, 142 | ), nil 143 | } 144 | 145 | // constructBlobPayload constructs the 4844 blob payload. 146 | func (d *DACodecV4) constructBlobPayload(chunks []*Chunk, maxNumChunksPerBatch int, enableCompression bool) (*kzg4844.Blob, common.Hash, *kzg4844.Point, []byte, common.Hash, error) { 147 | // metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk) 148 | metadataLength := 2 + maxNumChunksPerBatch*4 149 | 150 | // batchBytes represents the raw (un-compressed and un-padded) blob payload 151 | batchBytes := make([]byte, metadataLength) 152 | 153 | // challenge digest preimage 154 | // 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash 155 | challengePreimage := make([]byte, (1+maxNumChunksPerBatch+1)*common.HashLength) 156 | 157 | // the chunk data hash used for calculating the challenge preimage 158 | var chunkDataHash common.Hash 159 | 160 | // blob metadata: num_chunks 161 | binary.BigEndian.PutUint16(batchBytes[0:], uint16(len(chunks))) 162 | 163 | // encode blob metadata and L2 transactions, 164 | // and simultaneously also build challenge preimage 165 | for chunkID, chunk := range chunks { 166 | currentChunkStartIndex := len(batchBytes) 167 | 168 | for _, block := range chunk.Blocks { 169 | for _, tx := range block.Transactions { 170 | if tx.Type == types.L1MessageTxType { 171 | continue 172 | } 173 | 174 | // encode L2 txs into blob payload 175 | rlpTxData, err := convertTxDataToRLPEncoding(tx) 176 | if err != nil { 177 | return nil, common.Hash{}, nil, nil, common.Hash{}, fmt.Errorf("failed to convert txData to RLP encoding: %w", err) 178 | } 179 | batchBytes = append(batchBytes, rlpTxData...) 
180 | } 181 | } 182 | 183 | // blob metadata: chunki_size 184 | chunkSize := len(batchBytes) - currentChunkStartIndex 185 | binary.BigEndian.PutUint32(batchBytes[2+4*chunkID:], uint32(chunkSize)) 186 | 187 | // challenge: compute chunk data hash 188 | chunkDataHash = crypto.Keccak256Hash(batchBytes[currentChunkStartIndex:]) 189 | copy(challengePreimage[common.HashLength+chunkID*common.HashLength:], chunkDataHash[:]) 190 | } 191 | 192 | // if we have fewer than maxNumChunksPerBatch chunks, the rest 193 | // of the blob metadata is correctly initialized to 0, 194 | // but we need to add padding to the challenge preimage 195 | for chunkID := len(chunks); chunkID < maxNumChunksPerBatch; chunkID++ { 196 | // use the last chunk's data hash as padding 197 | copy(challengePreimage[common.HashLength+chunkID*common.HashLength:], chunkDataHash[:]) 198 | } 199 | 200 | // challenge: compute metadata hash 201 | hash := crypto.Keccak256Hash(batchBytes[0:metadataLength]) 202 | copy(challengePreimage[0:], hash[:]) 203 | 204 | var blobBytes []byte 205 | if enableCompression { 206 | // blobBytes represents the compressed blob payload (batchBytes) 207 | var err error 208 | blobBytes, err = zstd.CompressScrollBatchBytes(batchBytes) 209 | if err != nil { 210 | return nil, common.Hash{}, nil, nil, common.Hash{}, err 211 | } 212 | // Check compressed data compatibility. 213 | if err = checkCompressedDataCompatibility(blobBytes); err != nil { 214 | log.Error("ConstructBlobPayload: compressed data compatibility check failed", "err", err, "batchBytes", hex.EncodeToString(batchBytes), "blobBytes", hex.EncodeToString(blobBytes)) 215 | return nil, common.Hash{}, nil, nil, common.Hash{}, err 216 | } 217 | blobBytes = append([]byte{1}, blobBytes...) 218 | } else { 219 | blobBytes = append([]byte{0}, batchBytes...) 
	}

	if len(blobBytes) > maxEffectiveBlobBytes {
		log.Error("ConstructBlobPayload: Blob payload exceeds maximum size", "size", len(blobBytes), "blobBytes", hex.EncodeToString(blobBytes))
		return nil, common.Hash{}, nil, nil, common.Hash{}, errors.New("blob payload exceeds maximum size")
	}

	// convert raw data to BLSFieldElements
	blob, err := makeBlobCanonical(blobBytes)
	if err != nil {
		return nil, common.Hash{}, nil, nil, common.Hash{}, fmt.Errorf("failed to convert blobBytes to canonical form: %w", err)
	}

	// compute blob versioned hash
	c, err := kzg4844.BlobToCommitment(blob)
	if err != nil {
		return nil, common.Hash{}, nil, nil, common.Hash{}, fmt.Errorf("failed to create blob commitment: %w", err)
	}
	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)

	// challenge: append blob versioned hash
	copy(challengePreimage[(1+maxNumChunksPerBatch)*common.HashLength:], blobVersionedHash[:])

	// compute z = challenge_digest % BLS_MODULUS
	challengeDigest := crypto.Keccak256Hash(challengePreimage)
	pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), blsModulus)
	pointBytes := pointBigInt.Bytes()

	// the challenge point z
	var z kzg4844.Point
	if len(pointBytes) > kzgPointByteSize {
		return nil, common.Hash{}, nil, nil, common.Hash{}, fmt.Errorf("pointBytes length exceeds %d bytes, got %d bytes", kzgPointByteSize, len(pointBytes))
	}
	start := kzgPointByteSize - len(pointBytes)
	copy(z[start:], pointBytes)

	return blob, blobVersionedHash, &z, blobBytes, challengeDigest, nil
}

func (d *DACodecV4) estimateL1CommitBatchSizeAndBlobSize(chunks []*Chunk) (uint64, uint64, error) {
	batchBytes, err := constructBatchPayloadInBlob(chunks, d)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to construct batch payload in blob: %w", err)
	}
	var blobBytesLength uint64
	enableCompression, err := d.CheckBatchCompressedDataCompatibility(&Batch{Chunks: chunks})
	if err != nil {
		return 0, 0, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
	}
	if enableCompression {
		blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes)
		if err != nil {
			return 0, 0, err
		}
		// the +1 accounts for the leading compression-flag byte of the blob envelope
		blobBytesLength = 1 + uint64(len(blobBytes))
	} else {
		blobBytesLength = 1 + uint64(len(batchBytes))
	}
	return uint64(len(batchBytes)), calculatePaddedBlobSize(blobBytesLength), nil
}

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a single chunk.
func (d *DACodecV4) EstimateChunkL1CommitBatchSizeAndBlobSize(c *Chunk) (uint64, uint64, error) {
	return d.estimateL1CommitBatchSizeAndBlobSize([]*Chunk{c})
}

// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a batch.
func (d *DACodecV4) EstimateBatchL1CommitBatchSizeAndBlobSize(b *Batch) (uint64, uint64, error) {
	return d.estimateL1CommitBatchSizeAndBlobSize(b.Chunks)
}

// checkCompressedDataCompatibility checks the compressed data compatibility for a batch's chunks.
// It constructs a batch payload, compresses the data, and checks the compressed data compatibility.
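// It returns (false, nil) when the compressed data is merely incompatible (the
// failure is logged as a warning), and a non-nil error only when constructing
// the payload or compressing it fails.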
293 | func (d *DACodecV4) checkCompressedDataCompatibility(chunks []*Chunk) (bool, error) { 294 | batchBytes, err := constructBatchPayloadInBlob(chunks, d) 295 | if err != nil { 296 | return false, fmt.Errorf("failed to construct batch payload in blob: %w", err) 297 | } 298 | blobBytes, err := zstd.CompressScrollBatchBytes(batchBytes) 299 | if err != nil { 300 | return false, fmt.Errorf("failed to compress scroll batch bytes: %w", err) 301 | } 302 | if err = checkCompressedDataCompatibility(blobBytes); err != nil { 303 | log.Warn("Compressed data compatibility check failed", "err", err, "batchBytes", hex.EncodeToString(batchBytes), "blobBytes", hex.EncodeToString(blobBytes)) 304 | return false, nil 305 | } 306 | return true, nil 307 | } 308 | 309 | // CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk. 310 | func (d *DACodecV4) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) { 311 | return d.checkCompressedDataCompatibility([]*Chunk{c}) 312 | } 313 | 314 | // CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch. 315 | func (d *DACodecV4) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) { 316 | return d.checkCompressedDataCompatibility(b.Chunks) 317 | } 318 | 319 | // JSONFromBytes converts the bytes to a daBatchV3 and then marshals it to JSON. 320 | func (d *DACodecV4) JSONFromBytes(data []byte) ([]byte, error) { 321 | batch, err := d.NewDABatchFromBytes(data) // this is different from the V3 implementation 322 | if err != nil { 323 | return nil, fmt.Errorf("failed to decode DABatch from bytes: %w", err) 324 | } 325 | 326 | jsonBytes, err := json.Marshal(batch) 327 | if err != nil { 328 | return nil, fmt.Errorf("failed to marshal DABatch to JSON, version %d, hash %s: %w", batch.Version(), batch.Hash(), err) 329 | } 330 | 331 | return jsonBytes, nil 332 | } 333 | -------------------------------------------------------------------------------- /encoding/codecv5.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | type DACodecV5 struct { 4 | DACodecV4 5 | } 6 | 7 | func NewDACodecV5() *DACodecV5 { 8 | v := CodecV5 9 | return &DACodecV5{ 10 | DACodecV4: DACodecV4{ 11 | forcedVersion: &v, 12 | }, 13 | } 14 | } 15 | 16 | // MaxNumChunksPerBatch returns the maximum number of chunks per batch. 
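// Note: CodecV5 restricts batches to a single chunk; callers querying this
// codec are expected to build batches accordingly.
//
// Illustrative usage (a sketch, not part of the original source):
//
//	codec := NewDACodecV5()
//	fmt.Println(codec.MaxNumChunksPerBatch()) // 1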
17 | func (d *DACodecV5) MaxNumChunksPerBatch() int { 18 | return 1 19 | } 20 | -------------------------------------------------------------------------------- /encoding/codecv6.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | type DACodecV6 struct { 4 | DACodecV4 5 | } 6 | 7 | func NewDACodecV6() *DACodecV6 { 8 | v := CodecV6 9 | return &DACodecV6{ 10 | DACodecV4: DACodecV4{ 11 | forcedVersion: &v, 12 | }, 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /encoding/codecv7.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "math" 10 | 11 | "github.com/scroll-tech/go-ethereum/common" 12 | "github.com/scroll-tech/go-ethereum/core/types" 13 | "github.com/scroll-tech/go-ethereum/crypto" 14 | "github.com/scroll-tech/go-ethereum/crypto/kzg4844" 15 | "github.com/scroll-tech/go-ethereum/log" 16 | 17 | "github.com/scroll-tech/da-codec/encoding/zstd" 18 | ) 19 | 20 | type DACodecV7 struct{} 21 | 22 | // Version returns the codec version. 23 | func (d *DACodecV7) Version() CodecVersion { 24 | return CodecV7 25 | } 26 | 27 | // MaxNumChunksPerBatch returns the maximum number of chunks per batch. 28 | func (d *DACodecV7) MaxNumChunksPerBatch() int { 29 | return math.MaxInt 30 | } 31 | 32 | // NewDABlock creates a new DABlock from the given Block and the total number of L1 messages popped before. 33 | func (d *DACodecV7) NewDABlock(block *Block, totalL1MessagePoppedBefore uint64) (DABlock, error) { 34 | return newDABlockV7FromBlockWithValidation(block, &totalL1MessagePoppedBefore) 35 | } 36 | 37 | // NewDAChunk creates a new DAChunk from the given Chunk and the total number of L1 messages popped before. 38 | // Note: In DACodecV7 there is no notion of chunks. Blobs contain the entire batch data without any information of Chunks within. 39 | // However, for compatibility reasons this function is implemented to create a DAChunk from a Chunk. 40 | // This way we can still uniquely identify a set of blocks and their L1 messages. 41 | func (d *DACodecV7) NewDAChunk(chunk *Chunk, totalL1MessagePoppedBefore uint64) (DAChunk, error) { 42 | if chunk == nil { 43 | return nil, errors.New("chunk is nil") 44 | } 45 | 46 | if len(chunk.Blocks) == 0 { 47 | return nil, errors.New("number of blocks is 0") 48 | } 49 | 50 | if len(chunk.Blocks) > math.MaxUint16 { 51 | return nil, fmt.Errorf("number of blocks (%d) exceeds maximum allowed (%d)", len(chunk.Blocks), math.MaxUint16) 52 | } 53 | 54 | blocks := make([]DABlock, 0, len(chunk.Blocks)) 55 | txs := make([][]*types.TransactionData, 0, len(chunk.Blocks)) 56 | 57 | if err := iterateAndVerifyBlocksAndL1Messages(chunk.PrevL1MessageQueueHash, chunk.PostL1MessageQueueHash, chunk.Blocks, &totalL1MessagePoppedBefore, func(initialBlockNumber uint64) {}, func(block *Block, daBlock *daBlockV7) error { 58 | blocks = append(blocks, daBlock) 59 | txs = append(txs, block.Transactions) 60 | 61 | return nil 62 | }); err != nil { 63 | return nil, fmt.Errorf("failed to iterate and verify blocks and L1 messages: %w", err) 64 | } 65 | 66 | daChunk := newDAChunkV7( 67 | blocks, 68 | txs, 69 | ) 70 | 71 | return daChunk, nil 72 | } 73 | 74 | // NewDABatch creates a DABatch including blob from the provided Batch. 
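// Construction proceeds in three steps: (1) sanity-check the batch's blocks
// against any chunks supplied for backwards compatibility, (2) encode the
// payload and compress it when beneficial, wrapping it in the blob envelope,
// and (3) derive the blob versioned hash and challenge digest from the blob.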
75 | func (d *DACodecV7) NewDABatch(batch *Batch) (DABatch, error) { 76 | if len(batch.Blocks) == 0 { 77 | return nil, errors.New("batch must contain at least one block") 78 | } 79 | 80 | if err := checkBlocksBatchVSChunksConsistency(batch); err != nil { 81 | return nil, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err) 82 | } 83 | 84 | blob, blobVersionedHash, blobBytes, challengeDigest, err := d.constructBlob(batch) 85 | if err != nil { 86 | return nil, fmt.Errorf("failed to construct blob: %w", err) 87 | } 88 | 89 | daBatch, err := newDABatchV7(CodecV7, batch.Index, blobVersionedHash, batch.ParentBatchHash, blob, blobBytes, challengeDigest) 90 | if err != nil { 91 | return nil, fmt.Errorf("failed to construct DABatch: %w", err) 92 | } 93 | 94 | return daBatch, nil 95 | } 96 | 97 | func (d *DACodecV7) constructBlob(batch *Batch) (*kzg4844.Blob, common.Hash, []byte, common.Hash, error) { 98 | blobBytes := make([]byte, blobEnvelopeV7OffsetPayload) 99 | 100 | payloadBytes, err := d.constructBlobPayload(batch) 101 | if err != nil { 102 | return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to construct blob payload: %w", err) 103 | } 104 | 105 | compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */) 106 | if err != nil { 107 | return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to check batch compressed data compatibility: %w", err) 108 | } 109 | 110 | isCompressedFlag := uint8(0x0) 111 | if enableCompression { 112 | isCompressedFlag = 0x1 113 | payloadBytes = compressedPayloadBytes 114 | } 115 | 116 | sizeSlice := encodeSize3Bytes(uint32(len(payloadBytes))) 117 | 118 | blobBytes[blobEnvelopeV7OffsetVersion] = uint8(CodecV7) 119 | copy(blobBytes[blobEnvelopeV7OffsetByteSize:blobEnvelopeV7OffsetCompressedFlag], sizeSlice) 120 | blobBytes[blobEnvelopeV7OffsetCompressedFlag] = isCompressedFlag 121 | blobBytes = append(blobBytes, payloadBytes...) 
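	// blobBytes now holds the complete envelope:
	// version (1 byte) | payload size (3 bytes, big-endian) | compression flag (1 byte) | payload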
122 | 123 | if len(blobBytes) > maxEffectiveBlobBytes { 124 | log.Error("ConstructBlob: Blob payload exceeds maximum size", "size", len(blobBytes), "blobBytes", hex.EncodeToString(blobBytes)) 125 | return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("blob exceeds maximum size: got %d, allowed %d", len(blobBytes), maxEffectiveBlobBytes) 126 | } 127 | 128 | // convert raw data to BLSFieldElements 129 | blob, err := makeBlobCanonical(blobBytes) 130 | if err != nil { 131 | return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to convert blobBytes to canonical form: %w", err) 132 | } 133 | 134 | // compute blob versioned hash 135 | c, err := kzg4844.BlobToCommitment(blob) 136 | if err != nil { 137 | return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to create blob commitment: %w", err) 138 | } 139 | blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c) 140 | 141 | // compute challenge digest for codecv7, different from previous versions, 142 | // the blob bytes are padded to the max effective blob size, which is 131072 / 32 * 31 due to the blob encoding 143 | paddedBlobBytes := make([]byte, maxEffectiveBlobBytes) 144 | copy(paddedBlobBytes, blobBytes) 145 | 146 | challengeDigest := crypto.Keccak256Hash(crypto.Keccak256(paddedBlobBytes), blobVersionedHash[:]) 147 | 148 | return blob, blobVersionedHash, blobBytes, challengeDigest, nil 149 | } 150 | 151 | func (d *DACodecV7) constructBlobPayload(batch *Batch) ([]byte, error) { 152 | blobPayload := blobPayloadV7{ 153 | prevL1MessageQueueHash: batch.PrevL1MessageQueueHash, 154 | postL1MessageQueueHash: batch.PostL1MessageQueueHash, 155 | blocks: batch.Blocks, 156 | } 157 | 158 | return blobPayload.Encode() 159 | } 160 | 161 | // NewDABatchFromBytes decodes the given byte slice into a DABatch. 162 | // Note: This function only populates the batch header, it leaves the blob-related fields empty. 
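// The input must be exactly daBatchV7EncodedLength bytes:
// version (1) | batchIndex (8) | blobVersionedHash (32) | parentBatchHash (32).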
func (d *DACodecV7) NewDABatchFromBytes(data []byte) (DABatch, error) {
	daBatch, err := decodeDABatchV7(data)
	if err != nil {
		return nil, fmt.Errorf("failed to decode DA batch: %w", err)
	}

	if daBatch.version != CodecV7 {
		return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV7, daBatch.version)
	}

	return daBatch, nil
}

func (d *DACodecV7) NewDABatchFromParams(batchIndex uint64, blobVersionedHash, parentBatchHash common.Hash) (DABatch, error) {
	return newDABatchV7(CodecV7, batchIndex, blobVersionedHash, parentBatchHash, nil, nil, common.Hash{})
}

func (d *DACodecV7) DecodeDAChunksRawTx(_ [][]byte) ([]*DAChunkRawTx, error) {
	return nil, errors.New("DecodeDAChunksRawTx is not implemented for DACodecV7, use DecodeBlob instead")
}

func (d *DACodecV7) DecodeBlob(blob *kzg4844.Blob) (DABlobPayload, error) {
	rawBytes := bytesFromBlobCanonical(blob)

	// read the blob envelope header
	version := rawBytes[blobEnvelopeV7OffsetVersion]
	if CodecVersion(version) != CodecV7 {
		return nil, fmt.Errorf("codec version mismatch: expected %d but found %d", CodecV7, version)
	}

	// read the data size
	blobPayloadSize := decodeSize3Bytes(rawBytes[blobEnvelopeV7OffsetByteSize:blobEnvelopeV7OffsetCompressedFlag])
	if blobPayloadSize+blobEnvelopeV7OffsetPayload > uint32(len(rawBytes)) {
		return nil, fmt.Errorf("blob envelope size exceeds the raw data size: %d + %d > %d", blobPayloadSize, blobEnvelopeV7OffsetPayload, len(rawBytes))
	}

	payloadBytes := rawBytes[blobEnvelopeV7OffsetPayload : blobEnvelopeV7OffsetPayload+blobPayloadSize]

	// read the compressed flag and decompress if needed
	compressed := rawBytes[blobEnvelopeV7OffsetCompressedFlag]
	if compressed != 0x0 && compressed != 0x1 {
		return nil, fmt.Errorf("invalid compressed flag: %d", compressed)
	}
	if compressed == 0x1 {
		var err error
		if payloadBytes, err = decompressV7Bytes(payloadBytes); err != nil {
			return nil, fmt.Errorf("failed to decompress blob payload: %w", err)
		}
	}

	// read the payload
	payload, err := decodeBlobPayloadV7(payloadBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to decode blob payload: %w", err)
	}

	return payload, nil
}

// DecodeTxsFromBlob is a no-op for DACodecV7: there is no notion of chunks in
// this version, so transactions are recovered via DecodeBlob instead.
func (d *DACodecV7) DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error {
	return nil
}

// checkCompressedDataCompatibility checks the compressed data compatibility for a batch.
// It constructs a blob payload, compresses the data, and checks the compressed data compatibility.
// flag checkLength indicates whether to check the length of the compressed data against the original data.
// If checkLength is true, this function returns whether compression should be enabled based on the compressed data's length, which is used when doing batch bytes encoding.
// If checkLength is false, this function returns the result of the compatibility check, which is used when determining the chunk and batch contents.
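// In short (illustrative of the return contract):
//
//	compressed, ok, err := d.checkCompressedDataCompatibility(payload, true)
//	// ok == true:  use `compressed` (zk-compatible and strictly smaller)
//	// ok == false: keep the payload uncompressed
//	// err != nil:  compression itself failed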
231 | func (d *DACodecV7) checkCompressedDataCompatibility(payloadBytes []byte, checkLength bool) ([]byte, bool, error) { 232 | compressedPayloadBytes, err := zstd.CompressScrollBatchBytes(payloadBytes) 233 | if err != nil { 234 | return nil, false, fmt.Errorf("failed to compress blob payload: %w", err) 235 | } 236 | 237 | if err = checkCompressedDataCompatibilityV7(compressedPayloadBytes); err != nil { 238 | log.Warn("Compressed data compatibility check failed", "err", err, "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes)) 239 | return nil, false, nil 240 | } 241 | 242 | // check if compressed data is bigger or equal to the original data -> no need to compress 243 | if checkLength && len(compressedPayloadBytes) >= len(payloadBytes) { 244 | log.Warn("Compressed data is bigger or equal to the original data", "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes)) 245 | return nil, false, nil 246 | } 247 | 248 | return compressedPayloadBytes, true, nil 249 | } 250 | 251 | // CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk. 252 | func (d *DACodecV7) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) { 253 | // filling the needed fields for the batch used in the check 254 | b := &Batch{ 255 | Chunks: []*Chunk{c}, 256 | PrevL1MessageQueueHash: c.PrevL1MessageQueueHash, 257 | PostL1MessageQueueHash: c.PostL1MessageQueueHash, 258 | Blocks: c.Blocks, 259 | } 260 | 261 | return d.CheckBatchCompressedDataCompatibility(b) 262 | } 263 | 264 | // CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch. 265 | func (d *DACodecV7) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) { 266 | if len(b.Blocks) == 0 { 267 | return false, errors.New("batch must contain at least one block") 268 | } 269 | 270 | if err := checkBlocksBatchVSChunksConsistency(b); err != nil { 271 | return false, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err) 272 | } 273 | 274 | payloadBytes, err := d.constructBlobPayload(b) 275 | if err != nil { 276 | return false, fmt.Errorf("failed to construct blob payload: %w", err) 277 | } 278 | 279 | // This check is only used for sanity checks. If the check fails, it means that the compression did not work as expected. 280 | // rollup-relayer will try popping the last chunk of the batch (or last block of the chunk when in proposing chunks) and try again to see if it works as expected. 281 | // Since length check is used for DA and proving efficiency, it does not need to be checked here. 
282 | _, compatible, err := d.checkCompressedDataCompatibility(payloadBytes, false /* checkLength */) 283 | if err != nil { 284 | return false, fmt.Errorf("failed to check batch compressed data compatibility: %w", err) 285 | } 286 | 287 | return compatible, nil 288 | } 289 | 290 | func (d *DACodecV7) estimateL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64, uint64, error) { 291 | if len(batch.Blocks) == 0 { 292 | return 0, 0, errors.New("batch must contain at least one block") 293 | } 294 | 295 | blobBytes := make([]byte, blobEnvelopeV7OffsetPayload) 296 | 297 | payloadBytes, err := d.constructBlobPayload(batch) 298 | if err != nil { 299 | return 0, 0, fmt.Errorf("failed to construct blob payload: %w", err) 300 | } 301 | 302 | compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */) 303 | if err != nil { 304 | return 0, 0, fmt.Errorf("failed to check batch compressed data compatibility: %w", err) 305 | } 306 | 307 | if enableCompression { 308 | blobBytes = append(blobBytes, compressedPayloadBytes...) 309 | } else { 310 | blobBytes = append(blobBytes, payloadBytes...) 311 | } 312 | 313 | return blobEnvelopeV7OffsetPayload + uint64(len(payloadBytes)), calculatePaddedBlobSize(uint64(len(blobBytes))), nil 314 | } 315 | 316 | // EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a single chunk. 317 | func (d *DACodecV7) EstimateChunkL1CommitBatchSizeAndBlobSize(chunk *Chunk) (uint64, uint64, error) { 318 | return d.estimateL1CommitBatchSizeAndBlobSize(&Batch{ 319 | Blocks: chunk.Blocks, 320 | PrevL1MessageQueueHash: chunk.PrevL1MessageQueueHash, 321 | PostL1MessageQueueHash: chunk.PostL1MessageQueueHash, 322 | }) 323 | } 324 | 325 | // EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a batch. 326 | func (d *DACodecV7) EstimateBatchL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64, uint64, error) { 327 | return d.estimateL1CommitBatchSizeAndBlobSize(batch) 328 | } 329 | 330 | // EstimateBlockL1CommitCalldataSize calculates the calldata size in l1 commit for this block approximately. 331 | // Note: For CodecV7 calldata is constant independently of how many blocks or batches are submitted. 332 | func (d *DACodecV7) EstimateBlockL1CommitCalldataSize(block *Block) (uint64, error) { 333 | return 0, nil 334 | } 335 | 336 | // EstimateChunkL1CommitCalldataSize calculates the calldata size needed for committing a chunk to L1 approximately. 337 | // Note: For CodecV7 calldata is constant independently of how many blocks or batches are submitted. There is no notion 338 | // of chunks in this version. 339 | func (d *DACodecV7) EstimateChunkL1CommitCalldataSize(chunk *Chunk) (uint64, error) { 340 | return 0, nil 341 | } 342 | 343 | // EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately. 344 | // Note: For CodecV7 calldata is constant independently of how many blocks or batches are submitted. 345 | // Version + BatchHeader 346 | func (d *DACodecV7) EstimateBatchL1CommitCalldataSize(batch *Batch) (uint64, error) { 347 | return 1 + daBatchV7EncodedLength, nil 348 | } 349 | 350 | // EstimateChunkL1CommitGas calculates the total L1 commit gas for this chunk approximately. 351 | // Note: For CodecV7 calldata is constant independently of how many blocks or batches are submitted. There is no notion 352 | // of chunks in this version. 
353 | func (d *DACodecV7) EstimateChunkL1CommitGas(chunk *Chunk) (uint64, error) { 354 | return 0, nil 355 | } 356 | 357 | // EstimateBatchL1CommitGas calculates the total L1 commit gas for this batch approximately. 358 | func (d *DACodecV7) EstimateBatchL1CommitGas(batch *Batch) (uint64, error) { 359 | // TODO: adjust this after contracts are implemented 360 | var totalL1CommitGas uint64 361 | 362 | // Add extra gas costs 363 | totalL1CommitGas += extraGasCost // constant to account for ops like _getAdmin, _implementation, _requireNotPaused, etc 364 | totalL1CommitGas += 4 * coldSloadGas // 4 one-time cold sload for commitBatch 365 | totalL1CommitGas += sstoreGas // 1 time sstore 366 | totalL1CommitGas += baseTxGas // base gas for tx 367 | totalL1CommitGas += calldataNonZeroByteGas // version in calldata 368 | 369 | return totalL1CommitGas, nil 370 | } 371 | 372 | // JSONFromBytes converts the bytes to a DABatch and then marshals it to JSON. 373 | func (d *DACodecV7) JSONFromBytes(data []byte) ([]byte, error) { 374 | batch, err := d.NewDABatchFromBytes(data) 375 | if err != nil { 376 | return nil, fmt.Errorf("failed to decode DABatch from bytes: %w", err) 377 | } 378 | 379 | jsonBytes, err := json.Marshal(batch) 380 | if err != nil { 381 | return nil, fmt.Errorf("failed to marshal DABatch to JSON, version %d, hash %s: %w", batch.Version(), batch.Hash(), err) 382 | } 383 | 384 | return jsonBytes, nil 385 | } 386 | -------------------------------------------------------------------------------- /encoding/codecv7_types.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "encoding/hex" 7 | "encoding/json" 8 | "errors" 9 | "fmt" 10 | "math" 11 | "math/big" 12 | 13 | "github.com/klauspost/compress/zstd" 14 | "github.com/scroll-tech/go-ethereum/common" 15 | "github.com/scroll-tech/go-ethereum/core/types" 16 | "github.com/scroll-tech/go-ethereum/crypto" 17 | "github.com/scroll-tech/go-ethereum/crypto/kzg4844" 18 | ) 19 | 20 | // Below is the encoding for `BatchHeader` V7, total 73 bytes. 21 | // * Field Bytes Type Index Comments 22 | // * version 1 uint8 0 The batch version 23 | // * batchIndex 8 uint64 1 The index of the batch 24 | // * blobVersionedHash 32 bytes32 9 The versioned hash of the blob with this batch’s data 25 | // * parentBatchHash 32 bytes32 41 The parent batch hash 26 | 27 | const ( 28 | daBatchV7EncodedLength = 73 29 | daBatchV7OffsetBlobVersionedHash = 9 30 | daBatchV7OffsetParentBatchHash = 41 31 | ) 32 | 33 | // Below is the encoding format for BlobEnvelopeV7. 34 | // * Field Bytes Type Index Comments 35 | // * version 1 uint8 0 The version of the DA codec (batch/blob) 36 | // * n_bytes[1] 1 uint8 1 Value denoting the number of bytes, n_bytes[1]*256^2 37 | // * n_bytes[2] 1 uint8 2 Value denoting the number of bytes, n_bytes[2]*256 38 | // * n_bytes[3] 1 uint8 3 Value denoting the number of bytes, n_bytes[3] 39 | // * flag 1 bool 4 1-byte flag to denote zstd-encoded/raw bytes 40 | // * payload N bytes 5 Possibly zstd-encoded payload bytes 41 | // * padding (4096*31 - (N+5)) bytes N+5 Padding to align to 4096*31 bytes 42 | 43 | const ( 44 | blobEnvelopeV7OffsetVersion = 0 45 | blobEnvelopeV7OffsetByteSize = 1 46 | blobEnvelopeV7OffsetCompressedFlag = 4 47 | blobEnvelopeV7OffsetPayload = 5 48 | ) 49 | 50 | // Below is the encoding for blobPayloadV7. 
51 | // * Field Bytes Type Index Comments 52 | // * prevL1MessageQueueHash 32 bytes32 0 hash of the L1 message queue at the end of previous batch 53 | // * postL1MessageQueueHash 32 bytes32 32 hash of the L1 message queue at the end of this batch 54 | // * initialL2BlockNumber 8 uint64 64 The initial L2 block number in this batch 55 | // * numBlocks 2 uint16 72 The number of blocks in this batch 56 | // * block[0] 52 DABlock7 74 The first block in this batch 57 | // * block[i] 52 DABlock7 74+52*i The (i+1)th block in this batch 58 | // * block[n-1] 52 DABlock7 74+52*(n-1) The last block in this batch 59 | // * l2Transactions dynamic bytes 74+52*n L2 transactions for this batch 60 | 61 | const ( 62 | blobPayloadV7MinEncodedLength = 2*common.HashLength + 8 + 2 63 | blobPayloadV7OffsetPrevL1MessageQueue = 0 64 | blobPayloadV7OffsetPostL1MessageQueue = 32 65 | blobPayloadV7OffsetInitialL2BlockNumber = 64 66 | blobPayloadV7OffsetNumBlocks = 72 67 | blobPayloadV7OffsetBlocks = 74 68 | ) 69 | 70 | // Below is the encoding for DABlockV7, total 52 bytes. 71 | // * Field Bytes Type Index Comments 72 | // * timestamp 8 uint64 0 The timestamp of this block. 73 | // * baseFee 32 uint256 8 The base fee of this block. 74 | // * gasLimit 8 uint64 40 The gas limit of this block. 75 | // * numTransactions 2 uint16 48 The number of transactions in this block, both L1 & L2 txs. 76 | // * numL1Messages 2 uint16 50 The number of l1 messages in this block. 77 | 78 | const ( 79 | daBlockV7BlockContextEncodedLength = 52 80 | daBlockV7OffsetTimestamp = 0 81 | daBlockV7OffsetBaseFee = 8 82 | daBlockV7OffsetGasLimit = 40 83 | daBlockV7OffsetNumTransactions = 48 84 | daBlockV7OffsetNumL1Messages = 50 85 | ) 86 | 87 | // daBatchV7 contains V7 batch metadata and payload. 88 | type daBatchV7 struct { 89 | version CodecVersion 90 | batchIndex uint64 91 | blobVersionedHash common.Hash 92 | parentBatchHash common.Hash 93 | 94 | blob *kzg4844.Blob 95 | blobBytes []byte 96 | challengeDigest common.Hash 97 | } 98 | 99 | func newDABatchV7(version CodecVersion, batchIndex uint64, blobVersionedHash, parentBatchHash common.Hash, blob *kzg4844.Blob, blobBytes []byte, challengeDigest common.Hash) (*daBatchV7, error) { 100 | daBatch := &daBatchV7{ 101 | version: version, 102 | batchIndex: batchIndex, 103 | blobVersionedHash: blobVersionedHash, 104 | parentBatchHash: parentBatchHash, 105 | blob: blob, 106 | blobBytes: blobBytes, 107 | challengeDigest: challengeDigest, 108 | } 109 | 110 | return daBatch, nil 111 | } 112 | 113 | func decodeDABatchV7(data []byte) (*daBatchV7, error) { 114 | if len(data) != daBatchV7EncodedLength { 115 | return nil, fmt.Errorf("invalid data length for DABatchV7, expected %d bytes but got %d", daBatchV7EncodedLength, len(data)) 116 | } 117 | 118 | version := CodecVersion(data[daBatchOffsetVersion]) 119 | batchIndex := binary.BigEndian.Uint64(data[daBatchOffsetBatchIndex:daBatchV7OffsetBlobVersionedHash]) 120 | blobVersionedHash := common.BytesToHash(data[daBatchV7OffsetBlobVersionedHash:daBatchV7OffsetParentBatchHash]) 121 | parentBatchHash := common.BytesToHash(data[daBatchV7OffsetParentBatchHash:daBatchV7EncodedLength]) 122 | 123 | return newDABatchV7(version, batchIndex, blobVersionedHash, parentBatchHash, nil, nil, common.Hash{}) 124 | } 125 | 126 | // Encode serializes the dABatchV7 into bytes. 
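// A round trip through decodeDABatchV7 restores only the header fields; blob,
// blobBytes and challengeDigest are left empty by design.
//
// Sketch (not part of the original source):
//
//	enc := b.Encode()                  // daBatchV7EncodedLength bytes
//	decoded, _ := decodeDABatchV7(enc) // header-only daBatchV7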
127 | func (b *daBatchV7) Encode() []byte { 128 | batchBytes := make([]byte, daBatchV7EncodedLength) 129 | batchBytes[daBatchOffsetVersion] = byte(b.version) 130 | binary.BigEndian.PutUint64(batchBytes[daBatchOffsetBatchIndex:daBatchV7OffsetBlobVersionedHash], b.batchIndex) 131 | copy(batchBytes[daBatchV7OffsetBlobVersionedHash:daBatchV7OffsetParentBatchHash], b.blobVersionedHash[:]) 132 | copy(batchBytes[daBatchV7OffsetParentBatchHash:daBatchV7EncodedLength], b.parentBatchHash[:]) 133 | return batchBytes 134 | } 135 | 136 | // Hash computes the hash of the serialized DABatch. 137 | func (b *daBatchV7) Hash() common.Hash { 138 | return crypto.Keccak256Hash(b.Encode()) 139 | } 140 | 141 | // BlobDataProofForPointEvaluation computes the abi-encoded blob verification data. 142 | func (b *daBatchV7) BlobDataProofForPointEvaluation() ([]byte, error) { 143 | // z = challengeDigest % BLS_MODULUS 144 | pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(b.challengeDigest[:]), blsModulus) 145 | pointBytes := pointBigInt.Bytes() 146 | 147 | var z kzg4844.Point 148 | if len(pointBytes) > kzgPointByteSize { 149 | return nil, fmt.Errorf("pointBytes length exceeds %d bytes, got %d bytes", kzgPointByteSize, len(pointBytes)) 150 | } 151 | start := kzgPointByteSize - len(pointBytes) 152 | copy(z[start:], pointBytes) 153 | 154 | commitment, err := kzg4844.BlobToCommitment(b.blob) 155 | if err != nil { 156 | return nil, fmt.Errorf("failed to create blob commitment: %w", err) 157 | } 158 | 159 | proof, y, err := kzg4844.ComputeProof(b.blob, z) 160 | if err != nil { 161 | return nil, fmt.Errorf("failed to create KZG proof at point, err: %w, z: %v", err, hex.EncodeToString(z[:])) 162 | } 163 | 164 | return blobDataProofFromValues(z, y, commitment, proof), nil 165 | } 166 | 167 | // Blob returns the blob of the batch. 168 | func (b *daBatchV7) Blob() *kzg4844.Blob { 169 | return b.blob 170 | } 171 | 172 | // BlobBytes returns the blob bytes of the batch. 173 | func (b *daBatchV7) BlobBytes() []byte { 174 | return b.blobBytes 175 | } 176 | 177 | // MarshalJSON implements the custom JSON serialization for daBatchV7. 178 | // This method is designed to provide prover with batch info in snake_case format. 179 | func (b *daBatchV7) MarshalJSON() ([]byte, error) { 180 | type daBatchV7JSON struct { 181 | Version CodecVersion `json:"version"` 182 | BatchIndex uint64 `json:"batch_index"` 183 | BlobVersionedHash string `json:"blob_versioned_hash"` 184 | ParentBatchHash string `json:"parent_batch_hash"` 185 | } 186 | 187 | return json.Marshal(&daBatchV7JSON{ 188 | Version: b.version, 189 | BatchIndex: b.batchIndex, 190 | BlobVersionedHash: b.blobVersionedHash.Hex(), 191 | ParentBatchHash: b.parentBatchHash.Hex(), 192 | }) 193 | } 194 | 195 | // Version returns the version of the DABatch. 196 | func (b *daBatchV7) Version() CodecVersion { 197 | return b.version 198 | } 199 | 200 | // SkippedL1MessageBitmap returns the skipped L1 message bitmap of the DABatch. 201 | // Note: For daBatchV7, there is no skipped L1 message bitmap, therefore the function returns nil. 202 | func (b *daBatchV7) SkippedL1MessageBitmap() []byte { 203 | return nil 204 | } 205 | 206 | // DataHash returns the data hash of the DABatch. 207 | // Note: For daBatchV7, there is no data hash, therefore the function returns an empty hash. 
208 | func (b *daBatchV7) DataHash() common.Hash { 209 | return common.Hash{} 210 | } 211 | 212 | type blobPayloadV7 struct { 213 | prevL1MessageQueueHash common.Hash 214 | postL1MessageQueueHash common.Hash 215 | 216 | // used for encoding 217 | blocks []*Block 218 | 219 | // used for decoding 220 | daBlocks []DABlock 221 | l2Transactions []types.Transactions 222 | } 223 | 224 | func (b *blobPayloadV7) PrevL1MessageQueueHash() common.Hash { 225 | return b.prevL1MessageQueueHash 226 | } 227 | 228 | func (b *blobPayloadV7) PostL1MessageQueueHash() common.Hash { 229 | return b.postL1MessageQueueHash 230 | } 231 | 232 | func (b *blobPayloadV7) Blocks() []DABlock { 233 | return b.daBlocks 234 | } 235 | 236 | func (b *blobPayloadV7) Transactions() []types.Transactions { 237 | return b.l2Transactions 238 | } 239 | 240 | func (b *blobPayloadV7) Encode() ([]byte, error) { 241 | payloadBytes := make([]byte, blobPayloadV7MinEncodedLength) 242 | 243 | copy(payloadBytes[blobPayloadV7OffsetPrevL1MessageQueue:blobPayloadV7OffsetPostL1MessageQueue], b.prevL1MessageQueueHash[:]) 244 | copy(payloadBytes[blobPayloadV7OffsetPostL1MessageQueue:blobPayloadV7OffsetInitialL2BlockNumber], b.postL1MessageQueueHash[:]) 245 | 246 | var transactionBytes []byte 247 | if err := iterateAndVerifyBlocksAndL1Messages(b.prevL1MessageQueueHash, b.postL1MessageQueueHash, b.blocks, nil, func(initialL2BlockNumber uint64) { 248 | binary.BigEndian.PutUint64(payloadBytes[blobPayloadV7OffsetInitialL2BlockNumber:blobPayloadV7OffsetNumBlocks], initialL2BlockNumber) 249 | binary.BigEndian.PutUint16(payloadBytes[blobPayloadV7OffsetNumBlocks:blobPayloadV7OffsetBlocks], uint16(len(b.blocks))) 250 | }, func(block *Block, daBlock *daBlockV7) error { 251 | payloadBytes = append(payloadBytes, daBlock.Encode()...) 252 | 253 | // encode L2 txs as RLP and append to transactionBytes 254 | for _, txData := range block.Transactions { 255 | if txData.Type == types.L1MessageTxType { 256 | continue 257 | } 258 | rlpTxData, err := convertTxDataToRLPEncoding(txData) 259 | if err != nil { 260 | return fmt.Errorf("failed to convert txData to RLP encoding: %w", err) 261 | } 262 | transactionBytes = append(transactionBytes, rlpTxData...) 263 | } 264 | 265 | return nil 266 | }); err != nil { 267 | return nil, fmt.Errorf("failed to iterate and verify blocks and L1 messages: %w", err) 268 | } 269 | 270 | payloadBytes = append(payloadBytes, transactionBytes...) 
271 | 272 | return payloadBytes, nil 273 | } 274 | 275 | func decodeBlobPayloadV7(data []byte) (*blobPayloadV7, error) { 276 | if len(data) < blobPayloadV7MinEncodedLength { 277 | return nil, fmt.Errorf("invalid data length for blobPayloadV7, expected at least %d bytes but got %d", blobPayloadV7MinEncodedLength, len(data)) 278 | } 279 | 280 | prevL1MessageQueueHash := common.BytesToHash(data[blobPayloadV7OffsetPrevL1MessageQueue:blobPayloadV7OffsetPostL1MessageQueue]) 281 | postL1MessageQueueHash := common.BytesToHash(data[blobPayloadV7OffsetPostL1MessageQueue:blobPayloadV7OffsetInitialL2BlockNumber]) 282 | 283 | initialL2BlockNumber := binary.BigEndian.Uint64(data[blobPayloadV7OffsetInitialL2BlockNumber:blobPayloadV7OffsetNumBlocks]) 284 | numBlocks := int(binary.BigEndian.Uint16(data[blobPayloadV7OffsetNumBlocks:blobPayloadV7OffsetBlocks])) 285 | 286 | if len(data) < blobPayloadV7OffsetBlocks+daBlockV7BlockContextEncodedLength*numBlocks { 287 | return nil, fmt.Errorf("invalid data length for blobPayloadV7, expected at least %d bytes but got %d", blobPayloadV7OffsetBlocks+daBlockV7BlockContextEncodedLength*numBlocks, len(data)) 288 | } 289 | 290 | // decode DA Blocks from the blob 291 | daBlocks := make([]DABlock, 0, numBlocks) 292 | for i := uint64(0); i < uint64(numBlocks); i++ { 293 | daBlock := newDABlockV7WithNumber(initialL2BlockNumber + i) 294 | 295 | startBytes := blobPayloadV7OffsetBlocks + i*daBlockV7BlockContextEncodedLength 296 | endBytes := startBytes + daBlockV7BlockContextEncodedLength 297 | if err := daBlock.Decode(data[startBytes:endBytes]); err != nil { 298 | return nil, fmt.Errorf("failed to decode DA block: %w", err) 299 | } 300 | 301 | daBlocks = append(daBlocks, daBlock) 302 | } 303 | 304 | // decode l2Transactions for each block from the blob 305 | txBytes := data[blobPayloadV7OffsetBlocks+daBlockV7BlockContextEncodedLength*numBlocks:] 306 | curIndex := 0 307 | var transactions []types.Transactions 308 | 309 | for _, daBlock := range daBlocks { 310 | var blockTransactions types.Transactions 311 | txNum := int(daBlock.NumTransactions()) - int(daBlock.NumL1Messages()) 312 | if txNum < 0 { 313 | return nil, fmt.Errorf("invalid transaction count: NumL1Messages (%d) exceeds NumTransactions (%d)", daBlock.NumL1Messages(), daBlock.NumTransactions()) 314 | } 315 | 316 | for i := 0; i < txNum; i++ { 317 | tx, nextIndex, err := getNextTx(txBytes, curIndex) 318 | if err != nil { 319 | return nil, fmt.Errorf("couldn't decode next tx from blob bytes: %w, index: %d", err, curIndex+4) 320 | } 321 | curIndex = nextIndex 322 | blockTransactions = append(blockTransactions, tx) 323 | } 324 | 325 | transactions = append(transactions, blockTransactions) 326 | } 327 | 328 | return &blobPayloadV7{ 329 | prevL1MessageQueueHash: prevL1MessageQueueHash, 330 | postL1MessageQueueHash: postL1MessageQueueHash, 331 | daBlocks: daBlocks, 332 | l2Transactions: transactions, 333 | }, nil 334 | } 335 | 336 | type daBlockV7 struct { 337 | daBlockV0 338 | 339 | lowestL1MessageQueueIndex uint64 340 | } 341 | 342 | func newDABlockV7FromBlockWithValidation(block *Block, totalL1MessagePoppedBefore *uint64) (*daBlockV7, error) { 343 | if !block.Header.Number.IsUint64() { 344 | return nil, errors.New("block number is not uint64") 345 | } 346 | 347 | numL1Messages, lowestQueueIndex, highestQueueIndex, err := block.NumL1MessagesNoSkipping() 348 | if err != nil { 349 | return nil, fmt.Errorf("failed to calculate number of L1 messages: %w", err) 350 | } 351 | if numL1Messages > 0 { 352 | var startL1MessageIndex 
uint64
		if totalL1MessagePoppedBefore != nil {
			startL1MessageIndex = *totalL1MessagePoppedBefore
		} else {
			startL1MessageIndex = lowestQueueIndex
		}

		if startL1MessageIndex+uint64(numL1Messages) != highestQueueIndex+1 {
			return nil, fmt.Errorf("failed to sanity check L1 messages count: startL1MessageIndex + numL1Messages != highestQueueIndex+1: %d + %d != %d", startL1MessageIndex, numL1Messages, highestQueueIndex+1)
		}
	}

	numL2Transactions := block.NumL2Transactions()
	numTransactions := uint64(numL1Messages) + numL2Transactions
	if numTransactions > math.MaxUint16 {
		return nil, errors.New("number of transactions exceeds max uint16")
	}

	return newDABlockV7(
		block.Header.Number.Uint64(),
		block.Header.Time,
		block.Header.BaseFee,
		block.Header.GasLimit,
		uint16(numTransactions),
		numL1Messages,
		lowestQueueIndex,
	), nil
}

// newDABlockV7 is a constructor function for daBlockV7 that initializes the internal fields.
func newDABlockV7(number uint64, timestamp uint64, baseFee *big.Int, gasLimit uint64, numTransactions uint16, numL1Messages uint16, lowestL1MessageQueueIndex uint64) *daBlockV7 {
	return &daBlockV7{
		daBlockV0: daBlockV0{
			number:          number,
			timestamp:       timestamp,
			baseFee:         baseFee,
			gasLimit:        gasLimit,
			numTransactions: numTransactions,
			numL1Messages:   numL1Messages,
		},
		lowestL1MessageQueueIndex: lowestL1MessageQueueIndex,
	}
}

func newDABlockV7WithNumber(number uint64) *daBlockV7 {
	return &daBlockV7{
		daBlockV0: daBlockV0{
			number: number,
		},
	}
}

// Encode serializes the DABlock into a slice of bytes.
func (b *daBlockV7) Encode() []byte {
	daBlockBytes := make([]byte, daBlockV7BlockContextEncodedLength)
	binary.BigEndian.PutUint64(daBlockBytes[daBlockV7OffsetTimestamp:daBlockV7OffsetBaseFee], b.timestamp)
	if b.baseFee != nil {
		b.baseFee.FillBytes(daBlockBytes[daBlockV7OffsetBaseFee:daBlockV7OffsetGasLimit])
	}
	binary.BigEndian.PutUint64(daBlockBytes[daBlockV7OffsetGasLimit:daBlockV7OffsetNumTransactions], b.gasLimit)
	binary.BigEndian.PutUint16(daBlockBytes[daBlockV7OffsetNumTransactions:daBlockV7OffsetNumL1Messages], b.numTransactions)
	binary.BigEndian.PutUint16(daBlockBytes[daBlockV7OffsetNumL1Messages:], b.numL1Messages)
	return daBlockBytes
}

// Decode populates the fields of a DABlock from a byte slice.
func (b *daBlockV7) Decode(data []byte) error {
	if len(data) != daBlockV7BlockContextEncodedLength {
		return fmt.Errorf("invalid block context encoding length: expected %d bytes, got %d", daBlockV7BlockContextEncodedLength, len(data))
	}

	b.timestamp = binary.BigEndian.Uint64(data[daBlockV7OffsetTimestamp:daBlockV7OffsetBaseFee])
	b.baseFee = new(big.Int).SetBytes(data[daBlockV7OffsetBaseFee:daBlockV7OffsetGasLimit])
	b.gasLimit = binary.BigEndian.Uint64(data[daBlockV7OffsetGasLimit:daBlockV7OffsetNumTransactions])
	b.numTransactions = binary.BigEndian.Uint16(data[daBlockV7OffsetNumTransactions:daBlockV7OffsetNumL1Messages])
	b.numL1Messages = binary.BigEndian.Uint16(data[daBlockV7OffsetNumL1Messages:])

	return nil
}

// daChunkV7 groups consecutive DABlocks with their transactions.
// Note: In DACodecV7 there is no notion of chunks. Blobs contain the entire batch data without any information of Chunks within.
// However, for compatibility reasons DAChunks are still used in the codebase.
// This way we can still uniquely identify a set of blocks and their L1 messages via their hash.
type daChunkV7 struct {
	daChunkV1
}

// newDAChunkV7 is a constructor for daChunkV7, initializing with blocks and transactions.
func newDAChunkV7(blocks []DABlock, transactions [][]*types.TransactionData) *daChunkV7 {
	return &daChunkV7{
		daChunkV1{
			blocks:       blocks,
			transactions: transactions,
		},
	}
}

// Hash computes the hash of the DAChunk data.
func (c *daChunkV7) Hash() (common.Hash, error) {
	var dataBytes []byte

	// concatenate block contexts
	for _, block := range c.blocks {
		encodedBlock := block.Encode()
		dataBytes = append(dataBytes, encodedBlock...)
	}

	// concatenate l1 tx hashes
	for _, blockTxs := range c.transactions {
		for _, txData := range blockTxs {
			if txData.Type != types.L1MessageTxType {
				continue
			}

			hashBytes := common.FromHex(txData.TxHash)
			if len(hashBytes) != common.HashLength {
				return common.Hash{}, fmt.Errorf("unexpected hash: %s", txData.TxHash)
			}
			dataBytes = append(dataBytes, hashBytes...)
		}
	}

	hash := crypto.Keccak256Hash(dataBytes)
	return hash, nil
}

// decompressV7Bytes decompresses the given blob bytes into the original payload bytes.
func decompressV7Bytes(compressedBytes []byte) ([]byte, error) {
	var res []byte

	// re-add the zstd magic number that the scroll encoder strips from its frames
	compressedBytes = append(zstdMagicNumber, compressedBytes...)
	r := bytes.NewReader(compressedBytes)
	zr, err := zstd.NewReader(r)
	if err != nil {
		return nil, fmt.Errorf("failed to create zstd reader: %w", err)
	}
	defer zr.Close()

	// DecodeAll decodes the passed slice directly; the reader above only configures the decoder
	res, err = zr.DecodeAll(compressedBytes, res)
	if err != nil {
		return nil, fmt.Errorf("failed to decompress zstd data: %w", err)
	}
	if len(res) == 0 {
		return nil, fmt.Errorf("payload is empty after decompression")
	}

	return res, nil
}

func decodeSize3Bytes(data []byte) uint32 {
	return uint32(data[0])<<16 | uint32(data[1])<<8 | uint32(data[2])
}

func encodeSize3Bytes(data uint32) []byte {
	return []byte{byte(data >> 16), byte(data >> 8), byte(data)}
}

// iterateAndVerifyBlocksAndL1Messages iterates over the blocks and verifies the blocks and L1 messages.
// It verifies:
// - that L1 messages within and across blocks are contiguous
// - correctness of prevL1MessageQueueHash and postL1MessageQueueHash after applying all L1 messages
// - block numbers are contiguous and uint64
//
// The function calls the initialL2BlockNumberCallback with the initial L2 block number of the batch once.
// The function calls the blockCallBack for each block with the block and the corresponding daBlock.
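// When totalL1MessagePoppedBefore is nil (as during blob payload encoding), the
// expected starting queue index is instead derived from the first L1 message
// encountered in the blocks.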
518 | func iterateAndVerifyBlocksAndL1Messages(prevL1MessageQueueHash, postL1MessageQueueHash common.Hash, blocks []*Block, totalL1MessagePoppedBefore *uint64, initialL2BlockNumberCallback func(initialL2BlockNumber uint64), blockCallBack func(block *Block, daBlock *daBlockV7) error) error { 519 | if len(blocks) == 0 { 520 | return errors.New("no blocks to iterate") 521 | } 522 | 523 | if !blocks[0].Header.Number.IsUint64() { 524 | return errors.New("block number of initial block is not uint64") 525 | } 526 | initialL2BlockNumber := blocks[0].Header.Number.Uint64() 527 | var startL1MessageIndex *uint64 528 | if totalL1MessagePoppedBefore != nil { 529 | startL1MessageIndex = new(uint64) 530 | *startL1MessageIndex = *totalL1MessagePoppedBefore 531 | } 532 | 533 | initialL2BlockNumberCallback(initialL2BlockNumber) 534 | 535 | for i, block := range blocks { 536 | if !block.Header.Number.IsUint64() { 537 | return fmt.Errorf("block number is not a uint64: %s", block.Header.Number.String()) 538 | } 539 | // sanity check: block numbers are contiguous 540 | if block.Header.Number.Uint64() != initialL2BlockNumber+uint64(i) { 541 | return fmt.Errorf("invalid block number: expected %d but got %d", initialL2BlockNumber+uint64(i), block.Header.Number.Uint64()) 542 | } 543 | 544 | // sanity check (within NumL1MessagesNoSkipping in newDABlockV7FromBlockWithValidation): L1 message indices are contiguous within a block 545 | daBlock, err := newDABlockV7FromBlockWithValidation(block, startL1MessageIndex) 546 | if err != nil { 547 | return fmt.Errorf("failed to create DABlock from block %d: %w", block.Header.Number.Uint64(), err) 548 | } 549 | // sanity check: L1 message indices are contiguous across blocks boundaries as startL1MessageIndex is verified in newDABlockV7FromBlockWithValidation 550 | // to be: startL1MessageIndex + numL1Messages in block == highestQueueIndex+1 in block 551 | if daBlock.NumL1Messages() > 0 { 552 | // set startL1MessageIndex to the lowestQueueIndex if it's nil (first L1 message within the blocks) 553 | if startL1MessageIndex == nil { 554 | startL1MessageIndex = new(uint64) 555 | *startL1MessageIndex = daBlock.lowestL1MessageQueueIndex 556 | } 557 | *startL1MessageIndex += uint64(daBlock.NumL1Messages()) 558 | } 559 | 560 | if err = blockCallBack(block, daBlock); err != nil { 561 | return fmt.Errorf("failed to process block %d: %w", block.Header.Number.Uint64(), err) 562 | } 563 | } 564 | 565 | // sanity check: prevL1MessageQueueHash+apply(L1Messages) = postL1MessageQueueHash 566 | computedPostL1MessageQueueHash, err := MessageQueueV2ApplyL1MessagesFromBlocks(prevL1MessageQueueHash, blocks) 567 | if err != nil { 568 | return fmt.Errorf("failed to apply L1 messages to prevL1MessageQueueHash: %w", err) 569 | } 570 | if computedPostL1MessageQueueHash != postL1MessageQueueHash { 571 | return fmt.Errorf("failed to sanity check postL1MessageQueueHash after applying all L1 messages: expected %s, got %s", computedPostL1MessageQueueHash, postL1MessageQueueHash) 572 | } 573 | 574 | return nil 575 | } 576 | 577 | // checkBlocksBatchVSChunksConsistency checks the consistency between blocks in the batch and blocks in the chunks. 578 | // If the batch contains chunks, we need to ensure that the blocks in the chunks match the blocks in the batch. 579 | // Chunks are not directly used in DACodecV7, but we still need to check the consistency of the blocks. 580 | // This is done to ensure compatibility with older versions and the relayer implementation. 
func checkBlocksBatchVSChunksConsistency(batch *Batch) error {
	if len(batch.Chunks) == 0 {
		return nil
	}

	totalBlocks := len(batch.Blocks)
	chunkBlocksCount := 0
	for _, chunk := range batch.Chunks {
		for _, block := range chunk.Blocks {
			// use >= so that batch.Blocks[chunkBlocksCount] below can never go out of range
			if chunkBlocksCount >= totalBlocks {
				return errors.New("chunks contain more blocks than the batch")
			}

			if batch.Blocks[chunkBlocksCount].Header.Hash() != block.Header.Hash() {
				return errors.New("blocks in chunks do not match the blocks in the batch")
			}
			chunkBlocksCount++
		}
	}

	if chunkBlocksCount != totalBlocks {
		return fmt.Errorf("chunks contain fewer blocks than the batch: %d < %d", chunkBlocksCount, totalBlocks)
	}

	return nil
}

// ChallengeDigest returns the challenge digest of the DABatch.
func (b *daBatchV7) ChallengeDigest() common.Hash {
	return b.challengeDigest
}
--------------------------------------------------------------------------------
/encoding/da_test.go:
--------------------------------------------------------------------------------
package encoding

import (
	"encoding/hex"
	"encoding/json"
	"math/big"
	"os"
	"testing"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/scroll-tech/da-codec/encoding/zstd"
)

func TestMain(m *testing.M) {
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	code := m.Run()
	os.Exit(code)
}

func TestUtilFunctions(t *testing.T) {
	block1 := readBlockFromJSON(t, "testdata/blockTrace_02.json")
	block2 := readBlockFromJSON(t, "testdata/blockTrace_03.json")
	block3 := readBlockFromJSON(t, "testdata/blockTrace_04.json")
	block4 := readBlockFromJSON(t, "testdata/blockTrace_05.json")
	block5 := readBlockFromJSON(t, "testdata/blockTrace_06.json")
	block6 := readBlockFromJSON(t, "testdata/blockTrace_07.json")

	chunk1 := &Chunk{Blocks: []*Block{block1, block2}}
	chunk2 := &Chunk{Blocks: []*Block{block3, block4}}
	chunk3 := &Chunk{Blocks: []*Block{block5, block6}}

	batch := &Batch{Chunks: []*Chunk{chunk1, chunk2, chunk3}}

	// Test Block methods
	assert.Equal(t, uint64(0), block1.NumL1Messages(0))
	assert.Equal(t, uint64(2), block1.NumL2Transactions())
	assert.Equal(t, uint64(0), block2.NumL1Messages(0))
	assert.Equal(t, uint64(1), block2.NumL2Transactions())
	assert.Equal(t, uint64(11), block3.NumL1Messages(0))
	assert.Equal(t, uint64(1), block3.NumL2Transactions())
	assert.Equal(t, uint64(42), block4.NumL1Messages(0))
	assert.Equal(t, uint64(0), block4.NumL2Transactions())
	assert.Equal(t, uint64(10), block5.NumL1Messages(0))
	assert.Equal(t, uint64(0), block5.NumL2Transactions())
	assert.Equal(t, uint64(257), block6.NumL1Messages(0))
	assert.Equal(t, uint64(0), block6.NumL2Transactions())

	// Test Chunk methods
	assert.Equal(t, uint64(0), chunk1.NumL1Messages(0))
	assert.Equal(t, uint64(3), chunk1.NumL2Transactions())
	crc1Max, err := chunk1.CrcMax()
	assert.NoError(t, err)
	assert.Equal(t, uint64(11), crc1Max)
	assert.Equal(t,
uint64(3), chunk1.NumTransactions()) 63 | assert.Equal(t, uint64(1194994), chunk1.TotalGasUsed()) 64 | 65 | assert.Equal(t, uint64(42), chunk2.NumL1Messages(0)) 66 | assert.Equal(t, uint64(1), chunk2.NumL2Transactions()) 67 | crc2Max, err := chunk2.CrcMax() 68 | assert.NoError(t, err) 69 | assert.Equal(t, uint64(0), crc2Max) 70 | assert.Equal(t, uint64(7), chunk2.NumTransactions()) 71 | assert.Equal(t, uint64(144000), chunk2.TotalGasUsed()) 72 | 73 | assert.Equal(t, uint64(257), chunk3.NumL1Messages(0)) 74 | assert.Equal(t, uint64(0), chunk3.NumL2Transactions()) 75 | chunk3.Blocks[0].RowConsumption = nil 76 | crc3Max, err := chunk3.CrcMax() 77 | assert.Error(t, err) 78 | assert.EqualError(t, err, "block (17, 0x003fee335455c0c293dda17ea9365fe0caa94071ed7216baf61f7aeb808e8a28) has nil RowConsumption") 79 | assert.Equal(t, uint64(0), crc3Max) 80 | assert.Equal(t, uint64(5), chunk3.NumTransactions()) 81 | assert.Equal(t, uint64(240000), chunk3.TotalGasUsed()) 82 | 83 | // euclid chunk 84 | chunk3.Blocks[0].RowConsumption = nil 85 | chunk3.Blocks[1].RowConsumption = nil 86 | crc3Max, err = chunk3.CrcMax() 87 | assert.NoError(t, err) 88 | assert.Equal(t, uint64(0), crc3Max) 89 | 90 | // Test Batch methods 91 | assert.Equal(t, block6.Header.Root, batch.StateRoot()) 92 | assert.Equal(t, block6.WithdrawRoot, batch.WithdrawRoot()) 93 | } 94 | 95 | func TestConvertTxDataToRLPEncoding(t *testing.T) { 96 | blocks := []*Block{ 97 | readBlockFromJSON(t, "testdata/blockTrace_02.json"), 98 | readBlockFromJSON(t, "testdata/blockTrace_03.json"), 99 | readBlockFromJSON(t, "testdata/blockTrace_04.json"), 100 | readBlockFromJSON(t, "testdata/blockTrace_05.json"), 101 | readBlockFromJSON(t, "testdata/blockTrace_06.json"), 102 | readBlockFromJSON(t, "testdata/blockTrace_07.json"), 103 | } 104 | 105 | for _, block := range blocks { 106 | for _, txData := range block.Transactions { 107 | if txData.Type == types.L1MessageTxType { 108 | continue 109 | } 110 | 111 | rlpTxData, err := convertTxDataToRLPEncoding(txData) 112 | assert.NoError(t, err) 113 | var tx types.Transaction 114 | err = tx.UnmarshalBinary(rlpTxData) 115 | assert.NoError(t, err) 116 | assert.Equal(t, txData.TxHash, tx.Hash().Hex()) 117 | } 118 | } 119 | } 120 | 121 | func TestEmptyBatchRoots(t *testing.T) { 122 | emptyBatch := &Batch{Chunks: []*Chunk{}} 123 | assert.Equal(t, common.Hash{}, emptyBatch.StateRoot()) 124 | assert.Equal(t, common.Hash{}, emptyBatch.WithdrawRoot()) 125 | } 126 | 127 | func TestBlobCompressDecompress(t *testing.T) { 128 | blobString := 
"0060e7159d580094830001000016310002f9162d82cf5502843b9b0a1783119700e28080b915d260806040523480156200001157600080fd5b5060405162001400b2380380833981810160405260a0811037815160208301516040808501805100915193959294830192918464018211639083019060208201858179825181110082820188101794825250918201929091019080838360005b83c357818101510083820152602001620000a9565b50505050905090810190601f16f1578082030080516001836020036101000a0319168191508051604051939291900115012b0001460175015b01a39081015185519093508592508491620001c891600391850001906200026b565b508051620001de90600490602084506005805461ff00190060ff1990911660121716905550600680546001600160a01b0380881619928300161790925560078054928716929091169190911790556200023081620002550062010000600160b01b03191633021790555062000307915050565b60ff19160060ff929092565b828160011615610100020316600290049060005260206000002090601f016020900481019282601f10620002ae5780518380011785de016000010185558215620002de579182015b8202de5782518255916020019190600100c1565b50620002ec9291f0565b5090565b5b8002ec576000815560010162000002f1565b61119b80620003176000396000f3fe61001004361061010b576000003560e01c80635c975abb116100a257806395d89b411161007114610301578000639dc29fac14610309578063a457c2d714610335578063a9059cbb1461036100578063dd62ed3e1461038d5761010b565b1461029d57806370a0823114610200a55780638456cb59146102cb5780638e50817a146102d3313ce567116100de00571461021d578063395093511461023b5780633f4ba83a146102675780634000c10f191461027106fdde0314610110578063095ea7b31461018d5780631816000ddd146101cd57806323b872e7575b6101186103bb565b6040805160208082005283518183015283519192839290830161015261013a61017f9250508091030090f35b6101b9600480360360408110156101a381351690602001356104519100151582525190819003602001d561046e60fd81169160208101359091169060004074565b6102256104fb60ff90921640025105046f610552565b005b61026f00028705a956610654d520bb3516610662067d56e90135166106d21861075703001f07b856034b085f77c7d5a308db565b6003805420601f600260001961010000600188161502019095169490940493840181900481028201810190925282810052606093909290918301828280156104475780601f1061041c57610100808300540402835291610447565b825b8154815260200180831161042a5782900360001f16820191565b600061046561045e610906565b848461090a565b506001920002548184f6565b6104f18461048d6104ec8560405180606080602861108560002891398a166000908152600160205260408120906104cb81019190915260400001600020549190610b51565b935460ff160511016000610522908116825260002080830193909352604091820120918c168152925290205490610be8565b60000716331461059f5762461bcd60e51b60040b60248201526a1b9bdd08185b1b001bddd95960aa1b604482015290640190fd5b6105a7610c49565b61010090040060ff16156105f9106f14185d5cd8589b194e881c185d5cd9596082600606460006508282610ced909052604006ca0ddd900407260c6b6f6e6c792046616374006f727960a0079283918216179091559390921660041561080808550e65086c002511176025006108968dd491824080832093909416825233831661094f5704000180806020018281038252602401806110f36024913960400191fd821661090094223d60228084166000819487168084529482529182902085905581518581005291517f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200a00c8c7c3b92592819003a3508316610a3b25ce8216610a80230ff86023610a8b00838383610f61565b610ac881265f60268685808220939093559084168152200054610af7908220409490945580905191937fddf252ad1be2c89b69c2b068fc00378daa952ba7f163c4a11628f55a4df523b3ef929182900300818484111561000be08381815191508051900ba50b8d0bd2fd900300828201610c421b7f53610066654d6174683a206164646974696f6e206f766572666c6f7700610c9c147300621690557f5db9ee0a495bf2e6ff9c91a7834c1ba4fdd244a5e8aa4e537bd3008aeae4b073aa610cd0a18216610d481f7f45524332303a206d696e7420746f0020746865207a65726f72657373610d54
6000600254610d610255902054610d008780838393519293910e2d6101001790557f62e78cea01bee320cd4e42027000b5ea74000d11b0c9f74754ebdbfc544b05a2588216610eaa6021ad6021610e00b68260000ef3221b85839020550f199082610fb540805182600091851691910020565b610f6cb07415610fb02a113c602a00610c428383401e7375627472610063815250fe7472616e736665726275726e20616d6f756e742065786365656400732062616c616e6365617070726f7665616c6c6f7766726f6d646563726561007365642062656c6f775061757361626c653a20746f6b656e7768696c652070006175736564a2646970667358221220e96342bec8f6c2bf72815a39998973b6004c3bed57770f402e9a7b7eeda0265d4c64736f6c634300060c00331c5a77d900fa7ef466951b2f01f724bca3a5820b63a0e012095745544820636f696e04c00001a0235c1a8d40e8c347890397f1a92e6eadbd6422cf7c210e3e1737f0553c00633172a02f7c0384ddd06970446e74229cd96216da62196dc62395bda5209500d44b8a9af7813ca8c134a9149a111111110549d2740105c410e61ca4d60312006013290b6398528818e2c8484081888c4890142465a631e63178f9940048f4006ba77adb9be01e898bbbfbc0afba2b64ed71162098740e35ec699633c6a84900670da2d948458ecd9f2e5dc5c5ac4afe3d62cf457cd3507b2eae71e064fab30088531f9c708fd40558dfc698511c4a68234d058c4972da28f0201c4ee550b500e36f0bb42e46bb556d6197be7ea27a3a853e5da024de5ea930350219b1638a00a1dcd41f8222f5d647291e05238c248aa4e028278ad4a9a720f5c16f637166004c4cc255e402cdf64c88e9231dd28a07b8f0ddf1dd7b388875a13dc6d447c000318bca02c54cdfa3621635af1ff932928dfde06038ac9729c301f9f3a3a395008d502ba9e137cc24c14cb4102cf6ba6708b9c812c3ba59a3cbcc5d2aafa8b50097b49fbeb704a22b6137ae9a13b600ad73748768b42756ba338f9854164b1b003f3e23255e4db853a2d3276f061093a37810212ba36db205219fab403242800009178588ad21f754085dd807b09af69e6f06bccbcef8ade3b1f0eb15a077b8005b024ecef4087f261a0d4033355c1e544bd0b0c100276008c420d6d30bc8be00a3ba741063e8b48cf152d3695c0904d477318d4ad46477cdf962443336479f00bd86fd52d4e2a1d23eeddc52463d524b44644abdcd097025bcf9cc636fc1030092cb15b81d7ea667f3ba711624bbf04e992871a6ea4f9d367ba6d46142176f00cdf03e4e19549d2eea45ca804421f6bc33933aab6d478b291bf3619fe15bc900975409d8f3677a87d1b1f7acdb3071b752f3d95c9363ac9c83752f223e45e50079308f554787b4d1f74e389823923f5d268be545466a2dd449963ad25407bd003a18601410b91ca081537f67ea8d527a49adf256f2363346ea35a2fe2768a900091a184f59680df81982c6087efc651f54693a7870aa7c13dcf054c39536c500de8a2dd66955567ff1730dac8533de482aed706ed3417823dd65d058b98899008d54917fd1f70735f7a6a8b1a053c08aac96fb04" 129 | blobBytes, err := hex.DecodeString(blobString) 130 | assert.NoError(t, err) 131 | 132 | compressed, err := zstd.CompressScrollBatchBytes(blobBytes) 133 | assert.NoError(t, err) 134 | 135 | blob, err := makeBlobCanonical(compressed) 136 | assert.NoError(t, err) 137 | 138 | res := bytesFromBlobCanonical(blob) 139 | compressedBytes := res[:] 140 | compressedBytes = append(zstdMagicNumber, compressedBytes...) 
141 | 142 | decompressedBlobBytes, err := decompressScrollBlobToBatch(compressedBytes) 143 | assert.NoError(t, err) 144 | assert.Equal(t, blobBytes, decompressedBlobBytes) 145 | } 146 | 147 | func readBlockFromJSON(t *testing.T, filename string) *Block { 148 | data, err := os.ReadFile(filename) 149 | assert.NoError(t, err) 150 | 151 | block := &Block{} 152 | assert.NoError(t, json.Unmarshal(data, block)) 153 | return block 154 | } 155 | 156 | func TestMessageQueueV2EncodeRollingHash(t *testing.T) { 157 | testCases := []struct { 158 | name string 159 | input common.Hash 160 | expectedOutput common.Hash 161 | }{ 162 | { 163 | "zero hash", 164 | common.Hash{}, 165 | common.Hash{}, 166 | }, 167 | { 168 | "all bits set", 169 | common.Hash{ 170 | 0xFF, 0xFF, 0xFF, 0xFF, 171 | 0xFF, 0xFF, 0xFF, 0xFF, 172 | 0xFF, 0xFF, 0xFF, 0xFF, 173 | 0xFF, 0xFF, 0xFF, 0xFF, 174 | 0xFF, 0xFF, 0xFF, 0xFF, 175 | 0xFF, 0xFF, 0xFF, 0xFF, 176 | 0xFF, 0xFF, 0xFF, 0xFF, 177 | 0xFF, 0xFF, 0xFF, 0xFF, 178 | }, 179 | common.Hash{ 180 | 0xFF, 0xFF, 0xFF, 0xFF, 181 | 0xFF, 0xFF, 0xFF, 0xFF, 182 | 0xFF, 0xFF, 0xFF, 0xFF, 183 | 0xFF, 0xFF, 0xFF, 0xFF, 184 | 0xFF, 0xFF, 0xFF, 0xFF, 185 | 0xFF, 0xFF, 0xFF, 0xFF, 186 | 0xFF, 0xFF, 0xFF, 0xFF, 187 | 0x00, 0x00, 0x00, 0x00, 188 | }, 189 | }, 190 | { 191 | "random bytes", 192 | common.Hash{ 193 | 0x00, 0x11, 0x22, 0x33, 194 | 0x44, 0x55, 0x66, 0x77, 195 | 0x88, 0x99, 0xAA, 0xBB, 196 | 0xCC, 0xDD, 0xEE, 0xFF, 197 | 0x00, 0x11, 0x22, 0x33, 198 | 0x44, 0x55, 0x66, 0x77, 199 | 0x88, 0x99, 0xAA, 0xBB, 200 | 0xCC, 0xDD, 0xEE, 0xFF, 201 | }, 202 | common.Hash{ 203 | 0x00, 0x11, 0x22, 0x33, 204 | 0x44, 0x55, 0x66, 0x77, 205 | 0x88, 0x99, 0xAA, 0xBB, 206 | 0xCC, 0xDD, 0xEE, 0xFF, 207 | 0x00, 0x11, 0x22, 0x33, 208 | 0x44, 0x55, 0x66, 0x77, 209 | 0x88, 0x99, 0xAA, 0xBB, 210 | 0x00, 0x00, 0x00, 0x00, 211 | }, 212 | }, 213 | { 214 | "random hash", 215 | common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), 216 | common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567800000000"), 217 | }, 218 | } 219 | 220 | for _, tc := range testCases { 221 | t.Run(tc.name, func(t *testing.T) { 222 | modified := messageQueueV2EncodeRollingHash(tc.input) 223 | assert.Equal(t, tc.expectedOutput, modified) 224 | }) 225 | } 226 | } 227 | 228 | func TestTxsToTxsData_L1Message(t *testing.T) { 229 | msg := &types.L1MessageTx{ 230 | QueueIndex: 100, 231 | Gas: 99, 232 | To: &common.Address{0x01, 0x02, 0x03}, 233 | Value: new(big.Int).SetInt64(1337), 234 | Data: []byte{0x01, 0x02, 0x03}, 235 | Sender: common.Address{0x04, 0x05, 0x06}, 236 | } 237 | 238 | tx := types.NewTx(msg) 239 | 240 | txData := TxsToTxsData([]*types.Transaction{tx}) 241 | require.Len(t, txData, 1) 242 | 243 | decoded, err := l1MessageFromTxData(txData[0]) 244 | require.NoError(t, err) 245 | 246 | require.Equal(t, tx.Hash(), types.NewTx(decoded).Hash()) 247 | } 248 | -------------------------------------------------------------------------------- /encoding/interfaces.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "fmt" 5 | "math/big" 6 | 7 | "github.com/scroll-tech/go-ethereum/common" 8 | "github.com/scroll-tech/go-ethereum/core/types" 9 | "github.com/scroll-tech/go-ethereum/crypto/kzg4844" 10 | "github.com/scroll-tech/go-ethereum/params" 11 | ) 12 | 13 | // DABlock represents a Data Availability Block. 
14 | type DABlock interface { 15 | Encode() []byte 16 | Decode([]byte) error 17 | Number() uint64 18 | NumTransactions() uint16 19 | NumL1Messages() uint16 20 | Timestamp() uint64 21 | BaseFee() *big.Int 22 | GasLimit() uint64 23 | } 24 | 25 | // DAChunk groups consecutive DABlocks with their transactions. 26 | type DAChunk interface { 27 | Encode() ([]byte, error) 28 | Hash() (common.Hash, error) 29 | BlockRange() (uint64, uint64, error) 30 | } 31 | 32 | // DABatch contains metadata about a batch of DAChunks. 33 | type DABatch interface { 34 | Encode() []byte 35 | Hash() common.Hash 36 | DataHash() common.Hash 37 | BlobDataProofForPointEvaluation() ([]byte, error) 38 | Blob() *kzg4844.Blob 39 | BlobBytes() []byte 40 | Version() CodecVersion 41 | SkippedL1MessageBitmap() []byte 42 | ChallengeDigest() common.Hash 43 | } 44 | 45 | type DABlobPayload interface { 46 | Blocks() []DABlock 47 | Transactions() []types.Transactions 48 | PrevL1MessageQueueHash() common.Hash 49 | PostL1MessageQueueHash() common.Hash 50 | } 51 | 52 | // Codec represents the interface for encoding and decoding DA-related structures. 53 | type Codec interface { 54 | Version() CodecVersion 55 | MaxNumChunksPerBatch() int 56 | 57 | NewDABlock(*Block, uint64) (DABlock, error) 58 | NewDAChunk(*Chunk, uint64) (DAChunk, error) 59 | NewDABatch(*Batch) (DABatch, error) 60 | NewDABatchFromBytes([]byte) (DABatch, error) 61 | NewDABatchFromParams(batchIndex uint64, blobVersionedHash, parentBatchHash common.Hash) (DABatch, error) 62 | 63 | DecodeDAChunksRawTx(chunkBytes [][]byte) ([]*DAChunkRawTx, error) 64 | DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error 65 | DecodeBlob(blob *kzg4844.Blob) (DABlobPayload, error) 66 | 67 | CheckChunkCompressedDataCompatibility(*Chunk) (bool, error) 68 | CheckBatchCompressedDataCompatibility(*Batch) (bool, error) 69 | 70 | EstimateChunkL1CommitBatchSizeAndBlobSize(*Chunk) (uint64, uint64, error) 71 | EstimateBatchL1CommitBatchSizeAndBlobSize(*Batch) (uint64, uint64, error) 72 | EstimateBlockL1CommitCalldataSize(*Block) (uint64, error) 73 | EstimateChunkL1CommitCalldataSize(*Chunk) (uint64, error) 74 | EstimateChunkL1CommitGas(*Chunk) (uint64, error) 75 | EstimateBatchL1CommitGas(*Batch) (uint64, error) 76 | EstimateBatchL1CommitCalldataSize(*Batch) (uint64, error) 77 | 78 | JSONFromBytes([]byte) ([]byte, error) // converts batch header bytes to JSON; only used to provide witness data for the prover. 79 | } 80 | 81 | // CodecVersion represents the version of the codec. 82 | type CodecVersion uint8 83 | 84 | const ( 85 | CodecV0 CodecVersion = iota 86 | CodecV1 87 | CodecV2 88 | CodecV3 89 | CodecV4 90 | CodecV5 91 | CodecV6 92 | CodecV7 93 | ) 94 | 95 | // CodecFromVersion returns the appropriate codec for the given version. 96 | func CodecFromVersion(version CodecVersion) (Codec, error) { 97 | switch version { 98 | case CodecV0: 99 | return &DACodecV0{}, nil 100 | case CodecV1: 101 | return &DACodecV1{}, nil 102 | case CodecV2: 103 | return &DACodecV2{}, nil 104 | case CodecV3: 105 | return &DACodecV3{}, nil 106 | case CodecV4: 107 | return &DACodecV4{}, nil 108 | case CodecV5: 109 | return NewDACodecV5(), nil 110 | case CodecV6: 111 | return NewDACodecV6(), nil 112 | case CodecV7: 113 | return &DACodecV7{}, nil 114 | default: 115 | return nil, fmt.Errorf("unsupported codec version: %v", version) 116 | } 117 | } 118 | 119 | // CodecFromConfig determines and returns the appropriate codec based on chain configuration, block number, and timestamp.
120 | func CodecFromConfig(chainCfg *params.ChainConfig, startBlockNumber *big.Int, startBlockTimestamp uint64) Codec { 121 | if chainCfg.IsEuclidV2(startBlockTimestamp) { 122 | return &DACodecV7{} 123 | } else if chainCfg.IsEuclid(startBlockTimestamp) { 124 | // V5 is skipped, because it is only used for the special Euclid transition batch that we handle explicitly 125 | return NewDACodecV6() 126 | } else if chainCfg.IsDarwinV2(startBlockTimestamp) { 127 | return &DACodecV4{} 128 | } else if chainCfg.IsDarwin(startBlockTimestamp) { 129 | return &DACodecV3{} 130 | } else if chainCfg.IsCurie(startBlockNumber) { 131 | return &DACodecV2{} 132 | } else if chainCfg.IsBernoulli(startBlockNumber) { 133 | return &DACodecV1{} 134 | } else { 135 | return &DACodecV0{} 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /encoding/interfaces_test.go: -------------------------------------------------------------------------------- 1 | package encoding 2 | 3 | import ( 4 | "math/big" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | 9 | "github.com/scroll-tech/go-ethereum/params" 10 | ) 11 | 12 | func TestCodecFromVersion(t *testing.T) { 13 | tests := []struct { 14 | name string 15 | version CodecVersion 16 | want Codec 17 | wantErr bool 18 | }{ 19 | {"CodecV0", CodecV0, &DACodecV0{}, false}, 20 | {"CodecV1", CodecV1, &DACodecV1{}, false}, 21 | {"CodecV2", CodecV2, &DACodecV2{}, false}, 22 | {"CodecV3", CodecV3, &DACodecV3{}, false}, 23 | {"CodecV4", CodecV4, &DACodecV4{}, false}, 24 | {"CodecV5", CodecV5, &DACodecV5{}, false}, 25 | {"CodecV6", CodecV6, &DACodecV6{}, false}, 26 | {"CodecV7", CodecV7, &DACodecV7{}, false}, 27 | {"InvalidCodec", CodecVersion(99), nil, true}, 28 | } 29 | 30 | for _, tt := range tests { 31 | t.Run(tt.name, func(t *testing.T) { 32 | got, err := CodecFromVersion(tt.version) 33 | if tt.wantErr { 34 | assert.Error(t, err) 35 | } else { 36 | assert.NoError(t, err) 37 | assert.IsType(t, tt.want, got) 38 | } 39 | }) 40 | } 41 | } 42 | 43 | func TestCodecFromConfig(t *testing.T) { 44 | tests := []struct { 45 | name string 46 | config *params.ChainConfig 47 | blockNum *big.Int 48 | timestamp uint64 49 | want Codec 50 | }{ 51 | { 52 | name: "EuclidV2 active", 53 | config: ¶ms.ChainConfig{ 54 | LondonBlock: big.NewInt(0), 55 | BernoulliBlock: big.NewInt(0), 56 | CurieBlock: big.NewInt(0), 57 | DarwinTime: new(uint64), 58 | DarwinV2Time: new(uint64), 59 | EuclidTime: new(uint64), 60 | EuclidV2Time: new(uint64), 61 | }, 62 | blockNum: big.NewInt(0), 63 | timestamp: 0, 64 | want: &DACodecV7{}, 65 | }, 66 | { 67 | name: "Euclid active", 68 | config: ¶ms.ChainConfig{ 69 | LondonBlock: big.NewInt(0), 70 | BernoulliBlock: big.NewInt(0), 71 | CurieBlock: big.NewInt(0), 72 | DarwinTime: new(uint64), 73 | DarwinV2Time: new(uint64), 74 | EuclidTime: new(uint64), 75 | }, 76 | blockNum: big.NewInt(0), 77 | timestamp: 0, 78 | want: &DACodecV6{}, 79 | }, 80 | { 81 | name: "DarwinV2 active", 82 | config: ¶ms.ChainConfig{ 83 | LondonBlock: big.NewInt(0), 84 | BernoulliBlock: big.NewInt(0), 85 | CurieBlock: big.NewInt(0), 86 | DarwinTime: new(uint64), 87 | DarwinV2Time: new(uint64), 88 | }, 89 | blockNum: big.NewInt(0), 90 | timestamp: 0, 91 | want: &DACodecV4{}, 92 | }, 93 | { 94 | name: "Darwin active", 95 | config: ¶ms.ChainConfig{ 96 | LondonBlock: big.NewInt(0), 97 | BernoulliBlock: big.NewInt(0), 98 | CurieBlock: big.NewInt(0), 99 | DarwinTime: new(uint64), 100 | }, 101 | blockNum: big.NewInt(0), 102 | timestamp: 0, 103 | want: 
&DACodecV3{}, 104 | }, 105 | { 106 | name: "Curie active", 107 | config: ¶ms.ChainConfig{ 108 | LondonBlock: big.NewInt(0), 109 | BernoulliBlock: big.NewInt(0), 110 | CurieBlock: big.NewInt(0), 111 | }, 112 | blockNum: big.NewInt(0), 113 | timestamp: 0, 114 | want: &DACodecV2{}, 115 | }, 116 | { 117 | name: "Bernoulli active", 118 | config: ¶ms.ChainConfig{ 119 | LondonBlock: big.NewInt(0), 120 | BernoulliBlock: big.NewInt(0), 121 | }, 122 | blockNum: big.NewInt(0), 123 | timestamp: 0, 124 | want: &DACodecV1{}, 125 | }, 126 | { 127 | name: "London active", 128 | config: ¶ms.ChainConfig{ 129 | LondonBlock: big.NewInt(0), 130 | }, 131 | blockNum: big.NewInt(0), 132 | timestamp: 0, 133 | want: &DACodecV0{}, 134 | }, 135 | { 136 | name: "No upgrades", 137 | config: ¶ms.ChainConfig{}, 138 | blockNum: big.NewInt(0), 139 | timestamp: 0, 140 | want: &DACodecV0{}, 141 | }, 142 | } 143 | 144 | for _, tt := range tests { 145 | t.Run(tt.name, func(t *testing.T) { 146 | got := CodecFromConfig(tt.config, tt.blockNum, tt.timestamp) 147 | assert.IsType(t, tt.want, got) 148 | }) 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /encoding/zstd/add_scroll_prefix_in_zstd_related_symbols.sh: -------------------------------------------------------------------------------- 1 | # Generate redefine.syms for linux_amd64 2 | /opt/homebrew/opt/llvm/bin/llvm-nm libscroll_zstd_linux_amd64.a | awk '/ZSTD|HUF|FSE|ZBUFF/ {if ($3 != "") print $3 " scroll_" $3}' | sort | uniq > redefine_linux_amd64.syms 3 | 4 | # Use llvm-objcopy to modify symbols for linux_amd64 5 | llvm-objcopy --redefine-syms=redefine_linux_amd64.syms libscroll_zstd_linux_amd64.a libscroll_zstd_linux_amd64_new.a 6 | 7 | # Move the new file to replace the original and clean up 8 | mv libscroll_zstd_linux_amd64_new.a libscroll_zstd_linux_amd64.a 9 | rm redefine_linux_amd64.syms 10 | 11 | # Generate redefine.syms for linux_arm64 12 | /opt/homebrew/opt/llvm/bin/llvm-nm libscroll_zstd_linux_arm64.a | awk '/ZSTD|HUF|FSE|ZBUFF/ {if ($3 != "") print $3 " scroll_" $3}' | sort | uniq > redefine_linux_arm64.syms 13 | 14 | # Use llvm-objcopy to modify symbols for linux_arm64 15 | llvm-objcopy --redefine-syms=redefine_linux_arm64.syms libscroll_zstd_linux_arm64.a libscroll_zstd_linux_arm64_new.a 16 | 17 | # Move the new file to replace the original and clean up 18 | mv libscroll_zstd_linux_arm64_new.a libscroll_zstd_linux_arm64.a 19 | rm redefine_linux_arm64.syms 20 | 21 | # Generate redefine.syms for darwin_arm64 22 | /opt/homebrew/opt/llvm/bin/llvm-nm libscroll_zstd_darwin_arm64.a | awk '/ZSTD|HUF|FSE|ZBUFF/ {if ($3 != "") print $3 " scroll_" $3}' | sort | uniq > redefine_darwin_arm64.syms 23 | 24 | # Use llvm-objcopy to modify symbols for darwin_arm64 25 | llvm-objcopy --redefine-syms=redefine_darwin_arm64.syms libscroll_zstd_darwin_arm64.a libscroll_zstd_darwin_arm64_new.a 26 | 27 | # Move the new file to replace the original and clean up 28 | mv libscroll_zstd_darwin_arm64_new.a libscroll_zstd_darwin_arm64.a 29 | rm redefine_darwin_arm64.syms 30 | -------------------------------------------------------------------------------- /encoding/zstd/libscroll_zstd_darwin_arm64.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scroll-tech/da-codec/bfa7133d4ad1ebd4d49cf79a374be5c206f5f781/encoding/zstd/libscroll_zstd_darwin_arm64.a -------------------------------------------------------------------------------- 
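A minimal usage sketch for the encoding interfaces above (hypothetical; not a file in this repository). It assumes only the public API defined in encoding/interfaces.go; the chosen version and the printed fields are illustrative, mirroring what interfaces_test.go exercises.

package main

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding"
)

func main() {
	// Resolve a codec for a fixed version; CodecFromConfig could be used
	// instead when a chain config and block position are available.
	codec, err := encoding.CodecFromVersion(encoding.CodecV4)
	if err != nil {
		fmt.Println("unsupported codec version:", err)
		return
	}

	// Query basic properties through the Codec interface.
	fmt.Println("codec version:", codec.Version())
	fmt.Println("max chunks per batch:", codec.MaxNumChunksPerBatch())
}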
/encoding/zstd/libscroll_zstd_darwin_arm64.go: -------------------------------------------------------------------------------- 1 | package zstd 2 | 3 | /* 4 | #cgo LDFLAGS: ${SRCDIR}/libscroll_zstd_darwin_arm64.a 5 | */ 6 | import "C" 7 | -------------------------------------------------------------------------------- /encoding/zstd/libscroll_zstd_linux_amd64.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scroll-tech/da-codec/bfa7133d4ad1ebd4d49cf79a374be5c206f5f781/encoding/zstd/libscroll_zstd_linux_amd64.a -------------------------------------------------------------------------------- /encoding/zstd/libscroll_zstd_linux_amd64.go: -------------------------------------------------------------------------------- 1 | //go:build !musl 2 | // +build !musl 3 | 4 | package zstd 5 | 6 | /* 7 | #cgo LDFLAGS: ${SRCDIR}/libscroll_zstd_linux_amd64.a 8 | */ 9 | import "C" 10 | -------------------------------------------------------------------------------- /encoding/zstd/libscroll_zstd_linux_arm64.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scroll-tech/da-codec/bfa7133d4ad1ebd4d49cf79a374be5c206f5f781/encoding/zstd/libscroll_zstd_linux_arm64.a -------------------------------------------------------------------------------- /encoding/zstd/libscroll_zstd_linux_arm64.go: -------------------------------------------------------------------------------- 1 | //go:build !musl 2 | // +build !musl 3 | 4 | package zstd 5 | 6 | /* 7 | #cgo LDFLAGS: ${SRCDIR}/libscroll_zstd_linux_arm64.a 8 | */ 9 | import "C" 10 | -------------------------------------------------------------------------------- /encoding/zstd/zstd.go: -------------------------------------------------------------------------------- 1 | package zstd 2 | 3 | /* 4 | #include <stdint.h> 5 | char* compress_scroll_batch_bytes(uint8_t* src, uint64_t src_size, uint8_t* output_buf, uint64_t *output_buf_size); 6 | */ 7 | import "C" 8 | 9 | import ( 10 | "fmt" 11 | "unsafe" 12 | ) 13 | 14 | const compressBufferOverhead = 128 15 | 16 | // CompressScrollBatchBytes compresses the given batch of bytes using zstd compression. 17 | // The output buffer is allocated with an extra compressBufferOverhead bytes to accommodate 18 | // potential metadata overhead or error messages from the underlying C function.
19 | func CompressScrollBatchBytes(batchBytes []byte) ([]byte, error) { 20 | if len(batchBytes) == 0 { 21 | return nil, fmt.Errorf("input batch is empty") 22 | } 23 | 24 | srcSize := C.uint64_t(len(batchBytes)) 25 | outbufSize := C.uint64_t(len(batchBytes) + compressBufferOverhead) 26 | outbuf := make([]byte, outbufSize) 27 | 28 | if err := C.compress_scroll_batch_bytes((*C.uchar)(unsafe.Pointer(&batchBytes[0])), srcSize, 29 | (*C.uchar)(unsafe.Pointer(&outbuf[0])), &outbufSize); err != nil { 30 | return nil, fmt.Errorf("failed to compress scroll batch bytes: %s", C.GoString(err)) 31 | } 32 | 33 | return outbuf[:int(outbufSize)], nil 34 | } 35 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/scroll-tech/da-codec 2 | 3 | go 1.21 4 | 5 | require ( 6 | github.com/agiledragon/gomonkey/v2 v2.12.0 7 | github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 8 | github.com/stretchr/testify v1.9.0 9 | ) 10 | 11 | require ( 12 | github.com/bits-and-blooms/bitset v1.12.0 // indirect 13 | github.com/btcsuite/btcd v0.20.1-beta // indirect 14 | github.com/consensys/bavard v0.1.13 // indirect 15 | github.com/consensys/gnark-crypto v0.12.1 // indirect 16 | github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect 17 | github.com/davecgh/go-spew v1.1.1 // indirect 18 | github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect 19 | github.com/go-ole/go-ole v1.3.0 // indirect 20 | github.com/go-stack/stack v1.8.1 // indirect 21 | github.com/holiman/uint256 v1.2.4 22 | github.com/iden3/go-iden3-crypto v0.0.15 // indirect 23 | github.com/klauspost/compress v1.17.9 24 | github.com/kr/text v0.2.0 // indirect 25 | github.com/mmcloughlin/addchain v0.4.0 // indirect 26 | github.com/pmezard/go-difflib v1.0.0 // indirect 27 | github.com/rogpeppe/go-internal v1.10.0 // indirect 28 | github.com/scroll-tech/zktrie v0.8.4 // indirect 29 | github.com/shirou/gopsutil v3.21.11+incompatible // indirect 30 | github.com/supranational/blst v0.3.11 // indirect 31 | github.com/tklauser/go-sysconf v0.3.12 // indirect 32 | github.com/tklauser/numcpus v0.6.1 // indirect 33 | github.com/yusufpapurcu/wmi v1.2.3 // indirect 34 | golang.org/x/crypto v0.21.0 // indirect 35 | golang.org/x/sync v0.6.0 // indirect 36 | golang.org/x/sys v0.21.0 // indirect 37 | gopkg.in/yaml.v3 v3.0.1 // indirect 38 | rsc.io/tmplfunc v0.0.3 // indirect 39 | ) 40 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= 2 | github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= 3 | github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= 4 | github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38= 5 | github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= 6 | github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA= 7 | github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= 8 | github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= 9 | github.com/btcsuite/btcd v0.20.1-beta/go.mod 
h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= 10 | github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= 11 | github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= 12 | github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= 13 | github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= 14 | github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= 15 | github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= 16 | github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= 17 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 18 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 19 | github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= 20 | github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= 21 | github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= 22 | github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= 23 | github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI= 24 | github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= 25 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 26 | github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 27 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 28 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 29 | github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA= 30 | github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c= 31 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 32 | github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= 33 | github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= 34 | github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= 35 | github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= 36 | github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= 37 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 38 | github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= 39 | github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 40 | github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= 41 | github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 42 | github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= 43 | github.com/holiman/bloomfilter/v2 
v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= 44 | github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= 45 | github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= 46 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 47 | github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= 48 | github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= 49 | github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= 50 | github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= 51 | github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 52 | github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= 53 | github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= 54 | github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= 55 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 56 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 57 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 58 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 59 | github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= 60 | github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= 61 | github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= 62 | github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= 63 | github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= 64 | github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= 65 | github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= 66 | github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= 67 | github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= 68 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 69 | github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 70 | github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 71 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 72 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 73 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 74 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 75 | github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= 76 | github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= 77 | github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= 78 | github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 79 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 80 | github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 81 | 
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8= 82 | github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ= 83 | github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= 84 | github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= 85 | github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= 86 | github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= 87 | github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= 88 | github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= 89 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 90 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 91 | github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= 92 | github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= 93 | github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= 94 | github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= 95 | github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= 96 | github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= 97 | github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= 98 | github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= 99 | github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= 100 | github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= 101 | golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 102 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 103 | golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= 104 | golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= 105 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 106 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 107 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 108 | golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= 109 | golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 110 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 111 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 112 | golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 113 | golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 114 | golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 115 | golang.org/x/sys v0.11.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 116 | golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= 117 | golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 118 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 119 | golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 120 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 121 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 122 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 123 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 124 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 125 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 126 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 127 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 128 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 129 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 130 | rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= 131 | rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= 132 | -------------------------------------------------------------------------------- /libzstd/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /_obj 3 | -------------------------------------------------------------------------------- /libzstd/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "cc" 7 | version = "1.0.95" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" 10 | dependencies = [ 11 | "jobserver", 12 | "libc", 13 | "once_cell", 14 | ] 15 | 16 | [[package]] 17 | name = "encoder" 18 | version = "0.1.0" 19 | dependencies = [ 20 | "zstd", 21 | ] 22 | 23 | [[package]] 24 | name = "jobserver" 25 | version = "0.1.31" 26 | source = "registry+https://github.com/rust-lang/crates.io-index" 27 | checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" 28 | dependencies = [ 29 | "libc", 30 | ] 31 | 32 | [[package]] 33 | name = "libc" 34 | version = "0.2.153" 35 | source = "registry+https://github.com/rust-lang/crates.io-index" 36 | checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" 37 | 38 | [[package]] 39 | name = "once_cell" 40 | version = "1.19.0" 41 | source = "registry+https://github.com/rust-lang/crates.io-index" 42 | checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" 43 | 44 | [[package]] 45 | name = "pkg-config" 46 | version = "0.3.30" 47 | source = "registry+https://github.com/rust-lang/crates.io-index" 48 | checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" 49 | 50 | [[package]] 51 | name = "scroll-zstd" 52 | version = "0.1.0" 53 | dependencies = [ 54 | "encoder", 55 | ] 56 | 57 | [[package]] 58 | name = "zstd" 59 | version = "0.13.0" 60 | source = "git+https://github.com/scroll-tech/zstd-rs?branch=hack/mul-block#5c0892b6567dab31394d701477183ce9d6a32aca" 61 | dependencies = [ 62 | "zstd-safe", 63 | ] 64 | 65 | [[package]] 66 | name = "zstd-safe" 67 | version = "7.0.0" 68 | source = "git+https://github.com/scroll-tech/zstd-rs?branch=hack/mul-block#5c0892b6567dab31394d701477183ce9d6a32aca" 69 | dependencies = [ 70 | "zstd-sys", 71 | ] 72 | 73 | [[package]] 74 | name = "zstd-sys" 75 | version = "2.0.9+zstd.1.5.5" 76 | source = "git+https://github.com/scroll-tech/zstd-rs?branch=hack/mul-block#5c0892b6567dab31394d701477183ce9d6a32aca" 77 | dependencies = [ 78 | "cc", 79 | "pkg-config", 80 | ] 81 | -------------------------------------------------------------------------------- /libzstd/Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "encoder", 4 | ] 5 | 6 | [package] 7 | name = "scroll-zstd" 8 | version = "0.1.0" 9 | edition = "2021" 10 | 11 | [lib] 12 | crate-type = ["staticlib"] 13 | 14 | 15 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 16 | 17 | [dependencies] 18 | zstd-encoder = { package = "encoder", path = "encoder"} 19 | 20 | [features] 21 | scroll = [ ] 22 | -------------------------------------------------------------------------------- /libzstd/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: libzstd libzstddbg 2 | 3 | clean: 4 | rm -rf *.a *.so target 5 | cargo clean 6 | 7 | libzstd: 8 | cargo build --release 9 | 10 | libzstddbg: 11 | cargo build 12 | -------------------------------------------------------------------------------- /libzstd/encoder/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "encoder" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | [lib] 8 | 9 
| [dependencies] 10 | zstd = { git = "https://github.com/scroll-tech/zstd-rs", branch = "hack/mul-block", features = ["experimental"]} -------------------------------------------------------------------------------- /libzstd/encoder/src/lib.rs: -------------------------------------------------------------------------------- 1 | use zstd::stream::Encoder; 2 | use zstd::zstd_safe::{CParameter, ParamSwitch}; 3 | 4 | // re-export zstd 5 | pub use zstd; 6 | 7 | // we use an offset window of no more than 17 8 | // TODO: use for multi-block zstd. 9 | #[allow(dead_code)] 10 | pub const CL_WINDOW_LIMIT: usize = 17; 11 | 12 | /// zstd block size target. 13 | pub const N_BLOCK_SIZE_TARGET: u32 = 124 * 1024; 14 | 15 | /// Maximum number of blocks that we can expect in the encoded data. 16 | pub const N_MAX_BLOCKS: u64 = 10; 17 | 18 | /// Zstd encoder configuration 19 | pub fn init_zstd_encoder(target_block_size: u32) -> Encoder<'static, Vec<u8>> { 20 | let mut encoder = Encoder::new(Vec::new(), 0).expect("infallible"); 21 | 22 | // disable compression of literals, i.e. literals will be raw bytes. 23 | encoder 24 | .set_parameter(CParameter::LiteralCompressionMode(ParamSwitch::Disable)) 25 | .expect("infallible"); 26 | // with a hack in zstd we can set window log <= 17 with a single segment kept 27 | encoder 28 | .set_parameter(CParameter::WindowLog(17)) 29 | .expect("infallible"); 30 | // set target block size to fit within a single block. 31 | encoder 32 | .set_parameter(CParameter::TargetCBlockSize(target_block_size)) 33 | .expect("infallible"); 34 | // do not include the checksum at the end of the encoded data. 35 | encoder.include_checksum(false).expect("infallible"); 36 | // do not include magic bytes at the start of the frame since we will have a single 37 | // frame. 38 | encoder.include_magicbytes(false).expect("infallible"); 39 | // do not include the dictionary id so we have simpler content 40 | encoder.include_dictid(false).expect("infallible"); 41 | // include the content size to know at decode time the expected size of decoded 42 | // data.
43 | encoder.include_contentsize(true).expect("infallible"); 44 | 45 | encoder 46 | } 47 | -------------------------------------------------------------------------------- /libzstd/rust-toolchain: -------------------------------------------------------------------------------- 1 | nightly-2023-12-03 2 | -------------------------------------------------------------------------------- /libzstd/src/lib.rs: -------------------------------------------------------------------------------- 1 | use core::slice; 2 | use std::io::Write; 3 | use std::os::raw::{c_char, c_uchar}; 4 | use std::ptr::null; 5 | use zstd_encoder::{init_zstd_encoder, N_BLOCK_SIZE_TARGET}; 6 | 7 | fn out_as_err(err: &str, out: &mut [u8]) -> *const c_char { 8 | let msg = if err.len() + 1 > out.len() { 9 | "compress_scroll_batch_bytes: not enough output buffer for the error message" 10 | } else { 11 | err 12 | }; 13 | 14 | let cpy_src = unsafe { slice::from_raw_parts(msg.as_ptr(), msg.len()) }; 15 | out[..cpy_src.len()].copy_from_slice(cpy_src); 16 | out[cpy_src.len()] = 0; // build the c-style string 17 | out.as_ptr() as *const c_char 18 | } 19 | 20 | /// FFI entry point: compresses src into output_buf; returns null on success, or a pointer to a C-string error message (written into output_buf) on failure. 21 | #[no_mangle] 22 | pub unsafe extern "C" fn compress_scroll_batch_bytes( 23 | src: *const c_uchar, 24 | src_size: u64, 25 | output_buf: *mut c_uchar, 26 | output_buf_size: *mut u64, 27 | ) -> *const c_char { 28 | let buf_size = *output_buf_size; 29 | let src = unsafe { slice::from_raw_parts(src, src_size as usize) }; 30 | let out = unsafe { slice::from_raw_parts_mut(output_buf, buf_size as usize) }; 31 | 32 | let mut encoder = init_zstd_encoder(N_BLOCK_SIZE_TARGET); 33 | encoder.set_pledged_src_size(Some(src.len() as u64)).expect( 34 | "compress_scroll_batch_bytes: failed to set pledged src size, should be infallible", 35 | ); 36 | 37 | let ret = encoder.write_all(src); 38 | let ret = ret.and_then(|_| encoder.finish()); 39 | if let Err(e) = ret { 40 | return out_as_err(e.to_string().as_str(), out); 41 | } 42 | 43 | let ret = ret.unwrap(); 44 | if ret.len() > buf_size as usize { 45 | return out_as_err( 46 | "compress_scroll_batch_bytes: not enough output buffer for compressed data", 47 | out, 48 | ); 49 | } 50 | out[..ret.len()].copy_from_slice(&ret); 51 | *output_buf_size = ret.len() as u64; 52 | 53 | null() 54 | } 55 | --------------------------------------------------------------------------------
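A minimal compression sketch for the pipeline above (hypothetical; not a file in this repository). It assumes only zstd.CompressScrollBatchBytes from encoding/zstd; the payload contents are illustrative. Because init_zstd_encoder disables magic bytes, checksum, and dictionary ID, the output is a headerless zstd frame: the round-trip test in the encoding package prepends the magic number again before decompressing, and a stock zstd decoder will not accept the raw output as-is.

package main

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding/zstd"
)

func main() {
	// Any non-empty input works; CompressScrollBatchBytes rejects empty input.
	payload := []byte("example batch payload for the cgo-backed scroll zstd encoder")

	compressed, err := zstd.CompressScrollBatchBytes(payload)
	if err != nil {
		panic(err)
	}

	// The C-side output buffer is len(payload)+128 bytes (compressBufferOverhead);
	// the call returns an error string if the compressed frame would exceed that.
	fmt.Printf("compressed %d bytes -> %d bytes\n", len(payload), len(compressed))
}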