├── .bookignore ├── disperser ├── .gitignore ├── server_config.go ├── encoder │ ├── config.go │ ├── client.go │ └── metrics.go ├── errors.go ├── encoder_client.go ├── signer_client.go ├── db.go ├── batcher │ ├── mock │ │ └── finalizer.go │ ├── slice_signer_test.go │ └── transactor │ │ └── transactor.go ├── api │ ├── proto │ │ ├── encoder │ │ │ └── encoder.proto │ │ ├── signer │ │ │ └── signer.proto │ │ └── retriever │ │ │ └── retriever.proto │ └── grpc │ │ ├── signer │ │ └── signer_grpc.pb.go │ │ ├── encoder │ │ └── encoder_grpc.pb.go │ │ └── retriever │ │ └── retriever_grpc.pb.go ├── cmd │ ├── batcher │ │ ├── Dockerfile │ │ └── config.go │ ├── apiserver │ │ ├── Dockerfile │ │ ├── config.go │ │ ├── main.go │ │ └── flags │ │ │ └── flags.go │ └── combined_server │ │ └── flags │ │ └── flags.go ├── mock │ └── signer.go ├── signer │ ├── client_test.go │ └── client.go ├── leveldb │ └── leveldb.go └── Makefile ├── inabox └── deploy │ ├── codegen │ └── gen.sh │ ├── cmd │ └── main.go │ └── localstack.go ├── .dockerignore ├── .gitbook └── assets │ ├── zg-da-batcher.png │ ├── zg-da-architecture.png │ └── zg-da-encoding-groups.png ├── requirements.txt ├── prometheus.yml ├── tests ├── dep_pip3.sh ├── da_test_framework │ ├── da_node_type.py │ ├── contracts.py │ ├── local_stack.py │ ├── da_signer.py │ ├── da_encoder.py │ ├── da_retriever.py │ ├── da_batcher.py │ ├── blockchain_node.py │ └── da_server.py ├── config │ └── node_config.py ├── utility │ ├── simple_rpc_proxy.py │ └── utils.py └── da_put_get_test.py ├── Dockerfile ├── docs ├── pkg │ ├── README.md │ ├── encoding.md │ └── kzg.md ├── api │ └── README.md ├── glossary.md ├── README.md ├── architecture │ ├── retriever.md │ ├── disperser.md │ ├── batcher.md │ └── README.md ├── security │ └── README.md └── introduction.md ├── .gitignore ├── common ├── abi.go ├── mock │ ├── ratelimiter.go │ ├── rpc_ethclient.go │ ├── workerpool.go │ ├── s3_client.go │ └── logging.go ├── workerpool.go ├── param_store.go ├── rpc_ethclient.go ├── store │ ├── local_store.go │ ├── local_store_test.go │ ├── dynamo_store.go │ └── dynamo_store_test.go ├── healthcheck │ └── server.go ├── storage_node │ └── cli.go ├── common.go ├── pubip │ ├── pubip_test.go │ └── pubip.go ├── ratelimit_test.go ├── logging │ ├── cli.go │ └── logging.go ├── ratelimit │ ├── ratelimit_test.go │ ├── limiter.go │ └── limiter_cli.go ├── aws │ ├── cli.go │ └── dynamodb │ │ ├── utils_test.go │ │ └── utils │ │ └── test_utils.go ├── logging.go ├── ethclient.go ├── geth │ └── cli.go ├── ratelimit.go └── common_test.go ├── .github ├── actions │ ├── setup-rust │ │ └── action.yml │ └── test-coverage │ │ └── action.yml └── workflows │ ├── golangci-lint.yml │ └── tests.yml ├── run ├── start.sh ├── run.sh └── README.md ├── contributing.md ├── combined.Dockerfile ├── .gitmodules ├── api ├── go.mod ├── go.sum ├── proto │ ├── retriever │ │ └── retriever.proto │ └── disperser │ │ └── disperser.proto └── grpc │ └── retriever │ └── retriever_grpc.pb.go ├── .devcontainer ├── install.sh ├── Dockerfile └── devcontainer.json ├── cli ├── config.go └── flags │ └── flags.go ├── trafficgenerator.Dockerfile ├── synthetic-test-client.Dockerfile ├── SUMMARY.md ├── core ├── locate.go ├── encoding.go └── bn254 │ └── attestation.go └── Makefile /.bookignore: -------------------------------------------------------------------------------- 1 | inabox/* 2 | 3 | -------------------------------------------------------------------------------- /disperser/.gitignore: 
-------------------------------------------------------------------------------- 1 | bin/* 2 | text -------------------------------------------------------------------------------- /inabox/deploy/codegen/gen.sh: -------------------------------------------------------------------------------- 1 | go run . 2 | cd ../ && gofmt -s -w . -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | * 2 | !/Makefile 3 | !/api 4 | !/common 5 | !/core 6 | !/disperser 7 | !/go.mod 8 | !/go.sum 9 | -------------------------------------------------------------------------------- /.gitbook/assets/zg-da-batcher.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0gfoundation/0g-da-client/HEAD/.gitbook/assets/zg-da-batcher.png -------------------------------------------------------------------------------- /.gitbook/assets/zg-da-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0gfoundation/0g-da-client/HEAD/.gitbook/assets/zg-da-architecture.png -------------------------------------------------------------------------------- /.gitbook/assets/zg-da-encoding-groups.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0gfoundation/0g-da-client/HEAD/.gitbook/assets/zg-da-encoding-groups.png -------------------------------------------------------------------------------- /disperser/server_config.go: -------------------------------------------------------------------------------- 1 | package disperser 2 | 3 | const ( 4 | Localhost = "0.0.0.0" 5 | ) 6 | 7 | type ServerConfig struct { 8 | GrpcPort string 9 | } 10 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | jsonrpcclient 2 | pyyaml 3 | pysha3 4 | coincurve 5 | eth-utils==1.10.0 6 | py-ecc==5.2.0 7 | web3 8 | eth_tester 9 | rtoml==0.10.0 10 | grpcio 11 | grpcio-tools 12 | -------------------------------------------------------------------------------- /prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s 3 | evaluation_interval: 15s 4 | 5 | scrape_configs: 6 | - job_name: "0g-data-avail" 7 | static_configs: 8 | - targets: ["localhost:9100"] 9 | -------------------------------------------------------------------------------- /tests/dep_pip3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pip3 install -r ../requirements.txt 4 | cp ../api/proto/disperser/disperser.proto . 5 | python3 -m grpc_tools.protoc --proto_path=. ./disperser.proto --python_out=. --grpc_python_out=. 
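6 | # The protoc invocation above generates disperser_pb2.py (messages) and disperser_pb2_grpc.py (client stubs) in this directory; the Python tests import these, e.g. "from disperser_pb2 import BlobStatus" in da_put_get_test.py.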
-------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | 3 | RUN apt-get update 4 | RUN apt-get install -y ca-certificates 5 | 6 | VOLUME ["/runtime"] 7 | COPY ./disperser/bin/combined /bin/combined 8 | WORKDIR /runtime 9 | CMD /runtime/run.sh 10 | -------------------------------------------------------------------------------- /disperser/encoder/config.go: -------------------------------------------------------------------------------- 1 | package encoder 2 | 3 | const ( 4 | Localhost = "0.0.0.0" 5 | ) 6 | 7 | type ServerConfig struct { 8 | GrpcPort string 9 | MaxConcurrentRequests int 10 | RequestPoolSize int 11 | } 12 | -------------------------------------------------------------------------------- /disperser/errors.go: -------------------------------------------------------------------------------- 1 | package disperser 2 | 3 | import "errors" 4 | 5 | var ( 6 | ErrBlobNotFound = errors.New("blob not found") 7 | ErrMemoryDbIsFull = errors.New("memory db is full") 8 | ErrKeyNotFound = errors.New("key not found in db") 9 | ) 10 | -------------------------------------------------------------------------------- /tests/da_test_framework/da_node_type.py: -------------------------------------------------------------------------------- 1 | from enum import Enum, unique 2 | 3 | 4 | @unique 5 | class DANodeType(Enum): 6 | DA_LOCAL_STACK = 3 7 | DA_ENCODER = 4 8 | DA_BATCHER = 5 9 | DA_SERVER = 6 10 | DA_RETRIEVER = 7 11 | DA_SIGNER = 8 12 | -------------------------------------------------------------------------------- /docs/pkg/README.md: -------------------------------------------------------------------------------- 1 | # Dependent Package 2 | 3 | This part contains the dependent packages for the 0G DA protocol. 
4 | 5 | * [Encoding](encoding.md): encode the blob sent to the disperser 6 | * [KZG](kzg.md): KZG commitment algorithm to prove a particular blob is in a certain batch 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | 3 | inabox/testdata/* 4 | inabox/anvil.pid 5 | 6 | test/testdata/* 7 | inabox/resources/kzg/SRSTables/* 8 | 9 | run/* 10 | !run/start.sh 11 | !run/run.sh 12 | !README.md 13 | 14 | **/bin/* 15 | coverage.* 16 | *log 17 | 18 | *__pycache__* 19 | *tmp* 20 | -------------------------------------------------------------------------------- /common/abi.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | _ "embed" 5 | 6 | "github.com/ethereum/go-ethereum/crypto" 7 | ) 8 | 9 | //go:embed abis/EigenDAServiceManager.json 10 | var ServiceManagerAbi []byte 11 | 12 | var BatchConfirmedEventSigHash = crypto.Keccak256Hash([]byte("BatchConfirmed(bytes32,uint32,uint96)")) 13 | -------------------------------------------------------------------------------- /disperser/encoder_client.go: -------------------------------------------------------------------------------- 1 | package disperser 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/0glabs/0g-da-client/common" 7 | "github.com/0glabs/0g-da-client/core" 8 | ) 9 | 10 | type EncoderClient interface { 11 | EncodeBlob(ctx context.Context, data []byte, log common.Logger) (*core.BlobCommitments, error) 12 | } 13 | -------------------------------------------------------------------------------- /common/mock/ratelimiter.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/0glabs/0g-da-client/common" 7 | ) 8 | 9 | type NoopRatelimiter struct { 10 | } 11 | 12 | func (r *NoopRatelimiter) AllowRequest(ctx context.Context, retrieverID string, blobSize uint, rate common.RateParam) (bool, error) { 13 | return true, nil 14 | } 15 | -------------------------------------------------------------------------------- /.github/actions/setup-rust/action.yml: -------------------------------------------------------------------------------- 1 | name: Setup Rust (cache & toolchain) 2 | runs: 3 | using: composite 4 | steps: 5 | - name: Install toolchain 1.75.0 6 | uses: actions-rs/toolchain@v1 7 | with: 8 | profile: minimal 9 | toolchain: 1.75.0 10 | components: rustfmt, clippy 11 | 12 | - uses: Swatinem/rust-cache@v2 13 | -------------------------------------------------------------------------------- /run/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | IMAGE_NAME="0gclient:latest" 5 | CONTAINER_NAME="daclient" 6 | 7 | HOST_PORT=51001 8 | CONTAINER_PORT=51001 9 | 10 | docker run -d -v .:/runtime --name=$CONTAINER_NAME -p $HOST_PORT:$CONTAINER_PORT --restart=always $IMAGE_NAME 11 | 12 | echo "Checking if container is running..." 
13 | docker ps --filter "name=$CONTAINER_NAME" 14 | -------------------------------------------------------------------------------- /common/workerpool.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import "context" 4 | 5 | // WorkerPool is an interface for a worker pool taken from "github.com/gammazero/workerpool" 6 | type WorkerPool interface { 7 | Size() int 8 | Stop() 9 | StopWait() 10 | Stopped() bool 11 | Submit(task func()) 12 | SubmitWait(task func()) 13 | WaitingQueueSize() int 14 | Pause(ctx context.Context) 15 | } 16 | -------------------------------------------------------------------------------- /common/param_store.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import "context" 4 | 5 | // KVStore is a simple key value store interface. 6 | type KVStore[T any] interface { 7 | // GetItem returns the value associated with a given key. 8 | GetItem(ctx context.Context, key string) (*T, error) 9 | // UpdateItem updates the value for the given key. 10 | UpdateItem(ctx context.Context, key string, value *T) error 11 | } 12 | -------------------------------------------------------------------------------- /disperser/signer_client.go: -------------------------------------------------------------------------------- 1 | package disperser 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/0glabs/0g-da-client/common" 7 | "github.com/0glabs/0g-da-client/core" 8 | pb "github.com/0glabs/0g-da-client/disperser/api/grpc/signer" 9 | ) 10 | 11 | type SignerClient interface { 12 | BatchSign(ctx context.Context, addr string, data []*pb.SignRequest, log common.Logger) ([]*core.Signature, error) 13 | } 14 | -------------------------------------------------------------------------------- /contributing.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ### Why are these changes needed? 4 | 5 | ### Checks 6 | 7 | * [ ] I've made sure the lint is passing in this PR. 8 | * [ ] I've made sure the tests are passing. Note that there might be a few flaky tests; in that case, please comment that they are not relevant. 9 | * [ ] Testing Strategy 10 | * [ ] Unit tests 11 | * [ ] Integration tests 12 | * [ ] This PR is not tested :( 13 | -------------------------------------------------------------------------------- /combined.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.21.1-alpine3.18 AS builder 2 | 3 | RUN apk add --no-cache make musl-dev linux-headers gcc git jq bash 4 | 5 | # build dispersal api server with local monorepo go modules 6 | WORKDIR / 7 | COPY . 0g-da-client 8 | WORKDIR /0g-da-client 9 | RUN make build 10 | 11 | FROM alpine:3.18 12 | 13 | COPY --from=builder /0g-da-client/disperser/bin/combined /usr/local/bin/combined 14 | 15 | CMD ["combined"] -------------------------------------------------------------------------------- /docs/pkg/encoding.md: -------------------------------------------------------------------------------- 1 | # Encoding 2 | 3 | * Performs Reed-Solomon encoding using elliptic curve points. The library enables KZG multi-proof generation and reveal in $$O(n \log n)$$ time using FFT, based on the FK20 algorithm.
4 | * Built upon crypto primitives from [https://pkg.go.dev/github.com/protolambda/go-kzg](https://pkg.go.dev/github.com/protolambda/go-kzg) 5 | * Accepts an arbitrary number of systematic nodes, parity nodes, and any data size, free of power-of-2 restrictions 6 | -------------------------------------------------------------------------------- /common/rpc_ethclient.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ethereum/go-ethereum/rpc" 7 | ) 8 | 9 | type RPCEthClient interface { 10 | BatchCall(b []rpc.BatchElem) error 11 | BatchCallContext(ctx context.Context, b []rpc.BatchElem) error 12 | Call(result interface{}, method string, args ...interface{}) error 13 | CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error 14 | } 15 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "0g-da-contract"] 2 | path = 0g-da-contract 3 | url = https://github.com/0glabs/0g-da-contract.git 4 | [submodule "0g-da-encoder"] 5 | path = 0g-da-encoder 6 | url = https://github.com/0glabs/0g-da-encoder.git 7 | [submodule "0g-da-signer"] 8 | path = 0g-da-signer 9 | url = https://github.com/0glabs/0g-da-signer.git 10 | [submodule "0g-chain"] 11 | path = 0g-chain 12 | url = https://github.com/0glabs/0g-chain.git 13 | branch = dev 14 | -------------------------------------------------------------------------------- /disperser/db.go: -------------------------------------------------------------------------------- 1 | package disperser 2 | 3 | import ( 4 | "github.com/syndtr/goleveldb/leveldb/iterator" 5 | ) 6 | 7 | // DB is an interface to access the local database, such as leveldb, rocksdb. 8 | type DB interface { 9 | Put(key []byte, value []byte) error 10 | Get(key []byte) ([]byte, error) 11 | Delete(key []byte) error 12 | DeleteBatch(keys [][]byte) error 13 | WriteBatch(keys, values [][]byte) error 14 | NewIterator(prefix []byte) iterator.Iterator 15 | } 16 | -------------------------------------------------------------------------------- /disperser/batcher/mock/finalizer.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/stretchr/testify/mock" 7 | ) 8 | 9 | type MockFinalizer struct { 10 | mock.Mock 11 | } 12 | 13 | func NewFinalizer() *MockFinalizer { 14 | return &MockFinalizer{} 15 | } 16 | 17 | func (b *MockFinalizer) Start(ctx context.Context) {} 18 | 19 | func (b *MockFinalizer) FinalizeBlobs(ctx context.Context) error { 20 | args := b.Called() 21 | return args.Error(0) 22 | } 23 | -------------------------------------------------------------------------------- /docs/api/README.md: -------------------------------------------------------------------------------- 1 | # gRPC API 2 | 3 | This part contains the API documentation for the gRPC services included in the 0G DA platform. Each markdown file contains the protobuf definitions for its respective service, including: 4 | 5 | - [Disperser](disperser.md): the hosted service for users to interact with 0G DA. 6 | - [Retriever](retriever.md): a service that users can run on their own infrastructure, which exposes a gRPC endpoint for retrieval and verification of blobs from 0G Storage nodes.
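As a quick connectivity check against a running service, the sketch below dials the disperser's endpoint and queries the standard gRPC health service (registered via `common/healthcheck.RegisterHealthServer`). This is a minimal sketch, not part of the documented API surface: it assumes the combined server is reachable at `localhost:51001`, the default `--disperser-server.grpc-port` used in `run/run.sh`.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Dial the disperser; 51001 is the port configured in run/run.sh (an assumption).
	conn, err := grpc.Dial("localhost:51001", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// The HealthServer in common/healthcheck answers Check with SERVING.
	resp, err := grpc_health_v1.NewHealthClient(conn).Check(ctx, &grpc_health_v1.HealthCheckRequest{})
	if err != nil {
		log.Fatalf("health check: %v", err)
	}
	fmt.Println("health status:", resp.GetStatus())
}
```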
7 | -------------------------------------------------------------------------------- /tests/da_test_framework/contracts.py: -------------------------------------------------------------------------------- 1 | from os.path import join 2 | from pathlib import Path 3 | import json 4 | from web3 import Web3 5 | 6 | 7 | def load_contract_metadata(base_path: str, name: str): 8 | path = Path(join(base_path, "artifacts")) 9 | try: 10 | found_file = next(path.rglob(f"{name}.json")) 11 | return json.loads(open(found_file, "r").read()) 12 | except StopIteration: 13 | raise Exception(f"Cannot find contract {name}'s metadata") 14 | 15 | -------------------------------------------------------------------------------- /api/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/0glabs/0g-da-client/api 2 | 3 | go 1.21 4 | 5 | toolchain go1.21.1 6 | 7 | require ( 8 | google.golang.org/grpc v1.58.3 9 | google.golang.org/protobuf v1.31.0 10 | ) 11 | 12 | require ( 13 | github.com/golang/protobuf v1.5.3 // indirect 14 | golang.org/x/net v0.12.0 // indirect 15 | golang.org/x/sys v0.10.0 // indirect 16 | golang.org/x/text v0.11.0 // indirect 17 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect 18 | ) 19 | -------------------------------------------------------------------------------- /.devcontainer/install.sh: -------------------------------------------------------------------------------- 1 | # Install foundry 2 | curl -L https://foundry.paradigm.xyz | bash 3 | ~/.foundry/bin/foundryup 4 | 5 | # Install go dependencies 6 | go install github.com/onsi/ginkgo/v2/ginkgo@v2.2.0 7 | go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest 8 | go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28 9 | go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2 10 | # go install github.com/mikefarah/yq/v4@latest 11 | 12 | # yarn global add @graphprotocol/graph-cli@0.51.0 -------------------------------------------------------------------------------- /cli/config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/0glabs/0g-da-client/cli/flags" 5 | "github.com/0glabs/0g-da-client/common/aws" 6 | "github.com/0glabs/0g-da-client/common/logging" 7 | "github.com/urfave/cli" 8 | ) 9 | 10 | type Config struct { 11 | AwsClientConfig aws.ClientConfig 12 | LoggerConfig logging.Config 13 | } 14 | 15 | func NewConfig(ctx *cli.Context) *Config { 16 | return &Config{ 17 | AwsClientConfig: aws.ReadClientConfig(ctx, flags.FlagPrefix), 18 | LoggerConfig: logging.ReadCLIConfig(ctx, flags.FlagPrefix), 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /trafficgenerator.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.21.1-alpine3.18 as builder 2 | 3 | RUN apk add --no-cache make musl-dev linux-headers gcc git jq bash 4 | 5 | WORKDIR /app 6 | 7 | # Copy Entire Repo here in order to not copy individual dependencies 8 | COPY . . 
9 | 10 | WORKDIR /app/tools/traffic 11 | 12 | RUN --mount=type=cache,target=/go/pkg/mod \ 13 | --mount=type=cache,target=/root/.cache/go-build \ 14 | go build -o ./bin/generator ./cmd 15 | 16 | FROM alpine:3.18 17 | 18 | COPY --from=builder /app/tools/traffic/bin/generator /usr/local/bin 19 | 20 | ENTRYPOINT ["generator"] -------------------------------------------------------------------------------- /disperser/api/proto/encoder/encoder.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option go_package = "github.com/0glabs/0g-da-client/api/grpc/encoder"; 4 | package encoder; 5 | 6 | service Encoder { 7 | rpc EncodeBlob(EncodeBlobRequest) returns (EncodeBlobReply) {} 8 | } 9 | 10 | // EncodeBlobRequest contains data and pre-computed encoding params provided to Encoder 11 | message EncodeBlobRequest { 12 | bytes data = 1; 13 | bool require_data = 2; 14 | } 15 | 16 | // EncodeBlobReply 17 | message EncodeBlobReply { 18 | uint32 version = 1; 19 | bytes erasure_commitment = 2; 20 | bytes storage_root = 3; 21 | bytes encoded_data = 4; 22 | repeated bytes encoded_slice = 5; 23 | } -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yml: -------------------------------------------------------------------------------- 1 | name: lint 2 | on: 3 | push: 4 | branches: 5 | - master 6 | pull_request: 7 | branches: 8 | - master 9 | 10 | jobs: 11 | golangci: 12 | name: Linter 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/setup-go@v3 16 | with: 17 | go-version: '1.21' # The Go version to download (if necessary) and use. 18 | - run: go version 19 | 20 | - name: Checkout 0g-data-avail 21 | uses: actions/checkout@v3 22 | 23 | - name: golangci-lint 24 | uses: golangci/golangci-lint-action@v3 25 | with: 26 | version: v1.54 27 | args: --timeout 3m --verbose 28 | -------------------------------------------------------------------------------- /disperser/api/proto/signer/signer.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option go_package = "github.com/0glabs/0g-da-client/api/grpc/signer"; 4 | package signer; 5 | 6 | service Signer { 7 | rpc BatchSign(BatchSignRequest) returns (BatchSignReply) {} 8 | } 9 | 10 | message SignRequest { 11 | uint64 epoch = 1; // epoch number of DASigners internal contract 12 | uint64 quorum_id = 2; // quorum id of DASigners internal contract 13 | bytes erasure_commitment = 3; 14 | bytes storage_root = 4; 15 | repeated bytes encoded_slice = 5; 16 | } 17 | 18 | message BatchSignRequest { 19 | repeated SignRequest requests = 1; 20 | } 21 | 22 | message BatchSignReply { 23 | repeated bytes signatures = 1; 24 | } -------------------------------------------------------------------------------- /disperser/cmd/batcher/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.21.1-alpine3.18 as builder 2 | 3 | RUN apk add --no-cache make musl-dev linux-headers gcc git jq bash 4 | 5 | # build batcher with local monorepo go modules 6 | COPY ./disperser /app/disperser 7 | COPY common /app/common 8 | COPY core /app/core 9 | COPY api /app/api 10 | COPY contracts /app/contracts 11 | COPY indexer /app/indexer 12 | COPY pkg /app/pkg 13 | COPY go.mod /app 14 | COPY go.sum /app 15 | 16 | WORKDIR /app/disperser 17 | 18 | RUN --mount=type=cache,target=/go/pkg/mod \ 19 | --mount=type=cache,target=/root/.cache/go-build \ 20 | go build -o 
./bin/server ./cmd/batcher 21 | 22 | FROM alpine:3.18 23 | 24 | COPY --from=builder /app/disperser/bin/server /usr/local/bin 25 | 26 | ENTRYPOINT ["server"] 27 | -------------------------------------------------------------------------------- /disperser/cmd/apiserver/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.21.1-alpine3.18 as builder 2 | 3 | RUN apk add --no-cache make musl-dev linux-headers gcc git jq bash 4 | 5 | # build dispersal api server with local monorepo go modules 6 | COPY ./disperser /app/disperser 7 | COPY common /app/common 8 | COPY contracts /app/contracts 9 | COPY core /app/core 10 | COPY api /app/api 11 | COPY indexer /app/indexer 12 | COPY pkg /app/pkg 13 | COPY go.mod /app 14 | COPY go.sum /app 15 | 16 | WORKDIR /app/disperser 17 | 18 | RUN --mount=type=cache,target=/go/pkg/mod \ 19 | --mount=type=cache,target=/root/.cache/go-build \ 20 | go build -o ./bin/server ./cmd/apiserver 21 | 22 | FROM alpine:3.18 23 | 24 | COPY --from=builder /app/disperser/bin/server /usr/local/bin 25 | 26 | ENTRYPOINT ["server"] -------------------------------------------------------------------------------- /synthetic-test-client.Dockerfile: -------------------------------------------------------------------------------- 1 | # Use the official Go image as the base image 2 | FROM golang:1.21.1-alpine3.18 as builder 3 | 4 | # Copy only the test file and necessary files to the container 5 | COPY ./disperser /app/disperser 6 | COPY ./test/synthetic-test /app 7 | COPY go.mod /app 8 | COPY go.sum /app 9 | COPY api /app/api 10 | COPY clients /app/clients 11 | COPY node /app/node 12 | COPY common /app/common 13 | COPY churner /app/churner 14 | COPY core /app/core 15 | COPY indexer /app/indexer 16 | COPY contracts /app/contracts 17 | COPY pkg /app/pkg 18 | # Set the working directory inside the container 19 | WORKDIR /app 20 | 21 | # TODO eventually this will be replaced with an executable 22 | # Run the Go test command for the specific test file 23 | CMD ["go", "test", "-v", "synthetic_client_test.go"] 24 | -------------------------------------------------------------------------------- /disperser/batcher/slice_signer_test.go: -------------------------------------------------------------------------------- 1 | package batcher 2 | 3 | import ( 4 | "crypto/sha256" 5 | "math/big" 6 | "testing" 7 | 8 | "github.com/0glabs/0g-da-client/core" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestGetHash(t *testing.T) { 13 | dataRoot := sha256.Sum256([]byte("dataRoot")) 14 | epoch := big.NewInt(123) 15 | quorumId := big.NewInt(456) 16 | erasureCommitment := core.NewG1Point(new(big.Int).SetUint64(1), new(big.Int).SetUint64(1)) 17 | 18 | expectedHash := [32]byte{0xde, 0x7b, 0xb4, 0x32, 0xe4, 0xff, 0xf3, 0xff, 0xbd, 0x59, 0x3c, 0x99, 0x6a, 0x9a, 0x60, 0x62, 0x6d, 0x24, 0xa4, 0xaa, 0xc0, 0xa5, 0xd0, 0xbb, 0x49, 0x47, 0x66, 0x48, 0x92, 0x42, 0x91, 0xe} 19 | 20 | resultHash, err := getHash(dataRoot, epoch, quorumId, erasureCommitment) 21 | assert.NoError(t, err) 22 | assert.Equal(t, expectedHash, resultHash, "Hashes should match") 23 | } 24 | -------------------------------------------------------------------------------- /disperser/mock/signer.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/0glabs/0g-da-client/common" 7 | "github.com/0glabs/0g-da-client/core" 8 | "github.com/0glabs/0g-da-client/disperser" 9 | pb 
"github.com/0glabs/0g-da-client/disperser/api/grpc/signer" 10 | "github.com/stretchr/testify/mock" 11 | ) 12 | 13 | type MockSignerClient struct { 14 | mock.Mock 15 | } 16 | 17 | var _ disperser.SignerClient = (*MockSignerClient)(nil) 18 | 19 | func NewMockSignerClient() *MockSignerClient { 20 | return &MockSignerClient{} 21 | } 22 | 23 | func (m *MockSignerClient) BatchSign(ctx context.Context, addr string, data []*pb.SignRequest, log common.Logger) ([]*core.Signature, error) { 24 | args := m.Called(ctx, addr, data, log) 25 | var signatures []*core.Signature 26 | if args.Get(0) != nil { 27 | signatures = args.Get(0).([]*core.Signature) 28 | } 29 | 30 | return signatures, args.Error(1) 31 | } 32 | -------------------------------------------------------------------------------- /SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Table of contents 2 | 3 | * [0GDA](README.md) 4 | * [0G DA Spec](docs/README.md) 5 | * [Introduction]() 6 | * [Architecture]() 7 | * [Disperser]() 8 | * [Batcher]() 9 | * [Retriever]() 10 | * [Security Guarantee]() 11 | * [Encoding]() 12 | * [KZG Encoder Backend]() 13 | * [Data Model]() 14 | * [gRPC API]() 15 | * [Disperser API]() 16 | * [Retriever API]() 17 | * [Dependent Package]() 18 | * [Encoding]() 19 | * [KZG and FFT utils]() 20 | * [Glossary]() 21 | * [Contributing](contributing.md) 22 | -------------------------------------------------------------------------------- /common/store/local_store.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | 7 | "github.com/0glabs/0g-da-client/common" 8 | lru "github.com/hashicorp/golang-lru/v2" 9 | ) 10 | 11 | type localParamStore[T any] struct { 12 | cache *lru.Cache[string, T] 13 | } 14 | 15 | func NewLocalParamStore[T any](size int) (common.KVStore[T], error) { 16 | cache, err := lru.New[string, T](size) 17 | if err != nil { 18 | return nil, err 19 | } 20 | 21 | return &localParamStore[T]{ 22 | cache: cache, 23 | }, nil 24 | } 25 | 26 | func (s *localParamStore[T]) GetItem(ctx context.Context, key string) (*T, error) { 27 | 28 | obj, ok := s.cache.Get(key) 29 | if !ok { 30 | return nil, errors.New("error retrieving key") 31 | } 32 | 33 | return &obj, nil 34 | 35 | } 36 | 37 | func (s *localParamStore[T]) UpdateItem(ctx context.Context, key string, params *T) error { 38 | 39 | s.cache.Add(key, *params) 40 | 41 | return nil 42 | } 43 | -------------------------------------------------------------------------------- /docs/glossary.md: -------------------------------------------------------------------------------- 1 | # Glossary 2 | 3 | ## Data Packaging 4 | 5 | **Blob:** Blobs are the fundamental unit of data posted to 0G DA by users. 6 | 7 | **Batch:** Batch is an aggregated data of multiple blobs together with the KZG commitments to each blob. 8 | 9 | ## System Components 10 | 11 | **Disperser**. The Disperser is an off-chain service which is responsible for uploading the data into s3 buckets and sending the blob to the Batcher for further process. The disperser is an untrusted system component. 12 | 13 | **Batcher**. The Batcher is an off-chain component which accepts the blob requests from the Disperser and batches multiple blobs together with their KZG commitments into one data packet and send out to the 0G Storage nodes for data storage. 14 | 15 | **Retriever**. 
The Retriever is an off-chain service which implements a protocol for receiving data blobs from 0G Storage nodes. It is also responsible for verifying the authenticity and correctness of the data. 16 | -------------------------------------------------------------------------------- /core/locate.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | func AllocateRows(encodedBlobs []*BlobLocation) uint { 4 | n := len(encodedBlobs) 5 | allocated := make([]int, n) 6 | segments := uint(0) 7 | for i := 0; i < n; { 8 | offset := uint(0) 9 | // allocate matrices in turn 10 | for j := i; i < n; { 11 | if allocated[j] == int(encodedBlobs[j].Rows) { 12 | // encoded blob is fully allocated 13 | if j == i { 14 | i++ 15 | } 16 | } else { 17 | // try to fill one chunk + proof 18 | l := encodedBlobs[j].Cols*CoeffSize + CommitmentSize 19 | if offset+l <= SegmentSize { 20 | encodedBlobs[j].SegmentIndexes[allocated[j]] = segments 21 | encodedBlobs[j].Offsets[allocated[j]] = offset 22 | allocated[j]++ 23 | offset += l 24 | } else { 25 | break 26 | } 27 | } 28 | // move to next blob 29 | j++ 30 | if j >= n { 31 | j = i 32 | } 33 | } 34 | if offset > 0 { 35 | segments++ 36 | } 37 | } 38 | return segments 39 | } 40 | -------------------------------------------------------------------------------- /common/mock/rpc_ethclient.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/ethereum/go-ethereum/rpc" 7 | "github.com/stretchr/testify/mock" 8 | ) 9 | 10 | type MockRPCEthClient struct { 11 | mock.Mock 12 | } 13 | 14 | func (mock *MockRPCEthClient) BatchCall(b []rpc.BatchElem) error { 15 | args := mock.Called() 16 | return args.Error(0) 17 | } 18 | 19 | func (mock *MockRPCEthClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { 20 | args := mock.Called(ctx, b) 21 | return args.Error(0) 22 | } 23 | 24 | func (mock *MockRPCEthClient) Call(result interface{}, method string, args ...interface{}) error { 25 | mockArgs := mock.Called() 26 | return mockArgs.Error(0) 27 | } 28 | 29 | func (mock *MockRPCEthClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error { 30 | args = append([]interface{}{ctx, result, method}, args...) 31 | mockArgs := mock.Called(args...) 32 | return mockArgs.Error(0) 33 | } 34 | -------------------------------------------------------------------------------- /tests/config/node_config.py: -------------------------------------------------------------------------------- 1 | from web3 import Web3 2 | 3 | BLOCK_SIZE_LIMIT = 200 * 1024 4 | # 0xfbe45681Ac6C53D5a40475F7526baC1FE7590fb8 5 | GENESIS_PRIV_KEY = "46b9e861b63d3509c88b7817275a30d22d62c8cd8fa6486ddee35ef0d8e0495f" 6 | MINER_ID = "308a6e102a5829ba35e4ba1da0473c3e8bd45f5d3ffb91e31adb43f25463dddb" 7 | GENESIS_ACCOUNT = Web3().eth.account.from_key(GENESIS_PRIV_KEY) 8 | TX_PARAMS = { 9 | "gasPrice": 10_000_000_000, 10 | "gas": 10_000_000, 11 | "from": GENESIS_ACCOUNT.address, 12 | } 13 | 14 | # 0x0e768D12395C8ABFDEdF7b1aEB0Dd1D27d5E2A7F 15 | GENESIS_PRIV_KEY1 = "9a6d3ba2b0c7514b16a006ee605055d71b9edfad183aeb2d9790e9d4ccced471" 16 | GENESIS_ACCOUNT1 = Web3().eth.account.from_key(GENESIS_PRIV_KEY1) 17 | TX_PARAMS1 = { 18 | "gasPrice": 10_000_000_000, 19 | "gas": 10_000_000, 20 | "from": GENESIS_ACCOUNT1.address, 21 | } 22 | 23 | NO_SEAL_FLAG = 0x1 24 | NO_MERKLE_PROOF_FLAG = 0x2 25 | 26 | PRIV_KEY = "17939f5b4fa643ab86df3ae9d0b8f9c8c0c14328e679ece6c243d32dab673e49" -------------------------------------------------------------------------------- /common/store/local_store_test.go: -------------------------------------------------------------------------------- 1 | package store_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/0glabs/0g-da-client/common" 9 | "github.com/0glabs/0g-da-client/common/store" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | var ( 14 | inmemBucketStoreSize = 1000 15 | ) 16 | 17 | func TestLocalStore(t *testing.T) { 18 | 19 | localStore, err := store.NewLocalParamStore[common.RateBucketParams](inmemBucketStoreSize) 20 | assert.NoError(t, err) 21 | 22 | ctx := context.Background() 23 | 24 | p := &common.RateBucketParams{ 25 | BucketLevels: []time.Duration{time.Second, time.Minute}, 26 | LastRequestTime: time.Now(), 27 | } 28 | 29 | p2, err := localStore.GetItem(ctx, "testRetriever") 30 | assert.Error(t, err) 31 | assert.Nil(t, p2) 32 | 33 | err = localStore.UpdateItem(ctx, "testRetriever", p) 34 | assert.NoError(t, err) 35 | 36 | p2, err = localStore.GetItem(ctx, "testRetriever") 37 | 38 | assert.NoError(t, err) 39 | assert.Equal(t, p, p2) 40 | 41 | } 42 | -------------------------------------------------------------------------------- /common/healthcheck/server.go: -------------------------------------------------------------------------------- 1 | package healthcheck 2 | 3 | import ( 4 | "context" 5 | 6 | "google.golang.org/grpc" 7 | "google.golang.org/grpc/health/grpc_health_v1" 8 | ) 9 | 10 | type HealthServer struct{} 11 | 12 | // Watch implements grpc_health_v1.HealthServer. 13 | func (*HealthServer) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error { 14 | panic("unimplemented") 15 | } 16 | 17 | func (s *HealthServer) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { 18 | // If the server is healthy, return a response with status "SERVING". 19 | return &grpc_health_v1.HealthCheckResponse{ 20 | Status: grpc_health_v1.HealthCheckResponse_SERVING, 21 | }, nil 22 | } 23 | 24 | // RegisterHealthServer registers the HealthServer with the provided gRPC server. 
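Once registered, clients can probe liveness through the standard grpc_health_v1 Check RPC, which the HealthServer above answers with SERVING.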
25 | func RegisterHealthServer(server *grpc.Server) { 26 | healthServer := &HealthServer{} // Initialize your health server implementation 27 | grpc_health_v1.RegisterHealthServer(server, healthServer) 28 | } 29 | -------------------------------------------------------------------------------- /cli/flags/flags.go: -------------------------------------------------------------------------------- 1 | package flags 2 | 3 | import ( 4 | "github.com/0glabs/0g-da-client/common" 5 | "github.com/0glabs/0g-da-client/common/aws" 6 | "github.com/0glabs/0g-da-client/common/logging" 7 | "github.com/urfave/cli" 8 | ) 9 | 10 | const ( 11 | FlagPrefix = "aws-cli" 12 | envVarPrefix = "AWS_CLI" 13 | ) 14 | 15 | var ( 16 | /* Required Flags */ 17 | S3BucketNameFlag = cli.StringFlag{ 18 | Name: common.PrefixFlag(FlagPrefix, "s3-bucket-name"), 19 | Usage: "Name of the bucket to store blobs", 20 | Required: true, 21 | EnvVar: common.PrefixEnvVar(envVarPrefix, "S3_BUCKET_NAME"), 22 | } 23 | DynamoDBTableNameFlag = cli.StringFlag{ 24 | Name: common.PrefixFlag(FlagPrefix, "table-name"), 25 | Usage: "Name of the dynamodb table", 26 | Required: true, 27 | EnvVar: common.PrefixEnvVar(envVarPrefix, "TABLE_NAME"), 28 | } 29 | ) 30 | 31 | // Flags contains the list of configuration options available to the binary. 32 | var Flags []cli.Flag 33 | 34 | func init() { 35 | Flags = append(logging.CLIFlags(envVarPrefix, FlagPrefix), aws.ClientFlags(envVarPrefix, FlagPrefix)...) 36 | } 37 | -------------------------------------------------------------------------------- /common/mock/workerpool.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/0glabs/0g-da-client/common" 7 | "github.com/stretchr/testify/mock" 8 | ) 9 | 10 | type MockWorkerpool struct { 11 | mock.Mock 12 | } 13 | 14 | var _ common.WorkerPool = (*MockWorkerpool)(nil) 15 | 16 | func (mock *MockWorkerpool) Size() int { 17 | args := mock.Called() 18 | result := args.Get(0) 19 | return result.(int) 20 | } 21 | 22 | func (mock *MockWorkerpool) Stop() { 23 | mock.Called() 24 | } 25 | 26 | func (mock *MockWorkerpool) StopWait() { 27 | mock.Called() 28 | } 29 | 30 | func (mock *MockWorkerpool) Stopped() bool { 31 | args := mock.Called() 32 | result := args.Get(0) 33 | return result.(bool) 34 | } 35 | 36 | func (mock *MockWorkerpool) Submit(task func()) { 37 | mock.Called(task) 38 | } 39 | 40 | func (mock *MockWorkerpool) SubmitWait(task func()) { 41 | mock.Called(task) 42 | } 43 | 44 | func (mock *MockWorkerpool) WaitingQueueSize() int { 45 | args := mock.Called() 46 | result := args.Get(0) 47 | return result.(int) 48 | } 49 | 50 | func (mock *MockWorkerpool) Pause(ctx context.Context) { 51 | mock.Called(ctx) 52 | } 53 | -------------------------------------------------------------------------------- /tests/utility/simple_rpc_proxy.py: -------------------------------------------------------------------------------- 1 | from jsonrpcclient import request, parse, Ok 2 | import requests 3 | 4 | 5 | class SimpleRpcProxy: 6 | def __init__(self, url, timeout=60): 7 | self.url = url 8 | self.timeout = timeout 9 | 10 | def __getattr__(self, name): 11 | return RpcCaller(self.url, name, self.timeout) 12 | 13 | 14 | class RpcCaller: 15 | def __init__(self, url, method, timeout): 16 | self.url = url 17 | self.method = method 18 | self.timeout = timeout 19 | 20 | def __call__(self, *args, **argsn): 21 | r = request(self.method, *args) 22 | try: 23 | response = requests.post(self.url, 
json=r, timeout=self.timeout) 24 | parsed = parse(response.json()) 25 | if isinstance(parsed, Ok): 26 | return parsed.result 27 | else: 28 | print("Failed to call RPC, method = %s(%s), error = %s" % (self.method, str(*args), parsed)) 29 | except Exception as ex: 30 | print("Failed to call RPC, method = %s(%s), exception = %s" % (self.method, str(*args), ex)) 31 | return None 32 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # 0G DA 2 | 3 | ## Organization 4 | 5 | The 0G DA repo is organized as a monorepo, with each project adhering to the "Ben Johnson" project structure style. Within the core project directories (e.g., `core`, `disperser`, `retriever`), the main interfaces and data types are defined at the root of the project, while implementations are organized by dependency. 6 | 7 | In general, the `core` project contains implementation of all the important business logic responsible for the security guarantees of the 0G DA protocol, while the other projects add the networking layers needed to run the distributed system. 8 | 9 | ## Directory structure 10 | 11 | ``` 12 | ┌── : api: Protobuf definitions 13 | ├── : common: contract bindings and other basic components 14 | ┌── : core: Core logic of the 0G DA protocol 15 | ├── : disperser: Disperser service 16 | ├── : docs: Documentation and specification 17 | ├── : pkg: Dependent pkg 18 | | ├── : encoding: Core encoding/decoding functionality and multiproof generation 19 | | └── : kzg: kzg libraries 20 | ├── : retriever: Retriever service 21 | ├── : tests: Tools for running integration tests 22 | ``` 23 | -------------------------------------------------------------------------------- /common/storage_node/cli.go: -------------------------------------------------------------------------------- 1 | package storage_node 2 | 3 | import ( 4 | "github.com/0glabs/0g-da-client/common" 5 | "github.com/urfave/cli" 6 | ) 7 | 8 | var ( 9 | KVDBPathFlagName = "storage.kv-db-path" 10 | TimeToExpireFlagName = "storage.time-to-expire" 11 | ) 12 | 13 | type ClientConfig struct { 14 | KvDbPath string 15 | TimeToExpire uint 16 | } 17 | 18 | func ClientFlags(envPrefix string, flagPrefix string) []cli.Flag { 19 | return []cli.Flag{ 20 | cli.StringFlag{ 21 | Name: common.PrefixFlag(flagPrefix, KVDBPathFlagName), 22 | Usage: "kv db path", 23 | Required: false, 24 | Value: "", 25 | EnvVar: common.PrefixEnvVar(envPrefix, "KV_DB_PATH"), 26 | }, 27 | cli.UintFlag{ 28 | Name: common.PrefixFlag(flagPrefix, TimeToExpireFlagName), 29 | Usage: "time to expire", 30 | Required: false, 31 | Value: 5184000, // 60 days 32 | EnvVar: common.PrefixEnvVar(envPrefix, "TimeToExpire"), 33 | }, 34 | } 35 | } 36 | 37 | func ReadClientConfig(ctx *cli.Context, flagPrefix string) ClientConfig { 38 | return ClientConfig{ 39 | KvDbPath: ctx.GlobalString(common.PrefixFlag(flagPrefix, KVDBPathFlagName)), 40 | TimeToExpire: ctx.GlobalUint(common.PrefixFlag(flagPrefix, TimeToExpireFlagName)), 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /run/run.sh: -------------------------------------------------------------------------------- 1 | /bin/combined \ 2 | --chain.rpc L1_RPC_ENDPOINT \ 3 | --chain.private-key YOUR_PRIVATE_KEY \ 4 | --chain.receipt-wait-rounds 180 \ 5 | --chain.receipt-wait-interval 1s \ 6 | --chain.gas-limit 2000000 \ 7 | --combined-server.use-memory-db \ 8 | 
--combined-server.storage.kv-db-path /runtime/ \ 9 | --combined-server.storage.time-to-expire 2592000 \ 10 | --disperser-server.grpc-port 51001 \ 11 | --batcher.da-entrance-contract ENTRANCE_CONTRACT_ADDR \ 12 | --batcher.da-signers-contract 0x0000000000000000000000000000000000001000 \ 13 | --batcher.finalizer-interval 20s \ 14 | --batcher.confirmer-num 3 \ 15 | --batcher.max-num-retries-for-sign 3 \ 16 | --batcher.finalized-block-count 50 \ 17 | --batcher.batch-size-limit 500 \ 18 | --batcher.encoding-interval 3s \ 19 | --batcher.encoding-request-queue-size 1 \ 20 | --batcher.pull-interval 10s \ 21 | --batcher.signing-interval 3s \ 22 | --batcher.signed-pull-interval 20s \ 23 | --batcher.expiration-poll-interval 3600 \ 24 | --encoder-socket DA_ENCODER_SERVER \ 25 | --encoding-timeout 300s \ 26 | --signing-timeout 60s \ 27 | --chain-read-timeout 12s \ 28 | --chain-write-timeout 13s \ 29 | --combined-server.log.level-file trace \ 30 | --combined-server.log.level-std trace \ 31 | --combined-server.log.path /runtime/run.log 32 | -------------------------------------------------------------------------------- /common/mock/s3_client.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | 7 | "github.com/0glabs/0g-da-client/common/aws/s3" 8 | ) 9 | 10 | type S3Client struct { 11 | bucket map[string][]byte 12 | } 13 | 14 | func NewS3Client() *S3Client { 15 | return &S3Client{bucket: make(map[string][]byte)} 16 | } 17 | 18 | func (s *S3Client) DownloadObject(ctx context.Context, bucket string, key string) ([]byte, error) { 19 | data, ok := s.bucket[key] 20 | if !ok { 21 | return []byte{}, s3.ErrObjectNotFound 22 | } 23 | return data, nil 24 | } 25 | 26 | func (s *S3Client) UploadObject(ctx context.Context, bucket string, key string, data []byte) error { 27 | s.bucket[key] = data 28 | return nil 29 | } 30 | 31 | func (s *S3Client) DeleteObject(ctx context.Context, bucket string, key string) error { 32 | delete(s.bucket, key) 33 | return nil 34 | } 35 | 36 | func (s *S3Client) ListObjects(ctx context.Context, bucket string, prefix string) ([]s3.Object, error) { 37 | objects := make([]s3.Object, 0, 5) 38 | for k, v := range s.bucket { 39 | if strings.HasPrefix(k, prefix) { 40 | objects = append(objects, s3.Object{Key: k, Size: int64(len(v))}) 41 | } 42 | } 43 | return objects, nil 44 | } 45 | 46 | func (s *S3Client) CreateBucket(ctx context.Context, tableName string, region string) error { 47 | return nil 48 | } 49 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: functional-test 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | test: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - name: Checkout sources 19 | uses: actions/checkout@v3 20 | # with: 21 | # submodules: recursive 22 | 23 | - name: Setup Rust (cache & toolchain) 24 | uses: ./.github/actions/setup-rust 25 | 26 | - name: Set up Python 3.9 27 | uses: actions/setup-python@v4 28 | with: 29 | python-version: '3.9' 30 | cache: 'pip' 31 | 32 | - name: Install dependencies 33 | run: | 34 | python -m pip install --upgrade pip 35 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 36 | 37 | - name: Build Protobuf 38 | run: | 39 | cp api/proto/disperser/disperser.proto tests/ 
40 | cp api/proto/retriever/retriever.proto tests/ 41 | cd tests && python -m grpc_tools.protoc --proto_path=. ./disperser.proto --python_out=. --grpc_python_out=. && python -m grpc_tools.protoc --proto_path=. ./retriever.proto --python_out=. --grpc_python_out=. 42 | 43 | 44 | # - name: Run tests 45 | # run: | 46 | # cd tests 47 | # python da_test_all.py 48 | -------------------------------------------------------------------------------- /common/common.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha256" 6 | "unsafe" 7 | 8 | "github.com/fxamacker/cbor/v2" 9 | ) 10 | 11 | // PrefixEnvVar returns the environment variable name with the given prefix and suffix 12 | func PrefixEnvVar(prefix, suffix string) string { 13 | return prefix + "_" + suffix 14 | } 15 | 16 | // PrefixFlag returns the flag name with the given prefix and suffix 17 | func PrefixFlag(prefix, suffix string) string { 18 | return prefix + "." + suffix 19 | } 20 | 21 | // Hash returns the sha256 hash of the given value 22 | func Hash[T any](t T) ([]byte, error) { 23 | bytes, err := EncodeToBytes(t) 24 | if err != nil { 25 | return nil, err 26 | } 27 | hasher := sha256.New() 28 | hasher.Write(bytes) 29 | return hasher.Sum(nil), nil 30 | } 31 | 32 | // EncodeToBytes encodes the given value to bytes 33 | func EncodeToBytes[T any](t T) ([]byte, error) { 34 | size := int(unsafe.Sizeof(t)) 35 | buffer := bytes.NewBuffer(make([]byte, 0, size)) 36 | err := cbor.NewEncoder(buffer).Encode(t) 37 | if err != nil { 38 | return nil, err 39 | } 40 | return buffer.Bytes(), nil 41 | } 42 | 43 | // DecodeFromBytes decodes the given bytes to the given value 44 | func DecodeFromBytes[T any](b []byte) (T, error) { 45 | var t T 46 | buffer := bytes.NewBuffer(b) 47 | err := cbor.NewDecoder(buffer).Decode(&t) 48 | if err != nil { 49 | return t, err 50 | } 51 | return t, nil 52 | } 53 | -------------------------------------------------------------------------------- /tests/da_test_framework/local_stack.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | 5 | sys.path.append("../0g_storage_kv/tests") 6 | 7 | from test_framework.blockchain_node import TestNode 8 | from da_test_framework.da_node_type import DANodeType 9 | 10 | 11 | class LocalStack(TestNode): 12 | def __init__( 13 | self, 14 | root_dir, 15 | binary, 16 | updated_config, 17 | log, 18 | ): 19 | local_conf = dict(log_config_file="log_config") 20 | 21 | local_conf.update(updated_config) 22 | data_dir = os.path.join(root_dir, "localstack") 23 | super().__init__( 24 | DANodeType.DA_LOCAL_STACK, 25 | 10, 26 | data_dir, 27 | None, 28 | binary, 29 | local_conf, 30 | log, 31 | None, 32 | ) 33 | self.args = [binary, "--localstack-port", "4566", "--deploy-resources=true", "localstack"] 34 | 35 | def start(self): 36 | self.log.info("Start localstack") 37 | super().start() 38 | 39 | def wait_for_rpc_connection(self): 40 | while self.process.poll() is None: 41 | self.log.info('building docker') 42 | time.sleep(10) 43 | self.log.info('docker is running') 44 | 45 | def stop(self): 46 | self.log.info("Stop localstack") 47 | os.system("docker stop localstack-test") 48 | -------------------------------------------------------------------------------- /disperser/signer/client_test.go: -------------------------------------------------------------------------------- 1 | package signer 2 | 3 | import ( 4 | "encoding/hex" 5 | "io" 6 | 
"testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestToBigEndian(t *testing.T) { 12 | i, err := hex.DecodeString("b5129dd545319cdef628303e95b3134fd9c6d2b1f025eaba80ca556b661ede0e76daeab01e1bae6aa33379a3af63786ca36e7385472a8839a62e250a2b2a3ca9") 13 | assert.Nil(t, err, "decode should success") 14 | 15 | // Define test cases with input and expected output 16 | testCases := []struct { 17 | input []byte 18 | expectedOutput []byte 19 | expectedError error 20 | }{ 21 | { 22 | i, 23 | []byte{0xe, 0xde, 0x1e, 0x66, 0x6b, 0x55, 0xca, 0x80, 0xba, 0xea, 0x25, 0xf0, 0xb1, 0xd2, 0xc6, 0xd9, 0x4f, 0x13, 0xb3, 0x95, 0x3e, 0x30, 0x28, 0xf6, 0xde, 0x9c, 0x31, 0x45, 0xd5, 0x9d, 0x12, 0xb5, 0x29, 0x3c, 0x2a, 0x2b, 0xa, 0x25, 0x2e, 0xa6, 0x39, 0x88, 0x2a, 0x47, 0x85, 0x73, 0x6e, 0xa3, 0x6c, 0x78, 0x63, 0xaf, 0xa3, 0x79, 0x33, 0xa3, 0x6a, 0xae, 0x1b, 0x1e, 0xb0, 0xea, 0xda, 0x76}, 24 | nil, 25 | }, 26 | {[]byte{1, 2, 3, 4, 5, 6}, nil, io.ErrShortBuffer}, 27 | } 28 | 29 | // Iterate through test cases 30 | for _, tc := range testCases { 31 | // Call the function with test input 32 | output, err := toBigEndian(tc.input) 33 | 34 | // Check if the output and error match the expected values 35 | assert.Equal(t, tc.expectedError, err, "Error should match") 36 | assert.Equal(t, tc.expectedOutput, output, "Output should match") 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /tests/da_put_get_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import random 3 | import sys 4 | from random import randbytes 5 | import time 6 | 7 | from disperser_pb2 import BlobStatus 8 | 9 | sys.path.append("../0g-storage-kv/tests") 10 | from da_test_framework.da_test_framework import DATestFramework 11 | from utility.utils import assert_equal 12 | 13 | 14 | class DAPutGetTest(DATestFramework): 15 | def setup_params(self): 16 | self.num_blockchain_nodes = 1 17 | self.num_nodes = 1 18 | 19 | def run_test(self): 20 | disperser = self.da_services[-1] 21 | 22 | data = randbytes(507904) 23 | disperse_response = disperser.disperse_blob(data) 24 | 25 | self.log.info(disperse_response) 26 | request_id = disperse_response.request_id 27 | reply = disperser.get_blob_status(request_id) 28 | count = 0 29 | while reply.status != BlobStatus.CONFIRMED and count <= 20: 30 | reply = disperser.get_blob_status(request_id) 31 | count += 1 32 | time.sleep(10) 33 | 34 | info = reply.info 35 | # retrieve the blob 36 | # reply = disperser.retrieve_blob(info) 37 | # assert_equal(reply.data[:len(data)], data) 38 | 39 | # retriever = self.da_services[-1] 40 | # retriever_response = retriever.retrieve_blob(info) 41 | # assert_equal(retriever_response.data[:len(data)], data) 42 | 43 | 44 | if __name__ == "__main__": 45 | DAPutGetTest( 46 | blockchain_node_configs=dict([(0, dict(mode="dev", dev_block_interval_ms=50))]) 47 | ).main() 48 | -------------------------------------------------------------------------------- /disperser/leveldb/leveldb.go: -------------------------------------------------------------------------------- 1 | package leveldb 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/syndtr/goleveldb/leveldb" 7 | "github.com/syndtr/goleveldb/leveldb/iterator" 8 | "github.com/syndtr/goleveldb/leveldb/util" 9 | ) 10 | 11 | var ErrNotFound = errors.New("not found") 12 | 13 | // This is an implementation of node.DB interfaces with levelDB as the backend engine. 
14 | type LevelDBStore struct { 15 | *leveldb.DB 16 | } 17 | 18 | func NewLevelDBStore(path string) (*LevelDBStore, error) { 19 | handle, err := leveldb.OpenFile(path, nil) 20 | return &LevelDBStore{handle}, err 21 | } 22 | 23 | func (d *LevelDBStore) Put(key []byte, value []byte) error { 24 | return d.DB.Put(key, value, nil) 25 | } 26 | 27 | func (d *LevelDBStore) Get(key []byte) ([]byte, error) { 28 | data, err := d.DB.Get(key, nil) 29 | if err != nil { 30 | if err == leveldb.ErrNotFound { 31 | return nil, ErrNotFound 32 | } 33 | return nil, err 34 | } 35 | return data, nil 36 | } 37 | 38 | func (d *LevelDBStore) NewIterator(prefix []byte) iterator.Iterator { 39 | return d.DB.NewIterator(util.BytesPrefix(prefix), nil) 40 | } 41 | 42 | func (d *LevelDBStore) Delete(key []byte) error { 43 | return d.DB.Delete(key, nil) 44 | } 45 | 46 | func (d *LevelDBStore) DeleteBatch(keys [][]byte) error { 47 | batch := new(leveldb.Batch) 48 | for _, key := range keys { 49 | batch.Delete(key) 50 | } 51 | return d.DB.Write(batch, nil) 52 | } 53 | 54 | func (d *LevelDBStore) WriteBatch(keys, values [][]byte) error { 55 | batch := new(leveldb.Batch) 56 | for i, key := range keys { 57 | batch.Put(key, values[i]) 58 | } 59 | return d.DB.Write(batch, nil) 60 | } 61 | -------------------------------------------------------------------------------- /common/pubip/pubip_test.go: -------------------------------------------------------------------------------- 1 | package pubip 2 | 3 | import ( 4 | "context" 5 | "github.com/stretchr/testify/assert" 6 | "net/http" 7 | "net/http/httptest" 8 | "testing" 9 | ) 10 | 11 | func TestProviderOrDefault(t *testing.T) { 12 | p := ProviderOrDefault(SeepIPProvider) 13 | assert.Equal(t, SeeIP, p) 14 | p = ProviderOrDefault(IpifyProvider) 15 | assert.Equal(t, Ipify, p) 16 | p = ProviderOrDefault("test") 17 | assert.Equal(t, SeeIP, p) 18 | } 19 | 20 | func TestSimpleProvider_PublicIPAddress(t *testing.T) { 21 | tests := []struct { 22 | name string 23 | requestDoer RequestDoerFunc 24 | expectErr bool 25 | expected string 26 | }{ 27 | { 28 | name: "success", 29 | requestDoer: func(req *http.Request) (*http.Response, error) { 30 | w := httptest.NewRecorder() 31 | _, _ = w.WriteString("\n\n8.8.8.8\n\n") 32 | return w.Result(), nil 33 | }, 34 | expectErr: false, 35 | expected: "8.8.8.8", 36 | }, 37 | { 38 | name: "http error status", 39 | requestDoer: func(req *http.Request) (*http.Response, error) { 40 | w := httptest.NewRecorder() 41 | w.WriteHeader(http.StatusInternalServerError) 42 | return w.Result(), nil 43 | }, 44 | expectErr: true, 45 | expected: "", 46 | }, 47 | } 48 | 49 | for _, tt := range tests { 50 | t.Run(tt.name, func(t *testing.T) { 51 | p := SimpleProvider{ 52 | RequestDoer: tt.requestDoer, 53 | Name: "test", 54 | URL: "https://api.seeip.org", 55 | } 56 | 57 | ip, err := p.PublicIPAddress(context.Background()) 58 | assert.Equal(t, tt.expected, ip) 59 | 60 | if tt.expectErr { 61 | assert.NotNil(t, err) 62 | } 63 | }) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | # See here for image contents: https://github.com/microsoft/vscode-dev-containers/tree/v0.245.2/containers/go/.devcontainer/base.Dockerfile 2 | 3 | # [Choice] Go version (use -bullseye variants on local arm64/Apple Silicon): 1, 1.19, 1.18, 1-bullseye, 1.19-bullseye, 1.18-bullseye, 1-buster, 1.19-buster, 1.18-buster 4 | ARG VARIANT="1-1.21-bullseye" 
5 | FROM mcr.microsoft.com/vscode/devcontainers/go:${VARIANT} 6 | 7 | # [Choice] Node.js version: none, lts/*, 18, 16, 14 8 | ARG NODE_VERSION="none" 9 | RUN if [ "${NODE_VERSION}" != "none" ]; then su vscode -c "umask 0002 && . /usr/local/share/nvm/nvm.sh && nvm install ${NODE_VERSION} 2>&1"; fi 10 | 11 | # Install geth 12 | RUN echo "deb http://ppa.launchpad.net/ethereum/ethereum/ubuntu bionic main\n" \ 13 | "deb-src http://ppa.launchpad.net/ethereum/ethereum/ubuntu bionic main" > /etc/apt/sources.list.d/ethereum-bionic.list \ 14 | && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 2A518C819BE37D2C2031944D1C52189C923F6CA9 \ 15 | && apt-get update \ 16 | && apt-get -y install ethereum 17 | 18 | # [Optional] Uncomment this section to install additional OS packages. 19 | RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ 20 | && apt-get -y install --no-install-recommends netcat \ 21 | && apt-get -y install protobuf-compiler 22 | 23 | # [Optional] Uncomment the next lines to use go get to install anything else you need 24 | # USER vscode 25 | # RUN go get -x <your-dependency-or-tool> 26 | 27 | # [Optional] Uncomment this line to install global node packages. 28 | # RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g <your-package-here>" 2>&1 29 | RUN yarn global add @graphprotocol/graph-cli@0.51.0 30 | 31 | -------------------------------------------------------------------------------- /common/ratelimit_test.go: -------------------------------------------------------------------------------- 1 | package common_test 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "testing" 7 | 8 | "github.com/0glabs/0g-da-client/common" 9 | "github.com/stretchr/testify/assert" 10 | "google.golang.org/grpc/metadata" 11 | "google.golang.org/grpc/peer" 12 | ) 13 | 14 | func TestGetClientAddress(t *testing.T) { 15 | 16 | // Make test context 17 | // Four proxies.
The last proxy's IP address will be in the connection, not in the header 18 | md := metadata.Pairs("x-forwarded-for", "dummyheader, clientip", "x-forwarded-for", "proxy1, proxy2", "x-forwarded-for", "proxy3") 19 | 20 | ctx := peer.NewContext(context.Background(), &peer.Peer{ 21 | Addr: &net.TCPAddr{ 22 | IP: net.ParseIP("0.0.0.0"), 23 | Port: 1234, 24 | }, 25 | }) 26 | 27 | ctx = metadata.NewIncomingContext(ctx, md) 28 | md, ok := metadata.FromIncomingContext(ctx) 29 | if !ok { 30 | t.Fatal("failed to get metadata from context") 31 | } 32 | assert.Equal(t, []string{"dummyheader, clientip", "proxy1, proxy2", "proxy3"}, md.Get("x-forwarded-for")) 33 | 34 | ip, err := common.GetClientAddress(ctx, "x-forwarded-for", 4, false) 35 | assert.NoError(t, err) 36 | assert.Equal(t, "clientip", ip) 37 | 38 | ip, err = common.GetClientAddress(ctx, "x-forwarded-for", 7, false) 39 | assert.Error(t, err) 40 | assert.Equal(t, "", ip) 41 | 42 | ip, err = common.GetClientAddress(ctx, "x-forwarded-for", 7, true) 43 | assert.NoError(t, err) 44 | assert.Equal(t, "0.0.0.0", ip) 45 | 46 | ip, err = common.GetClientAddress(ctx, "", 0, true) 47 | assert.NoError(t, err) 48 | assert.Equal(t, "0.0.0.0", ip) 49 | 50 | ip, err = common.GetClientAddress(ctx, "", 0, false) 51 | assert.NoError(t, err) 52 | assert.Equal(t, "0.0.0.0", ip) 53 | 54 | } 55 | -------------------------------------------------------------------------------- /docs/architecture/retriever.md: -------------------------------------------------------------------------------- 1 | # Retriever 2 | 3 | The Retriever is a service for retrieving chunks corresponding to a blob from the 0G DA operator nodes and reconstructing the original blob from the chunks. This is a client-side library that the users are supposed to operationalize. 4 | 5 | When an end user posts a blob of data to 0G DA, the disperser determines where to store the data and does two things: 6 | 7 | 1. Directly store the data into the pre-configured s3 bucket. 8 | 2. Send the blob request into a queue for the batcher to batch multiple blobs together and send out to 0G Storage Node for DA. The batcher will also append the KZG commitment to the batch for later verification use. 9 | 10 | Note: Users generally have two ways to retrieve a blob from 0G DA: 11 | 12 | 1. Retrieve from the Disperser that the user initially used for dispersal: the API is `Disperser.RetrieveBlob()` as defined in `api/proto/disperser/disperser.proto` 13 | 2. Retrieve directly from the 0G DA Nodes, which is supported by this Retriever. 14 | 15 | The `Disperser.RetrieveBlob()` is generally faster and cheaper as the Disperser manages the blobs that it has processed, whereas `Retriever.RetrieveBlob()` removes the need to trust the Disperser, with the downside of higher cost and worse performance. 16 | 17 | 1. The user submits a retrieval request to the retriever service in the form of a [`BlobRequest`](../data-model.md#blob-request). 18 | 2. The retriever client will first fetch the metadata of the blob and verify its merkle proof to guarantee correctness. 19 | 3. Then the client will verify the KZG commitments of each chunk in the blob and download the chunk if the check passes. 20 | 4. The retriever will finally decode the chunks into the original blob data and send it back to the user (a sketch of this flow follows below).
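The retrieval flow above can be sketched end to end. Everything in the snippet below is illustrative: the type and function names (`Client`, `FetchMetadata`, `verifyMerkleProof`, and so on) are assumptions made for exposition and are not the actual retriever API; only the `BlobRequest` fields mirror `api/proto/retriever/retriever.proto`.

```go
package retrieval

import (
	"context"
	"fmt"
)

// Illustrative types; the real definitions live in the retriever package and
// api/proto/retriever/retriever.proto.
type BlobRequest struct {
	StorageRoot []byte
	Epoch       uint64
	QuorumID    uint64
}

type Metadata struct {
	NumChunks uint
	// merkle root, KZG commitments, etc. elided
}

type Chunk struct {
	Index uint
	Data  []byte
}

type Client interface {
	FetchMetadata(ctx context.Context, req BlobRequest) (Metadata, error)
	DownloadChunk(ctx context.Context, meta Metadata, index uint) (Chunk, error)
}

// Stubs standing in for the real verification and decoding logic.
func verifyMerkleProof(meta Metadata) bool            { return true }
func verifyKZGCommitment(meta Metadata, c Chunk) bool { return true }
func decode(chunks []Chunk) ([]byte, error)           { return nil, nil }

// RetrieveBlob follows steps 1-4 above: fetch and verify the metadata, verify
// and download each chunk, then decode the chunks back into the original blob.
func RetrieveBlob(ctx context.Context, c Client, req BlobRequest) ([]byte, error) {
	meta, err := c.FetchMetadata(ctx, req)
	if err != nil {
		return nil, fmt.Errorf("fetch metadata: %w", err)
	}
	if !verifyMerkleProof(meta) { // step 2: guards against tampered metadata
		return nil, fmt.Errorf("merkle proof verification failed")
	}
	chunks := make([]Chunk, 0, meta.NumChunks)
	for i := uint(0); i < meta.NumChunks; i++ {
		chunk, err := c.DownloadChunk(ctx, meta, i)
		if err != nil {
			return nil, fmt.Errorf("download chunk %d: %w", i, err)
		}
		if !verifyKZGCommitment(meta, chunk) { // step 3: per-chunk commitment check
			return nil, fmt.Errorf("KZG check failed for chunk %d", i)
		}
		chunks = append(chunks, chunk)
	}
	return decode(chunks) // step 4: erasure-decode the chunks back into the blob
}
```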
21 | -------------------------------------------------------------------------------- /tests/da_test_framework/da_signer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | 5 | 6 | from da_test_framework.test_node import TestNode 7 | from da_test_framework.da_node_type import DANodeType 8 | 9 | from config.node_config import PRIV_KEY 10 | 11 | 12 | __file_path__ = os.path.dirname(os.path.realpath(__file__)) 13 | 14 | 15 | class DASigner(TestNode): 16 | def __init__( 17 | self, 18 | root_dir, 19 | binary, 20 | updated_config, 21 | log, 22 | ): 23 | local_conf = dict( 24 | log_level = "info", 25 | data_path = "./db/", 26 | da_entrance_address = "0x64fcfde2350E08E7BaDc18771a7674FAb5E137a2", 27 | start_block_number = 0, 28 | signer_private_key = "1", 29 | validator_private_key = PRIV_KEY, 30 | ) 31 | 32 | local_conf.update(updated_config) 33 | data_dir = os.path.join(root_dir, "da_signer") 34 | super().__init__( 35 | DANodeType.DA_SIGNER, 36 | 11, 37 | data_dir, 38 | None, 39 | binary, 40 | local_conf, 41 | log, 42 | None, 43 | ) 44 | 45 | def start(self): 46 | self.log.info("Start DA signer") 47 | super().start() 48 | 49 | def wait_for_rpc_connection(self): 50 | time.sleep(3) 51 | 52 | def stop(self): 53 | self.log.info("Stop DA signer") 54 | 55 | try: 56 | super().stop(kill=True, wait=False) 57 | except AssertionError as e: 58 | err = repr(e) 59 | if "no RPC connection" in err: 60 | self.log.debug(f"Stop DA signer: no RPC connection") 61 | else: 62 | raise e 63 | -------------------------------------------------------------------------------- /disperser/api/proto/retriever/retriever.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option go_package = "github.com/0glabs/0g-da-client/api/grpc/retriever"; 4 | 5 | package retriever; 6 | 7 | // The Retriever is a service for retrieving chunks corresponding to a blob from 8 | // the ZGDA operator nodes and reconstructing the original blob from the chunks. 9 | // This is a client-side library that the users are supposed to operationalize. 10 | // 11 | // Note: Users generally have two ways to retrieve a blob from ZGDA: 12 | // 1) Retrieve from the Disperser that the user initially used for dispersal: the API 13 | // is Disperser.RetrieveBlob() as defined in api/proto/disperser/disperser.proto 14 | // 2) Retrieve directly from the ZGDA Nodes, which is supported by this Retriever. 15 | // 16 | // The Disperser.RetrieveBlob() (the 1st approach) is generally faster and cheaper as the 17 | // Disperser manages the blobs that it has processed, whereas the Retriever.RetrieveBlob() 18 | // (the 2nd approach here) removes the need to trust the Disperser, with the downside of 19 | // worse cost and performance. 20 | service Retriever { 21 | // This fans out request to ZGDA Nodes to retrieve the chunks and returns the 22 | // reconstructed original blob in response. 23 | rpc RetrieveBlob(BlobRequest) returns (BlobReply) {} 24 | } 25 | 26 | message BlobRequest { 27 | // The hash of data 28 | bytes storage_root = 1; 29 | // This identifies the epoch that this blob belongs to. 30 | uint64 epoch = 2; 31 | // Which quorum of the blob this is requesting for (note a blob can participate in 32 | // multiple quorums). 33 | uint64 quorum_id = 3; 34 | } 35 | 36 | message BlobReply { 37 | // The blob retrieved and reconstructed from the ZGDA Nodes per BlobRequest. 
38 | bytes data = 1; 39 | } 40 | -------------------------------------------------------------------------------- /tests/da_test_framework/da_encoder.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | 5 | sys.path.append("../0g-storage-kv/tests") 6 | 7 | from da_test_framework.test_node import TestNode 8 | from da_test_framework.da_node_type import DANodeType 9 | 10 | __file_path__ = os.path.dirname(os.path.realpath(__file__)) 11 | 12 | 13 | class DAEncoder(TestNode): 14 | def __init__( 15 | self, 16 | root_dir, 17 | binary, 18 | updated_config, 19 | log, 20 | ): 21 | local_conf = dict( 22 | log_level = "debug", 23 | ) 24 | local_conf.update(updated_config) 25 | data_dir = os.path.join(root_dir, "da_encoder") 26 | super().__init__( 27 | DANodeType.DA_ENCODER, 28 | 11, 29 | data_dir, 30 | None, 31 | binary, 32 | local_conf, 33 | log, 34 | None, 35 | ) 36 | 37 | def start(self): 38 | self.log.info("Start DA encoder") 39 | super().start() 40 | 41 | def wait_for_rpc_connection(self): 42 | time.sleep(3) 43 | 44 | def stop(self): 45 | self.log.info("Stop DA encoder") 46 | # The encoder will check return_code via rpc when error log exists 47 | # that is written when the encoder starts normally. 48 | # The exception handling can be removed when rpc is added or the error 49 | # is not written when the encoder starts normally. 50 | try: 51 | super().stop(kill=True, wait=False) 52 | except AssertionError as e: 53 | err = repr(e) 54 | if "no RPC connection" in err: 55 | self.log.debug(f"Stop DA encoder: no RPC connection") 56 | else: 57 | raise e 58 | -------------------------------------------------------------------------------- /docs/security/README.md: -------------------------------------------------------------------------------- 1 | # Security Guarantee 2 | 3 | The overall security guarantee provided by 0G DA is a composite of two guarantees: one at the blob dispersal phase and another at the blob retrieval phase. 4 | 5 | ## Dispersal 6 | 7 | The main guarantee at the dispersal phase is implemented in the 0G Storage module. In particular, the storage module is responsible for upholding the following guarantee: 8 | 9 | * 0G Storage Contract: receive merkle root from the batcher and emit on-chain events 10 | * 0G Storage Node: receive full batch data and verify the data with the submitted on-chain merkle root (by listening for the corresponding events). 11 | 12 | The merkle root is constructed by the batcher from the multiple blobs in the batch. Its purpose is to verify that a certain blob is in the batch. The storage node is responsible for verifying the correctness of the full batch data using the root. 13 | 14 | ## Retrieval 15 | 16 | The 0G DA retrievers expect blobs to correspond to evaluations of a polynomial of a certain degree. The blob payload delivered to the receiver contains a KZG polynomial commitment identifying the polynomial, as well as a separate commitment allowing the retriever to verify its degree. 17 | 18 | The receiver will perform the following checks for each retrieval request to ensure that the [`BlobRequest`](../data-model.md#request) is valid: 19 | 20 | 1. Verify the merkle proof in the requested blob metadata by calling `VerifyProofUsing`. 21 | 2. Verify the KZG commitment by using `lowDegreeProof` to verify that `BlobCommitments` in the [`BlobHeader`](../data-model.md#blob-header) commits to a polynomial of degree equal to the commitments length. 22 | 3.
Verify the KZG commitment for each blob chunk that was previously encoded into the blob during the dispersal phase. 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: compile-el compile-dl clean protoc lint build unit-tests integration-tests-churner integration-tests-indexer integration-tests-graph-indexer 2 | 3 | PROTOS := ./api/proto 4 | PROTOS_DISPERSER := ./disperser/api/proto 5 | PROTO_GEN := ./api/grpc 6 | PROTO_GEN_DISPERSER_PATH = ./disperser/api/grpc 7 | 8 | compile-el: 9 | cd contracts && ./compile.sh compile-el 10 | 11 | compile-dl: 12 | cd contracts && ./compile.sh compile-dl 13 | 14 | clean: 15 | find $(PROTO_GEN) -name "*.pb.go" -type f | xargs rm -rf 16 | mkdir -p $(PROTO_GEN) 17 | find $(PROTO_GEN_DISPERSER_PATH) -name "*.pb.go" -type f | xargs rm -rf 18 | mkdir -p $(PROTO_GEN_DISPERSER_PATH) 19 | 20 | protoc: clean 21 | protoc -I $(PROTOS) \ 22 | --go_out=$(PROTO_GEN) \ 23 | --go_opt=paths=source_relative \ 24 | --go-grpc_out=$(PROTO_GEN) \ 25 | --go-grpc_opt=paths=source_relative \ 26 | $(PROTOS)/**/*.proto 27 | # Generate Protobuf for sub directories of ./api/proto/disperser 28 | protoc -I $(PROTOS_DISPERSER) \ 29 | --go_out=$(PROTO_GEN_DISPERSER_PATH) \ 30 | --go_opt=paths=source_relative \ 31 | --go-grpc_out=$(PROTO_GEN_DISPERSER_PATH) \ 32 | --go-grpc_opt=paths=source_relative \ 33 | $(PROTOS_DISPERSER)/**/*.proto 34 | 35 | lint: 36 | golint -set_exit_status ./... 37 | go tool fix ./... 38 | golangci-lint run 39 | 40 | build: 41 | # cd churner && make build 42 | cd disperser && make build 43 | # cd node && make build 44 | # cd retriever && make build 45 | # cd tools/traffic && make build 46 | 47 | unit-tests: 48 | ./test.sh 49 | 50 | integration-tests-churner: 51 | go test -v ./churner/tests 52 | 53 | integration-tests-indexer: 54 | go test -v ./core/indexer 55 | 56 | integration-tests-node-plugin: 57 | go test -v ./node/plugin/tests 58 | 59 | integration-tests-graph-indexer: 60 | make build 61 | go test -v ./core/thegraph 62 | -------------------------------------------------------------------------------- /common/logging/cli.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | "github.com/0glabs/0g-da-client/common" 5 | "github.com/urfave/cli" 6 | ) 7 | 8 | const ( 9 | PathFlagName = "log.path" 10 | FileLevelFlagName = "log.level-file" 11 | StdLevelFlagName = "log.level-std" 12 | ) 13 | 14 | type Config struct { 15 | Path string 16 | Prefix string 17 | FileLevel string 18 | StdLevel string 19 | } 20 | 21 | func CLIFlags(envPrefix string, flagPrefix string) []cli.Flag { 22 | return []cli.Flag{ 23 | cli.StringFlag{ 24 | Name: common.PrefixFlag(flagPrefix, StdLevelFlagName), 25 | Usage: `The lowest log level that will be output to stdout. Accepted options are "trace", "debug", "info", "warn", "error"`, 26 | Value: "info", 27 | EnvVar: common.PrefixEnvVar(envPrefix, "STD_LOG_LEVEL"), 28 | }, 29 | cli.StringFlag{ 30 | Name: common.PrefixFlag(flagPrefix, FileLevelFlagName), 31 | Usage: `The lowest log level that will be output to file logs.
Accepted options are "trace", "debug", "info", "warn", "error"`, 32 | Value: "info", 33 | EnvVar: common.PrefixEnvVar(envPrefix, "FILE_LOG_LEVEL"), 34 | }, 35 | cli.StringFlag{ 36 | Name: common.PrefixFlag(flagPrefix, PathFlagName), 37 | Usage: "Path to file where logs will be written", 38 | Value: "", 39 | EnvVar: common.PrefixEnvVar(envPrefix, "LOG_PATH"), 40 | }, 41 | } 42 | } 43 | 44 | func DefaultCLIConfig() Config { 45 | return Config{ 46 | Path: "", 47 | FileLevel: "debug", 48 | StdLevel: "debug", 49 | } 50 | } 51 | 52 | func ReadCLIConfig(ctx *cli.Context, flagPrefix string) Config { 53 | cfg := DefaultCLIConfig() 54 | cfg.StdLevel = ctx.GlobalString(common.PrefixFlag(flagPrefix, StdLevelFlagName)) 55 | cfg.FileLevel = ctx.GlobalString(common.PrefixFlag(flagPrefix, FileLevelFlagName)) 56 | cfg.Path = ctx.GlobalString(common.PrefixFlag(flagPrefix, PathFlagName)) 57 | return cfg 58 | } 59 | -------------------------------------------------------------------------------- /api/go.sum: -------------------------------------------------------------------------------- 1 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 2 | github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= 3 | github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 4 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 5 | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 6 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 7 | golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= 8 | golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= 9 | golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= 10 | golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 11 | golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= 12 | golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= 13 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 14 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= 15 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= 16 | google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= 17 | google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= 18 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 19 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 20 | google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= 21 | google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 22 | -------------------------------------------------------------------------------- /common/ratelimit/ratelimit_test.go: -------------------------------------------------------------------------------- 1 | package ratelimit_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/0glabs/0g-da-client/common" 9 | "github.com/0glabs/0g-da-client/common/mock" 10 | "github.com/0glabs/0g-da-client/common/ratelimit" 11 | "github.com/0glabs/0g-da-client/common/store" 12 | 
"github.com/stretchr/testify/assert" 13 | ) 14 | 15 | func makeTestRatelimiter() (common.RateLimiter, error) { 16 | 17 | globalParams := common.GlobalRateParams{ 18 | BucketSizes: []time.Duration{time.Second, time.Minute}, 19 | Multipliers: []float32{1, 1}, 20 | } 21 | bucketStoreSize := 1000 22 | 23 | bucketStore, err := store.NewLocalParamStore[common.RateBucketParams](bucketStoreSize) 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | ratelimiter := ratelimit.NewRateLimiter(globalParams, bucketStore, []string{"testRetriever2"}, &mock.Logger{}) 29 | 30 | return ratelimiter, nil 31 | 32 | } 33 | 34 | func TestRatelimit(t *testing.T) { 35 | 36 | ratelimiter, err := makeTestRatelimiter() 37 | assert.NoError(t, err) 38 | 39 | ctx := context.Background() 40 | 41 | retreiverID := "testRetriever" 42 | 43 | for i := 0; i < 10; i++ { 44 | allow, err := ratelimiter.AllowRequest(ctx, retreiverID, 10, 100) 45 | assert.NoError(t, err) 46 | assert.Equal(t, true, allow) 47 | } 48 | 49 | allow, err := ratelimiter.AllowRequest(ctx, retreiverID, 10, 100) 50 | assert.NoError(t, err) 51 | assert.Equal(t, false, allow) 52 | } 53 | 54 | func TestRatelimitAllowlist(t *testing.T) { 55 | ratelimiter, err := makeTestRatelimiter() 56 | assert.NoError(t, err) 57 | 58 | ctx := context.Background() 59 | 60 | retreiverID := "testRetriever2" 61 | 62 | // 10x more requests allowed for allowlisted IDs 63 | for i := 0; i < 100; i++ { 64 | allow, err := ratelimiter.AllowRequest(ctx, retreiverID, 10, 100) 65 | assert.NoError(t, err) 66 | assert.Equal(t, true, allow) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /common/mock/logging.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "log" 5 | 6 | "github.com/0glabs/0g-da-client/common" 7 | ethlog "github.com/ethereum/go-ethereum/log" 8 | ) 9 | 10 | type Logger struct { 11 | print bool 12 | } 13 | 14 | func NewLogger(print bool) common.Logger { 15 | return &Logger{ 16 | print: print, 17 | } 18 | } 19 | 20 | func (l *Logger) New(ctx ...interface{}) common.Logger { 21 | return &Logger{} 22 | } 23 | 24 | func (l *Logger) printLog(level ethlog.Lvl, msg string, ctx ...interface{}) { 25 | if l.print { 26 | info := []interface{}{ 27 | level, 28 | msg, 29 | } 30 | info = append(info, ctx...) 31 | log.Println(info) 32 | } 33 | } 34 | 35 | func (l *Logger) SetHandler(h ethlog.Handler) {} 36 | 37 | func (l *Logger) Trace(msg string, ctx ...interface{}) { 38 | l.printLog(ethlog.LvlTrace, msg, ctx...) 39 | } 40 | 41 | func (l *Logger) Debug(msg string, ctx ...interface{}) { 42 | l.printLog(ethlog.LvlDebug, msg, ctx...) 43 | } 44 | 45 | func (l *Logger) Info(msg string, ctx ...interface{}) { 46 | l.printLog(ethlog.LvlInfo, msg, ctx...) 47 | } 48 | 49 | func (l *Logger) Warn(msg string, ctx ...interface{}) { 50 | l.printLog(ethlog.LvlWarn, msg, ctx...) 51 | } 52 | 53 | func (l *Logger) Error(msg string, ctx ...interface{}) { 54 | l.printLog(ethlog.LvlError, msg, ctx...) 55 | } 56 | 57 | func (l *Logger) Crit(msg string, ctx ...interface{}) { 58 | l.printLog(ethlog.LvlCrit, msg, ctx...) 59 | } 60 | 61 | func (l *Logger) Fatal(msg string, ctx ...interface{}) { 62 | l.printLog(ethlog.LvlCrit, msg, ctx...) 
63 | } 64 | 65 | func (l *Logger) Debugf(template string, args ...interface{}) {} 66 | 67 | func (l *Logger) Infof(template string, args ...interface{}) {} 68 | 69 | func (l *Logger) Warnf(template string, args ...interface{}) {} 70 | 71 | func (l *Logger) Errorf(template string, args ...interface{}) {} 72 | 73 | func (l *Logger) Critf(template string, args ...interface{}) {} 74 | 75 | func (l *Logger) Fatalf(template string, args ...interface{}) {} 76 | -------------------------------------------------------------------------------- /docs/architecture/disperser.md: -------------------------------------------------------------------------------- 1 | # Disperser 2 | 3 | ### Dispersal 4 | 5 | Requesters that want to store data on 0G Storage make requests to the disperser in the form of a [`DisperseBlobRequest`](../data-model.md#request). 6 | 7 | They specify the data they want to store on 0G Storage. The disperser takes each `DisperseBlobRequest` and stores it into the s3 bucket. The data that the disperser stores into the s3 bucket contains two parts: blob metadata and full blob data. 8 | 9 | #### Blob Data 10 | 11 | The [key](../data-model.md#blob-key) of the blob data is calculated by a certain hash function. This key is used for users to retrieve the blob directly from a disperser. The disperser uploads the blob data with the object key of the blob hash to a certain s3 bucket (defined by the disperser service). 12 | 13 | In pseudocode: 14 | 15 | ```go 16 | blobHash := getBlobHash(blob) 17 | metadataHash := getMetadataHash(requestedAt, blob.RequestHeader.SecurityParams) 18 | metadataKey.BlobHash = blobHash 19 | metadataKey.MetadataHash = metadataHash 20 | 21 | err = s.s3Client.UploadObject(ctx, s.bucketName, blobObjectKey(blobHash), blob.Data) 22 | ``` 23 | 24 | #### Metadata 25 | 26 | The [metadata](../data-model.md#blob-metadata) of a blob is constructed and stored into a table (defined by the disperser service) in aws dynamodb, a nosql database. The update of the metadata in the dynamodb is monitored by the Batcher service for further processing. 27 | 28 | ### Retrieval 29 | 30 | Requesters can directly download the data blob from the disperser service by sending a [`RetrieveBlobRequest`](../data-model.md#request). 31 | 32 | The user defines the index of the blob in a batch and the hash of the batch header to retrieve the metadata of the blob in dynamodb first. The disperser then downloads the full blob data using the blob key in the metadata (a sketch of this lookup follows below). 33 | 34 | 35 | 36 | Note that the disperser is not responsible for encoding/decoding of the blob. The disperser service is trustless; whether to trust the disperser is left to the user's judgement.
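In pseudocode, mirroring the upload sketch above (the store interfaces and method names are assumptions for exposition, not the actual disperser types):

```go
package disperser

import "context"

// Illustrative stubs; the real disperser defines richer equivalents.
type BlobMetadata struct {
	BlobHash string
}

type MetadataStore interface {
	// Looks up blob metadata in dynamodb by (batch header hash, blob index).
	GetBlobMetadataInBatch(ctx context.Context, batchHeaderHash []byte, blobIndex uint32) (*BlobMetadata, error)
}

type ObjectStore interface {
	DownloadObject(ctx context.Context, bucket, key string) ([]byte, error)
}

func blobObjectKey(blobHash string) string { return blobHash }

// RetrieveBlob sketches the lookup: metadata from dynamodb first, then the
// full blob data from the s3 bucket using the blob key carried in the metadata.
func RetrieveBlob(ctx context.Context, m MetadataStore, s3 ObjectStore, bucket string, batchHeaderHash []byte, blobIndex uint32) ([]byte, error) {
	meta, err := m.GetBlobMetadataInBatch(ctx, batchHeaderHash, blobIndex)
	if err != nil {
		return nil, err
	}
	return s3.DownloadObject(ctx, bucket, blobObjectKey(meta.BlobHash))
}
```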
37 | -------------------------------------------------------------------------------- /core/encoding.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "math" 5 | ) 6 | 7 | const ( 8 | EntrySize = 256 //256B 9 | EntryPerSegment = 1024 10 | SegmentSize = EntrySize * EntryPerSegment // 256KB 11 | ScalarSize = 31 12 | CoeffSize = 32 13 | CommitmentSize = 48 14 | MaxCols = 1024 15 | MaxRows = 1024 16 | MaxBlobSize = MaxCols * MaxRows * ScalarSize 17 | ) 18 | 19 | type MatrixDimensions struct { 20 | Rows uint `json:"rows"` 21 | Cols uint `json:"cols"` 22 | } 23 | 24 | // Encoder is responsible for encoding 25 | type Encoder interface { 26 | // Encode takes in a blob and returns the commitments and encoded matrix 27 | Encode(data []byte, dims MatrixDimensions) (*ExtendedMatrix, error) 28 | } 29 | 30 | func NextPowerOf2(d uint64) uint64 { 31 | nextPower := math.Ceil(math.Log2(float64(d))) 32 | return uint64(math.Pow(2.0, nextPower)) 33 | } 34 | 35 | // GetBlobLength converts from blob size in bytes to blob size in symbols 36 | func GetBlobLength(blobSize uint) uint { 37 | return (blobSize + ScalarSize - 1) / ScalarSize 38 | } 39 | 40 | // SplitToMatrix calculates the row and column lengths for the encoded blob, trying to split it into a rows x cols matrix 41 | func SplitToMatrix(blobLength uint, targetRowNum uint) (uint, uint) { 42 | expectedLength := uint(NextPowerOf2(uint64(blobLength * 2))) 43 | var rows, cols uint 44 | if targetRowNum == 0 { 45 | // split into maximum rows 46 | rows = min(expectedLength, MaxRows) 47 | cols = expectedLength / rows 48 | } else { 49 | // try to split into target rows 50 | targetRowNum = min(MaxRows, uint(NextPowerOf2(uint64(targetRowNum)))) 51 | rows = min(expectedLength, targetRowNum) 52 | cols = expectedLength / rows 53 | if cols > MaxCols { 54 | // split into maximum rows 55 | rows = min(expectedLength, MaxRows) 56 | cols = expectedLength / rows 57 | } 58 | } 59 | return rows, cols 60 | } 61 | 62 | // GetBlobSize converts from blob length in symbols to blob size in bytes. This is not an exact conversion. 63 | func GetBlobSize(blobLength uint) uint { 64 | return blobLength * ScalarSize 65 | } 66 | -------------------------------------------------------------------------------- /docs/architecture/batcher.md: -------------------------------------------------------------------------------- 1 | # Batcher 2 | 3 | The batcher is an off-chain service that acts as a bridge between the Disperser and 0G Storage. It is in effect during the Dispersal phase. Figure 1 shows the detailed batcher workflow. 4 | 5 | ### Batch Process 6 | 7 | 1. When a user submits a dispersal request, the disperser service will store the blob and its metadata in s3 and dynamodb respectively. 8 | 2. The batcher has an encoder streaming process that listens for metadata updates in dynamodb and triggers the [encoding process](../security/encoding.md) for the blob data. The batcher stores the encoded blob data in memory. 9 | 3. The batcher has a separate batching processor to query the memory and batch multiple encoded blobs into one batch. During this process, it also generates a merkle proof for each blob based on its blob header. 10 | 4. The batcher then calls a dispatch service to dispatch the batch data and its metadata to 0G Storage for verification and data storage. 11 | 12 | ### Dispatch Process 13 | 14 | 1.
A dispatcher receives 15 | - Hash of the batch header 16 | - [Batch Header](../data-model.md#batch-header) 17 | - [Encoded Blobs](../data-model.md#encoded-blob) 18 | - [Merkle Proofs](../data-model.md#merkle-tree-proof) 19 | 2. Upon receiving the above data, the dispatcher will dump the batch data to 0G Storage node. 20 | 3. The dispatcher will also call an on-chain contract to upload batch headers. 21 | 22 | Note that it is up to the 0G Storage Node to verify the correctness of the batch data with its header. 23 | 24 | ### Finalization 25 | 26 | The batcher has two more components: the confirmer and the finalizer. 27 | 28 | The confirmer queries the tx receipt from the on-chain contract to check whether the transaction is confirmed on chain. 29 | 30 | The finalizer checks the difference between the confirmed block number and the current block number to determine whether the transaction is finalized (no reorg) on chain; a sketch of this check follows Figure 1 below. 31 | 32 |
<figure><img src="../../.gitbook/assets/zg-da-batcher.png" alt=""><figcaption><p>Figure 1. Batcher Workflow</p></figcaption></figure>
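A minimal sketch of the finalizer check described above (the interface and parameter names are assumptions; `finalizedBlockCount` mirrors the `--batcher.finalized-block-count` flag):

```go
package batcher

import "context"

// ChainReader is an illustrative stand-in for the batcher's eth client.
type ChainReader interface {
	BlockNumber(ctx context.Context) (uint64, error)
}

// isFinalized reports whether a transaction confirmed at confirmedBlock has
// enough blocks built on top of it to be considered reorg-safe.
func isFinalized(ctx context.Context, chain ChainReader, confirmedBlock, finalizedBlockCount uint64) (bool, error) {
	current, err := chain.BlockNumber(ctx)
	if err != nil {
		return false, err
	}
	return current >= confirmedBlock+finalizedBlockCount, nil
}
```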
33 | -------------------------------------------------------------------------------- /common/aws/cli.go: -------------------------------------------------------------------------------- 1 | package aws 2 | 3 | import ( 4 | "github.com/0glabs/0g-da-client/common" 5 | "github.com/urfave/cli" 6 | ) 7 | 8 | var ( 9 | RegionFlagName = "aws.region" 10 | AccessKeyIdFlagName = "aws.access-key-id" 11 | SecretAccessKeyFlagName = "aws.secret-access-key" 12 | EndpointURLFlagName = "aws.endpoint-url" 13 | ) 14 | 15 | type ClientConfig struct { 16 | Region string 17 | AccessKey string 18 | SecretAccessKey string 19 | EndpointURL string 20 | } 21 | 22 | func ClientFlags(envPrefix string, flagPrefix string) []cli.Flag { 23 | return []cli.Flag{ 24 | cli.StringFlag{ 25 | Name: common.PrefixFlag(flagPrefix, RegionFlagName), 26 | Usage: "AWS Region", 27 | Required: false, 28 | Value: "", 29 | EnvVar: common.PrefixEnvVar(envPrefix, "AWS_REGION"), 30 | }, 31 | cli.StringFlag{ 32 | Name: common.PrefixFlag(flagPrefix, AccessKeyIdFlagName), 33 | Usage: "AWS Access Key Id", 34 | Required: false, 35 | Value: "", 36 | EnvVar: common.PrefixEnvVar(envPrefix, "AWS_ACCESS_KEY_ID"), 37 | }, 38 | cli.StringFlag{ 39 | Name: common.PrefixFlag(flagPrefix, SecretAccessKeyFlagName), 40 | Usage: "AWS Secret Access Key", 41 | Required: false, 42 | Value: "", 43 | EnvVar: common.PrefixEnvVar(envPrefix, "AWS_SECRET_ACCESS_KEY"), 44 | }, 45 | cli.StringFlag{ 46 | Name: common.PrefixFlag(flagPrefix, EndpointURLFlagName), 47 | Usage: "AWS Endpoint URL", 48 | Required: false, 49 | Value: "", 50 | EnvVar: common.PrefixEnvVar(envPrefix, "AWS_ENDPOINT_URL"), 51 | }, 52 | } 53 | } 54 | 55 | func ReadClientConfig(ctx *cli.Context, flagPrefix string) ClientConfig { 56 | return ClientConfig{ 57 | Region: ctx.GlobalString(common.PrefixFlag(flagPrefix, RegionFlagName)), 58 | AccessKey: ctx.GlobalString(common.PrefixFlag(flagPrefix, AccessKeyIdFlagName)), 59 | SecretAccessKey: ctx.GlobalString(common.PrefixFlag(flagPrefix, SecretAccessKeyFlagName)), 60 | EndpointURL: ctx.GlobalString(common.PrefixFlag(flagPrefix, EndpointURLFlagName)), 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /run/README.md: -------------------------------------------------------------------------------- 1 | # 0GDA 2 | 3 | ## How to run 4 | 5 | 1. Build client server 6 | 7 | ```bash 8 | cd run 9 | make -C .. build 10 | ``` 11 | 12 | 2. Build docker image 13 | 14 | ```bash 15 | cd .. 16 | docker build -t 0gclient -f ./Dockerfile . 17 | ``` 18 | 19 | 3. Update configurations in **[run.sh](run.sh)** 20 | 21 | 4. Grant executable permissions to **[run.sh](run.sh)** and **[start.sh](start.sh)** when needed. 22 | 23 | 5. Run docker image 24 | ```bash 25 | cd run 26 | ./start.sh 27 | ``` 28 | 29 | ## Build and run via combined.Dockerfile 30 | Adjust commands and parameters as required for your setup: 31 | 32 | Build the Docker image 33 | 34 | ```bash 35 | docker build -t 0g-da-client -f combined.Dockerfile . 
36 | ``` 37 | 38 | Run the Docker container 39 | 40 | ```bash 41 | docker run -v ./run:/runtime -p 51001:51001 0g-da-client:latest combined \ 42 | --chain.rpc L1_RPC_ENDPOINT \ 43 | --chain.private-key YOUR_PRIVATE_KEY \ 44 | --chain.receipt-wait-rounds 180 \ 45 | --chain.receipt-wait-interval 1s \ 46 | --chain.gas-limit 2000000 \ 47 | --combined-server.use-memory-db \ 48 | --combined-server.storage.kv-db-path /runtime/ \ 49 | --combined-server.storage.time-to-expire 2592000 \ 50 | --disperser-server.grpc-port 51001 \ 51 | --batcher.da-entrance-contract ENTRANCE_CONTRACT_ADDR \ 52 | --batcher.da-signers-contract 0x0000000000000000000000000000000000001000 \ 53 | --batcher.finalizer-interval 20s \ 54 | --batcher.confirmer-num 3 \ 55 | --batcher.max-num-retries-for-sign 3 \ 56 | --batcher.finalized-block-count 50 \ 57 | --batcher.batch-size-limit 500 \ 58 | --batcher.encoding-interval 3s \ 59 | --batcher.encoding-request-queue-size 1 \ 60 | --batcher.pull-interval 10s \ 61 | --batcher.signing-interval 3s \ 62 | --batcher.signed-pull-interval 20s \ 63 | --batcher.expiration-poll-interval 3600 \ 64 | --encoder-socket DA_ENCODER_SERVER \ 65 | --encoding-timeout 300s \ 66 | --signing-timeout 60s \ 67 | --chain-read-timeout 12s \ 68 | --chain-write-timeout 13s 69 | ``` 70 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the README at: 2 | // https://github.com/microsoft/vscode-dev-containers/tree/v0.245.2/containers/go 3 | { 4 | "name": "Go", 5 | "build": { 6 | "dockerfile": "Dockerfile", 7 | "args": { 8 | // Update the VARIANT arg to pick a version of Go: 1, 1.19, 1.18 9 | // Append -bullseye or -buster to pin to an OS version. 10 | // Use -bullseye variants on local arm64/Apple Silicon. 11 | "VARIANT": "1-1.21-bullseye", 12 | // Options 13 | "NODE_VERSION": "lts/*" 14 | } 15 | }, 16 | "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ], 17 | 18 | // Configure tool-specific properties. 19 | "customizations": { 20 | // Configure access control to other repositories 21 | "codespaces": { 22 | "repositories": { 23 | "Layr-Labs/*": { 24 | "permissions": "write-all" 25 | } 26 | } 27 | }, 28 | // Configure properties specific to VS Code. 29 | "vscode": { 30 | // Set *default* container specific settings.json values on container create. 31 | "settings": { 32 | "go.toolsManagement.checkForUpdates": "local", 33 | "go.useLanguageServer": true, 34 | "go.gopath": "/go" 35 | }, 36 | 37 | // Add the IDs of extensions you want installed when the container is created. 38 | "extensions": [ 39 | "golang.Go", 40 | "NomicFoundation.hardhat-solidity" 41 | ] 42 | } 43 | }, 44 | 45 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 46 | // "forwardPorts": [], 47 | 48 | // Use 'postCreateCommand' to run commands after the container is created. 49 | "postCreateCommand": "chmod +x ./.devcontainer/install.sh && bash ./.devcontainer/install.sh", 50 | 51 | // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. 
52 | "remoteUser": "vscode", 53 | "features": { 54 | "ghcr.io/devcontainers/features/aws-cli:1": { 55 | "version": "latest" 56 | }, 57 | "ghcr.io/devcontainers/features/docker-in-docker:1": { 58 | "version": "latest" 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /api/proto/retriever/retriever.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option go_package = "github.com/0glabs/0g-da-client/api/grpc/retriever"; 4 | package retriever; 5 | 6 | // The Retriever is a service for retrieving chunks corresponding to a blob from 7 | // the ZGDA operator nodes and reconstructing the original blob from the chunks. 8 | // This is a client-side library that the users are supposed to operationalize. 9 | // 10 | // Note: Users generally have two ways to retrieve a blob from ZGDA: 11 | // 1) Retrieve from the Disperser that the user initially used for dispersal: the API 12 | // is Disperser.RetrieveBlob() as defined in api/proto/disperser/disperser.proto 13 | // 2) Retrieve directly from the ZGDA Nodes, which is supported by this Retriever. 14 | // 15 | // The Disperser.RetrieveBlob() (the 1st approach) is generally faster and cheaper as the 16 | // Disperser manages the blobs that it has processed, whereas the Retriever.RetrieveBlob() 17 | // (the 2nd approach here) removes the need to trust the Disperser, with the downside of 18 | // worse cost and performance. 19 | service Retriever { 20 | // This fans out request to ZGDA Nodes to retrieve the chunks and returns the 21 | // reconstructed original blob in response. 22 | rpc RetrieveBlob(BlobRequest) returns (BlobReply) {} 23 | } 24 | 25 | message BlobRequest { 26 | // The hash of the ReducedBatchHeader defined onchain, see: 27 | // https://github.com/0glabs/0g-da-client/blob/master/contracts/src/interfaces/IZGDAServiceManager.sol#L43 28 | // This identifies the batch that this blob belongs to. 29 | bytes batch_header_hash = 1; 30 | // Which blob in the batch this is requesting for (note: a batch is logically an 31 | // ordered list of blobs). 32 | uint32 blob_index = 2; 33 | // The Ethereum block number at which the batch for this blob was constructed. 34 | uint32 reference_block_number = 3; 35 | // Which quorum of the blob this is requesting for (note a blob can participate in 36 | // multiple quorums). 37 | uint32 quorum_id = 4; 38 | } 39 | 40 | message BlobReply { 41 | // The blob retrieved and reconstructed from the ZGDA Nodes per BlobRequest. 42 | bytes data = 1; 43 | } 44 | -------------------------------------------------------------------------------- /.github/actions/test-coverage/action.yml: -------------------------------------------------------------------------------- 1 | name: 'Go coverage report' 2 | description: 'This action updates adds an HTML coverage report and SVG badge to your wiki' 3 | branding: 4 | color: blue 5 | icon: award 6 | 7 | inputs: 8 | report: 9 | description: Generate an HTML coverage report. 10 | default: true 11 | chart: 12 | description: Generate a coverage over time chart. 13 | default: false 14 | amend: 15 | description: Amend wiki, avoiding spurious commits. 16 | default: false 17 | go-version: 18 | description: The Go version to download (if necessary) and use. 
19 | default: '1.21' 20 | 21 | runs: 22 | using: "composite" 23 | steps: 24 | - name: Checkout code 25 | uses: actions/checkout@v3 26 | 27 | - name: Checkout wiki 28 | uses: actions/checkout@v3 29 | with: 30 | repository: ${{github.repository}}.wiki 31 | token: ${{ github.token }} 32 | path: ./.github/wiki/ 33 | 34 | - name: Set up Go 35 | uses: actions/setup-go@v4 36 | with: 37 | go-version: ${{inputs.go-version}} 38 | 39 | - name: Download coverage artifact 40 | uses: actions/download-artifact@v2 41 | with: 42 | name: coverage 43 | path: . 44 | 45 | - name: Generate coverage report 46 | shell: bash 47 | env: 48 | INPUT_CHART: ${{inputs.chart}} 49 | INPUT_REPORT: ${{inputs.report}} 50 | run: | 51 | ${{github.action_path}}/coverage.sh ./.github/wiki/ 52 | 53 | - name: Push to wiki 54 | shell: bash 55 | run: | 56 | cd ./.github/wiki/ 57 | git add --all 58 | git diff-index --quiet HEAD && exit 59 | git config --local user.name "GitHub Action" 60 | git config --local user.email "action@github.com" 61 | git remote set-url --push origin https://${{ github.token }}@github.com/0glabs/0g-da-client.wiki.git 62 | test ${{inputs.amend}} == "true" && \ 63 | git commit --amend --no-edit && git push --force-with-lease || \ 64 | git commit -m "Update coverage" && git push https://${{ github.token }}@github.com/0glabs/0g-da-client.wiki.git 65 | -------------------------------------------------------------------------------- /common/aws/dynamodb/utils_test.go: -------------------------------------------------------------------------------- 1 | package dynamodb_test 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | commonaws "github.com/0glabs/0g-da-client/common/aws" 8 | "github.com/aws/aws-sdk-go-v2/aws" 9 | "github.com/aws/aws-sdk-go-v2/config" 10 | "github.com/aws/aws-sdk-go-v2/credentials" 11 | "github.com/aws/aws-sdk-go-v2/service/dynamodb" 12 | "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" 13 | ) 14 | 15 | const ( 16 | // waiterDuration is the duration to wait for a table to be created 17 | waiterDuration = 15 * time.Second 18 | ) 19 | 20 | func CreateTable(ctx context.Context, cfg commonaws.ClientConfig, name string, input *dynamodb.CreateTableInput) (*types.TableDescription, error) { 21 | c, err := getClient(cfg) 22 | if err != nil { 23 | return nil, err 24 | } 25 | table, err := c.CreateTable(ctx, input) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | waiter := dynamodb.NewTableExistsWaiter(c) 31 | err = waiter.Wait(ctx, &dynamodb.DescribeTableInput{ 32 | TableName: aws.String(name), 33 | }, waiterDuration) 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | return table.TableDescription, nil 39 | } 40 | 41 | func getClient(clientConfig commonaws.ClientConfig) (*dynamodb.Client, error) { 42 | createClient := func(service, region string, options ...interface{}) (aws.Endpoint, error) { 43 | if clientConfig.EndpointURL != "" { 44 | return aws.Endpoint{ 45 | PartitionID: "aws", 46 | URL: clientConfig.EndpointURL, 47 | SigningRegion: clientConfig.Region, 48 | }, nil 49 | } 50 | 51 | // returning EndpointNotFoundError will allow the service to fallback to its default resolution 52 | return aws.Endpoint{}, &aws.EndpointNotFoundError{} 53 | } 54 | customResolver := aws.EndpointResolverWithOptionsFunc(createClient) 55 | 56 | cfg, errCfg := config.LoadDefaultConfig(context.Background(), 57 | config.WithRegion(clientConfig.Region), 58 | config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(clientConfig.AccessKey, clientConfig.SecretAccessKey, "")), 59 | 
config.WithEndpointResolverWithOptions(customResolver), 60 | config.WithRetryMode(aws.RetryModeStandard), 61 | ) 62 | if errCfg != nil { 63 | return nil, errCfg 64 | } 65 | return dynamodb.NewFromConfig(cfg), nil 66 | } 67 | -------------------------------------------------------------------------------- /common/aws/dynamodb/utils/test_utils.go: -------------------------------------------------------------------------------- 1 | package test_utils 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | commonaws "github.com/0glabs/0g-da-client/common/aws" 8 | "github.com/aws/aws-sdk-go-v2/aws" 9 | "github.com/aws/aws-sdk-go-v2/config" 10 | "github.com/aws/aws-sdk-go-v2/credentials" 11 | "github.com/aws/aws-sdk-go-v2/service/dynamodb" 12 | "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" 13 | ) 14 | 15 | const ( 16 | // waiterDuration is the duration to wait for a table to be created 17 | waiterDuration = 15 * time.Second 18 | ) 19 | 20 | func CreateTable(ctx context.Context, cfg commonaws.ClientConfig, name string, input *dynamodb.CreateTableInput) (*types.TableDescription, error) { 21 | c, err := getClient(cfg) 22 | if err != nil { 23 | return nil, err 24 | } 25 | table, err := c.CreateTable(ctx, input) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | waiter := dynamodb.NewTableExistsWaiter(c) 31 | err = waiter.Wait(ctx, &dynamodb.DescribeTableInput{ 32 | TableName: aws.String(name), 33 | }, waiterDuration) 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | return table.TableDescription, nil 39 | } 40 | 41 | func getClient(clientConfig commonaws.ClientConfig) (*dynamodb.Client, error) { 42 | createClient := func(service, region string, options ...interface{}) (aws.Endpoint, error) { 43 | if clientConfig.EndpointURL != "" { 44 | return aws.Endpoint{ 45 | PartitionID: "aws", 46 | URL: clientConfig.EndpointURL, 47 | SigningRegion: clientConfig.Region, 48 | }, nil 49 | } 50 | 51 | // returning EndpointNotFoundError will allow the service to fallback to its default resolution 52 | return aws.Endpoint{}, &aws.EndpointNotFoundError{} 53 | } 54 | customResolver := aws.EndpointResolverWithOptionsFunc(createClient) 55 | 56 | cfg, errCfg := config.LoadDefaultConfig(context.Background(), 57 | config.WithRegion(clientConfig.Region), 58 | config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(clientConfig.AccessKey, clientConfig.SecretAccessKey, "")), 59 | config.WithEndpointResolverWithOptions(customResolver), 60 | config.WithRetryMode(aws.RetryModeStandard), 61 | ) 62 | if errCfg != nil { 63 | return nil, errCfg 64 | } 65 | return dynamodb.NewFromConfig(cfg), nil 66 | } 67 | -------------------------------------------------------------------------------- /common/pubip/pubip.go: -------------------------------------------------------------------------------- 1 | package pubip 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "strings" 11 | ) 12 | 13 | const ( 14 | SeepIPProvider = "seeip" 15 | IpifyProvider = "ipify" 16 | MockIpProvider = "mockip" 17 | ) 18 | 19 | var ( 20 | SeeIP = &SimpleProvider{Name: "seeip", URL: "https://api.seeip.org"} 21 | Ipify = &SimpleProvider{Name: "ipify", URL: "https://api.ipify.org"} 22 | MockIp = &SimpleProvider{Name: "mockip", URL: ""} 23 | ) 24 | 25 | type RequestDoer interface { 26 | Do(req *http.Request) (*http.Response, error) 27 | } 28 | 29 | type RequestDoerFunc func(req *http.Request) (*http.Response, error) 30 | 31 | var _ RequestDoer = (RequestDoerFunc)(nil) 32 | 33 | func (f 
RequestDoerFunc) Do(req *http.Request) (*http.Response, error) { 34 | return f(req) 35 | } 36 | 37 | type Provider interface { 38 | PublicIPAddress(ctx context.Context) (string, error) 39 | } 40 | 41 | type SimpleProvider struct { 42 | RequestDoer RequestDoer 43 | Name string 44 | URL string 45 | } 46 | 47 | var _ Provider = (*SimpleProvider)(nil) 48 | 49 | func (s *SimpleProvider) PublicIPAddress(ctx context.Context) (string, error) { 50 | if s.Name == MockIpProvider { 51 | return "localhost", nil 52 | } 53 | ip, err := s.doRequest(ctx, s.URL) 54 | if err != nil { 55 | return "", fmt.Errorf("%s: failed to retrieve public ip address: %w", s.Name, err) 56 | } 57 | return ip, nil 58 | } 59 | 60 | func (s *SimpleProvider) doRequest(ctx context.Context, url string) (string, error) { 61 | req, err := http.NewRequestWithContext(ctx, "GET", url, nil) 62 | if err != nil { 63 | return "", err 64 | } 65 | 66 | if s.RequestDoer == nil { 67 | s.RequestDoer = http.DefaultClient 68 | } 69 | resp, err := s.RequestDoer.Do(req) 70 | if err != nil { 71 | return "", err 72 | } 73 | defer func() { _ = resp.Body.Close() }() 74 | 75 | if resp.StatusCode >= http.StatusBadRequest { 76 | return "", errors.New(resp.Status) 77 | } 78 | 79 | var b bytes.Buffer 80 | _, err = io.Copy(&b, resp.Body) 81 | if err != nil { 82 | return "", err 83 | } 84 | return strings.TrimSpace(b.String()), nil 85 | } 86 | 87 | func ProviderOrDefault(name string) Provider { 88 | p := map[string]Provider{ 89 | SeepIPProvider: SeeIP, 90 | IpifyProvider: Ipify, 91 | MockIpProvider: MockIp, 92 | }[name] 93 | if p == nil { 94 | p = SeeIP 95 | } 96 | return p 97 | } 98 | -------------------------------------------------------------------------------- /common/store/dynamo_store.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/0glabs/0g-da-client/common" 8 | commondynamodb "github.com/0glabs/0g-da-client/common/aws/dynamodb" 9 | "github.com/aws/aws-sdk-go-v2/aws" 10 | "github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue" 11 | "github.com/aws/aws-sdk-go-v2/service/dynamodb" 12 | "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" 13 | ) 14 | 15 | type dynamodbBucketStore[T any] struct { 16 | client *commondynamodb.Client 17 | tableName string 18 | } 19 | 20 | func NewDynamoParamStore[T any](client *commondynamodb.Client, tableName string) common.KVStore[T] { 21 | return &dynamodbBucketStore[T]{ 22 | client: client, 23 | tableName: tableName, 24 | } 25 | } 26 | 27 | func (s *dynamodbBucketStore[T]) GetItem(ctx context.Context, requesterID string) (*T, error) { 28 | 29 | key := map[string]types.AttributeValue{ 30 | "RequesterID": &types.AttributeValueMemberS{ 31 | Value: requesterID, 32 | }, 33 | } 34 | 35 | item, err := s.client.GetItem(ctx, s.tableName, key) 36 | if err != nil { 37 | return nil, err 38 | } 39 | if item == nil { 40 | return nil, fmt.Errorf("item not found") 41 | } 42 | 43 | params := new(T) 44 | err = attributevalue.UnmarshalMap(item, params) 45 | if err != nil { 46 | return nil, err 47 | } 48 | 49 | return params, nil 50 | } 51 | 52 | func (s *dynamodbBucketStore[T]) UpdateItem(ctx context.Context, requesterID string, params *T) error { 53 | 54 | fields, err := attributevalue.MarshalMap(params) 55 | if err != nil { 56 | return err 57 | } 58 | 59 | fields["RequesterID"] = &types.AttributeValueMemberS{ 60 | Value: requesterID, 61 | } 62 | 63 | return s.client.PutItem(ctx, s.tableName, fields) 64 | } 65 | 66 | 
func GenerateTableSchema(readCapacityUnits int64, writeCapacityUnits int64, tableName string) *dynamodb.CreateTableInput { 67 | return &dynamodb.CreateTableInput{ 68 | AttributeDefinitions: []types.AttributeDefinition{ 69 | { 70 | AttributeName: aws.String("RequesterID"), 71 | AttributeType: types.ScalarAttributeTypeS, 72 | }, 73 | }, 74 | KeySchema: []types.KeySchemaElement{ 75 | { 76 | AttributeName: aws.String("RequesterID"), 77 | KeyType: types.KeyTypeHash, 78 | }, 79 | }, 80 | TableName: aws.String(tableName), 81 | ProvisionedThroughput: &types.ProvisionedThroughput{ 82 | ReadCapacityUnits: aws.Int64(readCapacityUnits), 83 | WriteCapacityUnits: aws.Int64(writeCapacityUnits), 84 | }, 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /common/logging/logging.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/0glabs/0g-da-client/common" 8 | "github.com/ethereum/go-ethereum/log" 9 | ) 10 | 11 | type Logger struct { 12 | log.Logger 13 | } 14 | 15 | func (l *Logger) New(ctx ...interface{}) common.Logger { 16 | return &Logger{Logger: l.Logger.New(ctx...)} 17 | } 18 | 19 | func (l *Logger) SetHandler(h log.Handler) { 20 | l.Logger.SetHandler(h) 21 | } 22 | 23 | // GetLogger returns a logger with the specified configuration. 24 | func GetLogger(cfg Config) (common.Logger, error) { 25 | fileLevel, err := log.LvlFromString(cfg.FileLevel) 26 | if err != nil { 27 | return nil, err 28 | } 29 | stdLevel, err := log.LvlFromString(cfg.StdLevel) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | logger := &Logger{Logger: log.New()} 35 | // This is required to print locations of log calls 36 | // This was recently added in this PR: https://github.com/ethereum/go-ethereum/pull/28069/files 37 | // where the default behavior was changed to not print origins 38 | // This was due to it being very expensive to compute origins 39 | // We should evaluate enabling/disabling this based on the flag 40 | log.PrintOrigins(true) 41 | stdh := log.StreamHandler(os.Stdout, log.TerminalFormat(false)) 42 | stdHandler := log.CallerFileHandler(log.LvlFilterHandler(stdLevel, stdh)) 43 | if cfg.Path != "" { 44 | fh, err := log.FileHandler(cfg.Path, log.LogfmtFormat()) 45 | if err != nil { 46 | return nil, err 47 | } 48 | fileHandler := log.LvlFilterHandler(fileLevel, fh) 49 | logger.SetHandler(log.MultiHandler(fileHandler, stdHandler)) 50 | } else { 51 | logger.SetHandler(stdHandler) 52 | } 53 | return logger, nil 54 | } 55 | 56 | func (l *Logger) Fatal(msg string, ctx ...interface{}) { 57 | l.Crit(msg, ctx...) 
58 | } 59 | 60 | func (l *Logger) Debugf(template string, args ...interface{}) { 61 | l.Debug(fmt.Sprintf(template, args...)) 62 | } 63 | 64 | func (l *Logger) Infof(template string, args ...interface{}) { 65 | l.Info(fmt.Sprintf(template, args...)) 66 | } 67 | 68 | func (l *Logger) Warnf(template string, args ...interface{}) { 69 | l.Warn(fmt.Sprintf(template, args...)) 70 | } 71 | 72 | func (l *Logger) Errorf(template string, args ...interface{}) { 73 | l.Error(fmt.Sprintf(template, args...)) 74 | } 75 | 76 | func (l *Logger) Critf(template string, args ...interface{}) { 77 | l.Crit(fmt.Sprintf(template, args...)) 78 | } 79 | 80 | func (l *Logger) Fatalf(template string, args ...interface{}) { 81 | l.Crit(fmt.Sprintf(template, args...)) 82 | } 83 | -------------------------------------------------------------------------------- /disperser/encoder/client.go: -------------------------------------------------------------------------------- 1 | package encoder 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "time" 8 | 9 | "github.com/0glabs/0g-da-client/common" 10 | "github.com/0glabs/0g-da-client/core" 11 | "github.com/0glabs/0g-da-client/disperser" 12 | pb "github.com/0glabs/0g-da-client/disperser/api/grpc/encoder" 13 | bn "github.com/consensys/gnark-crypto/ecc/bn254" 14 | "github.com/consensys/gnark-crypto/ecc/bn254/fp" 15 | "github.com/ethereum/go-ethereum/common/hexutil" 16 | "google.golang.org/grpc" 17 | "google.golang.org/grpc/credentials/insecure" 18 | ) 19 | 20 | type client struct { 21 | addr string 22 | timeout time.Duration 23 | } 24 | 25 | func NewEncoderClient(addr string, timeout time.Duration) (disperser.EncoderClient, error) { 26 | return client{ 27 | addr: addr, 28 | timeout: timeout, 29 | }, nil 30 | } 31 | 32 | func (c client) EncodeBlob(ctx context.Context, data []byte, log common.Logger) (*core.BlobCommitments, error) { 33 | ctxWithTimeout, cancel := context.WithTimeout(ctx, c.timeout) 34 | defer cancel() 35 | conn, err := grpc.DialContext( 36 | ctxWithTimeout, 37 | c.addr, 38 | grpc.WithTransportCredentials(insecure.NewCredentials()), 39 | grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(1024*1024*1024)), // 1 GiB 40 | ) 41 | if err != nil { 42 | return nil, fmt.Errorf("failed to dial encoder: %w", err) 43 | } 44 | defer conn.Close() 45 | 46 | encoder := pb.NewEncoderClient(conn) 47 | encodeBlobReply, err := encoder.EncodeBlob(ctx, &pb.EncodeBlobRequest{ 48 | Data: data, 49 | RequireData: false, 50 | }) 51 | if err != nil { 52 | return nil, err 53 | } 54 | 55 | // little endian to big endian 56 | commitment := encodeBlobReply.GetErasureCommitment() 57 | if len(commitment) != bn.SizeOfG1AffineUncompressed { 58 | return nil, io.ErrShortBuffer 59 | } 60 | 61 | commitment[bn.SizeOfG1AffineUncompressed-1] &= 63 62 | for i := 0; i < fp.Bytes/2; i++ { 63 | commitment[i], commitment[fp.Bytes-i-1] = commitment[fp.Bytes-i-1], commitment[i] 64 | } 65 | 66 | for i := fp.Bytes; i < fp.Bytes+fp.Bytes/2; i++ { 67 | commitment[i], commitment[len(commitment)-(i-fp.Bytes)-1] = commitment[len(commitment)-(i-fp.Bytes)-1], commitment[i] 68 | } 69 | 70 | log.Debug("blob erasure commit", "commit", hexutil.Encode(commitment)) 71 | 72 | commitmentPoint, err := new(core.G1Point).Deserialize(commitment) 73 | if err != nil { 74 | return nil, err 75 | } 76 | 77 | return &core.BlobCommitments{ 78 | ErasureCommitment: commitmentPoint, 79 | StorageRoot: encodeBlobReply.GetStorageRoot(), 80 | EncodedData: encodeBlobReply.GetEncodedData(), 81 | EncodedSlice: encodeBlobReply.GetEncodedSlice(), 
82 | }, nil 83 | } 84 | -------------------------------------------------------------------------------- /docs/introduction.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | ## Overview 4 | 5 | 0G DA is a decentralized data availability (DA) service with deep consideration of security, scalability and decentralization. It is also the first DA solution with a built-in data storage layer. Users interact with 0G DA to submit and store their data into [0G Storage](https://github.com/0glabs/0g-storage-node) for later retrieval. 6 | 7 | ## Integration 8 | 9 | Check out [this example](https://github.com/0glabs/0g-da-example-rust) for how to integrate 0G DA into your own applications. 10 | 11 | For detailed public APIs, visit the [gRPC API](../0G%20DA/broken-reference/) section. A minimal dispersal sketch is also included at the end of this page. 12 | 13 | ## Deployment 14 | 15 | * For a local test environment, [aws-cli](https://aws.amazon.com/cli/) is required. 16 | * [Local Stack setup](./#localstack) 17 | * [Disperser](./#disperser) 18 | * [Retriever](./#retriever) 19 | 20 | ### Disperser 21 | 22 | 1. Build binaries: 23 | 24 | ``` 25 | cd disperser 26 | make build 27 | ``` 28 | 29 | 2. Run encoder: 30 | 31 | ``` 32 | make run_encoder 33 | ``` 34 | 35 | 3. Set the CLI arguments of run\_batcher in the Makefile to proper values. The full list of available configuration parameters is shown below. 36 | 37 | ``` 38 | # default 39 | --batcher.pull-interval 5s 40 | --chain.receipt-wait-rounds 180 41 | --chain.receipt-wait-interval 1s 42 | --chain.gas-limit 2000000 43 | --batcher.finalizer-interval 300s 44 | --batcher.confirmer-num 3 45 | --encoder-socket 0.0.0.0:34000 46 | --batcher.batch-size-limit 50 47 | --batcher.srs-order 300000 48 | --encoding-timeout 10s 49 | --chain-read-timeout 12s 50 | --chain-write-timeout 13s 51 | --batcher.storage.node-url http://0.0.0.0:5678 52 | --batcher.storage.node-url http://0.0.0.0:6789 53 | --batcher.storage.kv-url http://0.0.0.0:7890 54 | --batcher.storage.kv-stream-id 000000000000000000000000000000000000000000000000000000000000f2bd 55 | --batcher.aws.region us-east-2 56 | 57 | # custom 58 | # aws 59 | --batcher.aws.access-key-id localstack 60 | --batcher.aws.secret-access-key localstack 61 | --batcher.s3-bucket-name test-zgda-blobstore 62 | --batcher.dynamodb-table-name test-BlobMetadata 63 | # chain 64 | --chain.rpc ETH_RPC_ENDPOINT 65 | --chain.private-key YOUR_PRIVATE_KEY 66 | --batcher.storage.flow-contract FLOW_CONTRACT_ADDR 67 | ``` 68 | 69 | 4. Then run the batcher and the main disperser server: 70 | 71 | ``` 72 | make run_batcher 73 | 74 | make run_server 75 | ``` 76 | 77 | ### Retriever 78 | 79 | 1. Build binaries: 80 | 81 | ``` 82 | cd retriever 83 | make build 84 | ``` 85 | 86 | 2. Run the main retriever server: 87 | 88 | ``` 89 | make run 90 | ``` 91 | 92 | ## Contributing 93 | 94 | To make contributions to the project, please follow the guidelines [here](../../contributing.md).
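As referenced in the Integration section above, here is a minimal dispersal sketch in Go. The generated-package import path and the `Data` field are assumptions made for illustration; the authoritative message definitions live in `api/proto/disperser/disperser.proto`.

```go
package main

import (
	"context"
	"log"
	"time"

	// Assumed import path for the generated disperser stubs.
	pb "github.com/0glabs/0g-da-client/api/grpc/disperser"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Dial the disperser's gRPC endpoint (port 51001 in the run/README example).
	conn, err := grpc.DialContext(ctx, "localhost:51001",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("failed to dial disperser: %v", err)
	}
	defer conn.Close()

	client := pb.NewDisperserClient(conn)
	// DisperseBlobRequest is assumed to carry the raw payload in a Data field;
	// consult disperser.proto for the authoritative schema.
	reply, err := client.DisperseBlob(ctx, &pb.DisperseBlobRequest{Data: []byte("hello 0g")})
	if err != nil {
		log.Fatalf("failed to disperse blob: %v", err)
	}
	log.Printf("dispersal accepted: %v", reply)
}
```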
95 | -------------------------------------------------------------------------------- /disperser/cmd/apiserver/config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/0glabs/0g-da-client/common/aws" 5 | "github.com/0glabs/0g-da-client/common/geth" 6 | "github.com/0glabs/0g-da-client/common/logging" 7 | "github.com/0glabs/0g-da-client/common/ratelimit" 8 | "github.com/0glabs/0g-da-client/common/storage_node" 9 | "github.com/0glabs/0g-da-client/disperser" 10 | "github.com/0glabs/0g-da-client/disperser/apiserver" 11 | "github.com/0glabs/0g-da-client/disperser/cmd/apiserver/flags" 12 | "github.com/0glabs/0g-da-client/disperser/common/blobstore" 13 | "github.com/urfave/cli" 14 | ) 15 | 16 | type Config struct { 17 | AwsClientConfig aws.ClientConfig 18 | BlobstoreConfig blobstore.Config 19 | ServerConfig disperser.ServerConfig 20 | LoggerConfig logging.Config 21 | MetricsConfig disperser.MetricsConfig 22 | RatelimiterConfig ratelimit.Config 23 | RateConfig apiserver.RateConfig 24 | StorageNodeConfig storage_node.ClientConfig 25 | EthClientConfig geth.EthClientConfig 26 | EnableRatelimiter bool 27 | BucketTableName string 28 | BucketStoreSize int 29 | RetrieverAddr string 30 | } 31 | 32 | func NewConfig(ctx *cli.Context) (Config, error) { 33 | 34 | ratelimiterConfig, err := ratelimit.ReadCLIConfig(ctx, flags.FlagPrefix) 35 | if err != nil { 36 | return Config{}, err 37 | } 38 | 39 | rateConfig, err := apiserver.ReadCLIConfig(ctx) 40 | if err != nil { 41 | return Config{}, err 42 | } 43 | 44 | config := Config{ 45 | AwsClientConfig: aws.ReadClientConfig(ctx, flags.FlagPrefix), 46 | ServerConfig: disperser.ServerConfig{ 47 | GrpcPort: ctx.GlobalString(flags.GrpcPortFlag.Name), 48 | }, 49 | EthClientConfig: geth.ReadEthClientConfig(ctx), 50 | BlobstoreConfig: blobstore.Config{ 51 | BucketName: ctx.GlobalString(flags.S3BucketNameFlag.Name), 52 | TableName: ctx.GlobalString(flags.DynamoDBTableNameFlag.Name), 53 | MetadataHashAsBlobKey: ctx.GlobalBool(flags.MetadataHashAsBlobKey.Name), 54 | }, 55 | LoggerConfig: logging.ReadCLIConfig(ctx, flags.FlagPrefix), 56 | MetricsConfig: disperser.MetricsConfig{ 57 | HTTPPort: ctx.GlobalString(flags.MetricsHTTPPort.Name), 58 | EnableMetrics: ctx.GlobalBool(flags.EnableMetrics.Name), 59 | }, 60 | RatelimiterConfig: ratelimiterConfig, 61 | RateConfig: rateConfig, 62 | EnableRatelimiter: ctx.GlobalBool(flags.EnableRatelimiter.Name), 63 | BucketTableName: ctx.GlobalString(flags.BucketTableName.Name), 64 | BucketStoreSize: ctx.GlobalInt(flags.BucketStoreSize.Name), 65 | StorageNodeConfig: storage_node.ReadClientConfig(ctx, flags.FlagPrefix), 66 | RetrieverAddr: ctx.GlobalString(flags.RetrieverAddrName.Name), 67 | } 68 | return config, nil 69 | } 70 | -------------------------------------------------------------------------------- /tests/utility/utils.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import inspect 3 | import os 4 | import platform 5 | import rtoml 6 | import time 7 | import sha3 8 | 9 | class PortMin: 10 | # Must be initialized with a unique integer for each process 11 | n = 11000 12 | 13 | def is_windows_platform(): 14 | return platform.system().lower() == "windows" 15 | 16 | 17 | def initialize_config(config_path, config_parameters): 18 | with open(config_path, "w") as f: 19 | for k in config_parameters: 20 | value = config_parameters[k] 21 | if isinstance(value, str) and not ( 22 | value.startswith('"') or 
value.startswith("'") 23 | ): 24 | if value == "true" or value == "false": 25 | value = f"{value}" 26 | else: 27 | value = f'"{value}"' 28 | 29 | f.write(f"{k}={value}\n") 30 | 31 | 32 | def wait_until(predicate, *, attempts=float("inf"), timeout=float("inf"), lock=None): 33 | if attempts == float("inf") and timeout == float("inf"): 34 | timeout = 60 35 | attempt = 0 36 | time_end = time.time() + timeout 37 | 38 | while attempt < attempts and time.time() < time_end: 39 | if lock: 40 | with lock: 41 | if predicate(): 42 | return 43 | else: 44 | if predicate(): 45 | return 46 | attempt += 1 47 | time.sleep(0.5) 48 | 49 | # Print the cause of the timeout 50 | predicate_source = inspect.getsourcelines(predicate) 51 | if attempt >= attempts: 52 | raise AssertionError( 53 | "Predicate {} not true after {} attempts".format(predicate_source, attempts) 54 | ) 55 | elif time.time() >= time_end: 56 | raise AssertionError( 57 | "Predicate {} not true after {} seconds".format(predicate_source, timeout) 58 | ) 59 | raise RuntimeError("Unreachable") 60 | 61 | 62 | 63 | def assert_equal(thing1, thing2, *args): 64 | if thing1 != thing2 or any(thing1 != arg for arg in args): 65 | raise AssertionError( 66 | "not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args) 67 | ) 68 | 69 | 70 | def assert_ne(thing1, thing2): 71 | if thing1 == thing2: 72 | raise AssertionError("not(%s)" % " != ".join([thing1, thing2])) 73 | 74 | 75 | def assert_greater_than(thing1, thing2): 76 | if thing1 <= thing2: 77 | raise AssertionError("%s <= %s" % (str(thing1), str(thing2))) 78 | 79 | 80 | def assert_greater_than_or_equal(thing1, thing2): 81 | if thing1 < thing2: 82 | raise AssertionError("%s < %s" % (str(thing1), str(thing2))) 83 | 84 | MAX_NODES = 100 85 | 86 | def blockchain_rpc_port(n): 87 | return PortMin.n + 3 * MAX_NODES + n -------------------------------------------------------------------------------- /docs/pkg/kzg.md: -------------------------------------------------------------------------------- 1 | # KZG and FFT utils 2 | 3 | This repo is _super experimental_. 4 | 5 | This is an implementation in Go, initially aimed at chunkification and extension of data, and building/verifying KZG proofs for the output data. The KZG proofs, or Kate proofs, are built on top of BLS12-381. 6 | 7 | Part of a low-latency data-availability sampling network prototype for Eth2 Phase 1. 
See https://github.com/protolambda/eth2-das 8 | 9 | Code is based on: 10 | 11 | * [KZG Data availability code by Dankrad](https://github.com/ethereum/research/tree/master/kzg\_data\_availability) 12 | * [Verkle and FFT code by Dankrad and Vitalik](https://github.com/ethereum/research/tree/master/verkle) 13 | * [Reed solomon erasure code recovery with FFTs by Vitalik](https://ethresear.ch/t/reed-solomon-erasure-code-recovery-in-n-log-2-n-time-with-ffts/3039) 14 | * [FFT explainer by Vitalik](https://vitalik.eth.limo/general/2019/05/12/fft.html) 15 | * [Kate explainer by Dankrad](https://dankradfeist.de/ethereum/2020/06/16/kate-polynomial-commitments.html) 16 | * [Kate amortized paper by Dankrad and Dmitry](https://github.com/khovratovich/Kate/blob/master/Kate\_amortized.pdf) 17 | 18 | Features: 19 | 20 | * (I)FFT on `F_r` 21 | * (I)FFT on `G1` 22 | * Specialized FFT for extension of `F_r` data 23 | * KZG 24 | * commitments 25 | * generate/verify proof for single point 26 | * generate/verify proofs for multiple points 27 | * generate/verify proofs for all points, using FK20 28 | * generate/verify proofs for ranges (cosets) of points, using FK20 29 | * Data recovery: given an arbitrary subset of data (at least half), recover the rest 30 | * Optimized for Data-availability usage 31 | * Switch the Bignum / BLS backend with build tags. 32 | 33 | ## BLS 34 | 35 | Currently supported BLS implementations: Herumi BLS and Kilic BLS (default). 36 | 37 | ## Field elements (Fr) 38 | 39 | The BLS curve order is used for the modulo math; different libraries can be used to provide this functionality. Note: some of these libraries do not have full BLS functionality, only Bignum / uint256. The KZG code will be excluded when compiling with a non-BLS build tag. 40 | 41 | Build tag options: 42 | 43 | * (no build tags, default): Use the Kilic BLS library. Previously selected by the `bignum_kilic` build tag. [`kilic/bls12-381`](https://github.com/kilic/bls12-381) 44 | * `-tags bignum_hbls`: use the Herumi BLS library. [`herumi/bls-eth-go-binary`](https://github.com/herumi/bls-eth-go-binary/) 45 | * `-tags bignum_hol256`: Use the uint256 code that Geth uses, [`holiman/uint256`](https://github.com/holiman/uint256) 46 | * `-tags bignum_pure`: Use the native Go Bignum implementation. 47 | 48 | ## Benchmarks 49 | 50 | See `BENCH.md` for benchmarks of FFT, FFT in G1, FFT-extension, zero polynomials, and sample recovery. 51 | 52 | ## License 53 | 54 | MIT, see `LICENSE` file.
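## Appendix: single-point KZG relations

As a quick reference for the single-point feature listed above (standard KZG notation, not this package's API): a polynomial $f$ is committed to as $C = [f(s)]_1$, where $s$ is the trusted-setup secret. To open at a point $z$ with claimed value $y = f(z)$, the prover publishes the quotient commitment

$$\pi = \left[\frac{f(s) - y}{s - z}\right]_1$$

and the verifier accepts iff

$$e\big(C - [y]_1,\ [1]_2\big) = e\big(\pi,\ [s - z]_2\big),$$

which holds exactly when $(X - z)$ divides $f(X) - y$, i.e. when $y$ really is $f(z)$.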
55 | -------------------------------------------------------------------------------- /tests/da_test_framework/da_retriever.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | import grpc 5 | import retriever_pb2 as pb2 6 | import retriever_pb2_grpc as pb2_grpc 7 | from da_test_framework.da_node_type import DANodeType 8 | 9 | sys.path.append("../0g-storage-kv/tests") 10 | 11 | from test_framework.blockchain_node import TestNode 12 | 13 | 14 | __file_path__ = os.path.dirname(os.path.realpath(__file__)) 15 | 16 | 17 | class DARetriever(TestNode): 18 | def __init__( 19 | self, 20 | root_dir, 21 | binary, 22 | updated_config, 23 | log, 24 | ): 25 | local_conf = dict(log_config_file="log_config") 26 | print(updated_config) 27 | print(local_conf) 28 | local_conf.update(updated_config) 29 | 30 | data_dir = os.path.join(root_dir, "da_retriever") 31 | self.grpc_url = "0.0.0.0:32011" 32 | super().__init__( 33 | DANodeType.DA_RETRIEVER, 34 | 14, 35 | data_dir, 36 | None, 37 | binary, 38 | local_conf, 39 | log, 40 | None, 41 | ) 42 | self.args = [ 43 | binary, 44 | "--retriever.hostname", "localhost", 45 | "--retriever.grpc-port", "32011", 46 | "--retriever.storage.node-url", f'http://{local_conf["node_rpc_endpoint"]}', 47 | "--retriever.storage.kv-url", f'http://{local_conf["kv_rpc_endpoint"]}', 48 | "--retriever.storage.kv-stream-id", local_conf['stream_id'], 49 | "--retriever.storage.flow-contract", local_conf['log_contract_address'], 50 | "--retriever.log.level-std", "trace", 51 | "--kzg.srs-order", "300000", 52 | ] 53 | 54 | def wait_for_rpc_connection(self): 55 | # TODO: health check of service availability 56 | time.sleep(3) 57 | self.channel = grpc.insecure_channel(self.grpc_url) 58 | # bind the client and the server 59 | self.stub = pb2_grpc.RetrieverStub(self.channel) 60 | 61 | def start(self): 62 | self.log.info("Start DA retriever") 63 | super().start() 64 | 65 | def stop(self): 66 | self.log.info("Stop DA retriever") 67 | try: 68 | super().stop(kill=True, wait=False) 69 | except AssertionError as e: 70 | err = repr(e) 71 | if "no RPC connection" in err: 72 | self.log.debug(f"Stop DA retriever: no RPC connection") 73 | else: 74 | raise e 75 | 76 | def retrieve_blob(self, info): 77 | message = pb2.BlobRequest(batch_header_hash=info.blob_verification_proof.batch_metadata.batch_header_hash, blob_index=info.blob_verification_proof.blob_index) 78 | return self.stub.RetrieveBlob(message) 79 | -------------------------------------------------------------------------------- /common/logging.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import "github.com/ethereum/go-ethereum/log" 4 | 5 | type Logger interface { 6 | // New returns a new Logger that has this logger's context plus the given context 7 | New(ctx ...interface{}) Logger 8 | 9 | // SetHandler updates the logger to write records to the specified handler. 
10 | SetHandler(h log.Handler) 11 | 12 | // Log a message at the trace level with context key/value pairs 13 | // 14 | // # Usage 15 | // 16 | // log.Trace("msg") 17 | // log.Trace("msg", "key1", val1) 18 | // log.Trace("msg", "key1", val1, "key2", val2) 19 | Trace(msg string, ctx ...interface{}) 20 | 21 | // Log a message at the debug level with context key/value pairs 22 | // 23 | // # Usage Examples 24 | // 25 | // log.Debug("msg") 26 | // log.Debug("msg", "key1", val1) 27 | // log.Debug("msg", "key1", val1, "key2", val2) 28 | Debug(msg string, ctx ...interface{}) 29 | 30 | // Log a message at the info level with context key/value pairs 31 | // 32 | // # Usage Examples 33 | // 34 | // log.Info("msg") 35 | // log.Info("msg", "key1", val1) 36 | // log.Info("msg", "key1", val1, "key2", val2) 37 | Info(msg string, ctx ...interface{}) 38 | 39 | // Log a message at the warn level with context key/value pairs 40 | // 41 | // # Usage Examples 42 | // 43 | // log.Warn("msg") 44 | // log.Warn("msg", "key1", val1) 45 | // log.Warn("msg", "key1", val1, "key2", val2) 46 | Warn(msg string, ctx ...interface{}) 47 | 48 | // Log a message at the error level with context key/value pairs 49 | // 50 | // # Usage Examples 51 | // 52 | // log.Error("msg") 53 | // log.Error("msg", "key1", val1) 54 | // log.Error("msg", "key1", val1, "key2", val2) 55 | Error(msg string, ctx ...interface{}) 56 | 57 | // Log a message at the crit level with context key/value pairs, and then exit. 58 | // 59 | // # Usage Examples 60 | // 61 | // log.Crit("msg") 62 | // log.Crit("msg", "key1", val1) 63 | // log.Crit("msg", "key1", val1, "key2", val2) 64 | Crit(msg string, ctx ...interface{}) 65 | 66 | // Fatal is an alias for Crit 67 | // Log a message at the crit level with context key/value pairs, and then exit. 
68 | // 69 | // # Usage Examples 70 | // 71 | // log.Fatal("msg") 72 | // log.Fatal("msg", "key1", val1) 73 | // log.Fatal("msg", "key1", val1, "key2", val2) 74 | Fatal(msg string, ctx ...interface{}) 75 | 76 | // We add the below methods to be compliant with the eigensdk Logger interface 77 | Debugf(template string, args ...interface{}) 78 | Infof(template string, args ...interface{}) 79 | Warnf(template string, args ...interface{}) 80 | Errorf(template string, args ...interface{}) 81 | Critf(template string, args ...interface{}) 82 | // eigensdk uses fatal instead of crit so we add it, 83 | // but should have same semantic as Critf 84 | Fatalf(template string, args ...interface{}) 85 | } 86 | -------------------------------------------------------------------------------- /disperser/cmd/combined_server/flags/flags.go: -------------------------------------------------------------------------------- 1 | package flags 2 | 3 | import ( 4 | "github.com/0glabs/0g-da-client/common" 5 | "github.com/0glabs/0g-da-client/common/aws" 6 | "github.com/0glabs/0g-da-client/common/geth" 7 | "github.com/0glabs/0g-da-client/common/logging" 8 | "github.com/0glabs/0g-da-client/common/ratelimit" 9 | "github.com/0glabs/0g-da-client/common/storage_node" 10 | server_flags "github.com/0glabs/0g-da-client/disperser/cmd/apiserver/flags" 11 | batcher_flags "github.com/0glabs/0g-da-client/disperser/cmd/batcher/flags" 12 | "github.com/urfave/cli" 13 | ) 14 | 15 | const ( 16 | FlagPrefix = "combined-server" 17 | EnvVarPrefix = "COMBINED_SERVER" 18 | ) 19 | 20 | var ( 21 | MetricsHTTPPort = cli.StringFlag{ 22 | Name: common.PrefixFlag(FlagPrefix, "metrics-http-port"), 23 | Usage: "the http port which the metrics prometheus server is listening", 24 | Required: false, 25 | Value: "9100", 26 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "METRICS_HTTP_PORT"), 27 | } 28 | EnableMetrics = cli.BoolFlag{ 29 | Name: common.PrefixFlag(FlagPrefix, "enable-metrics"), 30 | Usage: "start metrics server", 31 | Required: false, 32 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "ENABLE_METRICS"), 33 | } 34 | UseMemoryDB = cli.BoolFlag{ 35 | Name: common.PrefixFlag(FlagPrefix, "use-memory-db"), 36 | Usage: "use memory db", 37 | Required: false, 38 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "USE_MEMORY_DB"), 39 | } 40 | MemoryDBSizeLimit = cli.UintFlag{ 41 | Name: common.PrefixFlag(FlagPrefix, "memory-db-size-limit"), 42 | Usage: "the maximum memory db size in MiB", 43 | Required: false, 44 | Value: 2048, // 2G 45 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "MEMORY_DB_SIZE_LIMIT"), 46 | } 47 | ) 48 | 49 | var RequiredFlags = []cli.Flag{} 50 | 51 | var OptionalFlags = []cli.Flag{ 52 | MetricsHTTPPort, 53 | EnableMetrics, 54 | UseMemoryDB, 55 | MemoryDBSizeLimit, 56 | } 57 | 58 | // Flags contains the list of configuration options available to the binary. 59 | var Flags []cli.Flag 60 | 61 | func init() { 62 | // combined 63 | Flags = append(RequiredFlags, OptionalFlags...) 64 | Flags = append(Flags, logging.CLIFlags(EnvVarPrefix, FlagPrefix)...) 65 | Flags = append(Flags, geth.EthClientFlags(EnvVarPrefix)...) 66 | Flags = append(Flags, aws.ClientFlags(EnvVarPrefix, FlagPrefix)...) 67 | Flags = append(Flags, storage_node.ClientFlags(EnvVarPrefix, FlagPrefix)...) 68 | 69 | // api server 70 | Flags = append(Flags, server_flags.RequiredFlags...) 71 | Flags = append(Flags, server_flags.OptionalFlags...) 72 | Flags = append(Flags, ratelimit.RatelimiterCLIFlags(server_flags.EnvVarPrefix, server_flags.FlagPrefix)...) 
73 | 74 | // batcher 75 | Flags = append(Flags, batcher_flags.RequiredFlags...) 76 | Flags = append(Flags, batcher_flags.OptionalFlags...) 77 | } 78 | -------------------------------------------------------------------------------- /common/ethclient.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "context" 5 | "math/big" 6 | 7 | "github.com/ethereum/go-ethereum" 8 | "github.com/ethereum/go-ethereum/accounts/abi/bind" 9 | "github.com/ethereum/go-ethereum/common" 10 | "github.com/ethereum/go-ethereum/core/types" 11 | ) 12 | 13 | type EthClient interface { 14 | GetAccountAddress() common.Address 15 | GetNoSendTransactOpts() (*bind.TransactOpts, error) 16 | ChainID(ctx context.Context) (*big.Int, error) 17 | BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) 18 | BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) 19 | BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) 20 | GetCurrentBlockNumber(ctx context.Context) (uint32, error) 21 | CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) 22 | CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) 23 | EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error) 24 | FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) 25 | HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) 26 | HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) 27 | NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) 28 | PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) 29 | PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) 30 | PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) 31 | SendTransaction(ctx context.Context, tx *types.Transaction) error 32 | StorageAt(ctx context.Context, account common.Address, key common.Hash, blockNumber *big.Int) ([]byte, error) 33 | SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) 34 | SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) 35 | SuggestGasPrice(ctx context.Context) (*big.Int, error) 36 | SuggestGasTipCap(ctx context.Context) (*big.Int, error) 37 | TransactionByHash(ctx context.Context, hash common.Hash) (tx *types.Transaction, isPending bool, err error) 38 | TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) 39 | TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) 40 | TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) 41 | EstimateGasPriceAndLimitAndSendTx(ctx context.Context, tx *types.Transaction, tag string, value *big.Int) (*types.Receipt, error) 42 | UpdateGas(ctx context.Context, tx *types.Transaction, value *big.Int) (*types.Transaction, error) 43 | EnsureTransactionEvaled(ctx context.Context, tx *types.Transaction, tag string) (*types.Receipt, error) 44 | } 45 | -------------------------------------------------------------------------------- /disperser/batcher/transactor/transactor.go: -------------------------------------------------------------------------------- 1 | package transactor 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | "github.com/0glabs/0g-da-client/common" 
8 | "github.com/0glabs/0g-da-client/disperser/contract" 9 | "github.com/0glabs/0g-da-client/disperser/contract/da_entrance" 10 | eth_common "github.com/ethereum/go-ethereum/common" 11 | "github.com/openweb3/web3go/types" 12 | "github.com/pkg/errors" 13 | ) 14 | 15 | type Transactor struct { 16 | mu sync.Mutex 17 | 18 | gasLimit uint64 19 | logger common.Logger 20 | } 21 | 22 | func NewTransactor(gasLimit uint64, logger common.Logger) *Transactor { 23 | return &Transactor{ 24 | gasLimit: gasLimit, 25 | logger: logger, 26 | } 27 | } 28 | 29 | func (t *Transactor) SubmitLogEntry(daContract *contract.DAContract, dataRoots []eth_common.Hash) (eth_common.Hash, error) { 30 | t.mu.Lock() 31 | defer t.mu.Unlock() 32 | 33 | // Append log on blockchain 34 | var tx *types.Transaction 35 | var err error 36 | if tx, _, err = daContract.SubmitOriginalData(dataRoots, false, 0); err != nil { 37 | t.logger.Debug("[transactor] estimate SubmitLogEntry tx failed") 38 | return eth_common.Hash{}, errors.WithMessage(err, "Failed to estimate SubmitLogEntry tx") 39 | } 40 | 41 | gasLimit := tx.Gas() + tx.Gas()*3/10 42 | if tx, _, err = daContract.SubmitOriginalData(dataRoots, false, gasLimit); err != nil { 43 | return eth_common.Hash{}, errors.WithMessage(err, "Failed to submit log entry") 44 | } 45 | 46 | return tx.Hash(), nil 47 | } 48 | 49 | func (t *Transactor) BatchUpload(daContract *contract.DAContract, dataRoots []eth_common.Hash) (eth_common.Hash, error) { 50 | stageTimer := time.Now() 51 | 52 | txHash, err := t.SubmitLogEntry(daContract, dataRoots) 53 | if err != nil { 54 | return eth_common.Hash{}, err 55 | } 56 | 57 | t.logger.Info("[transactor] batch upload took", "duration", time.Since(stageTimer)) 58 | 59 | return txHash, nil 60 | } 61 | 62 | func (t *Transactor) SubmitVerifiedCommitRoots(daContract *contract.DAContract, submissions []da_entrance.IDAEntranceCommitRootSubmission) (eth_common.Hash, error) { 63 | stageTimer := time.Now() 64 | 65 | t.mu.Lock() 66 | defer t.mu.Unlock() 67 | 68 | var tx *types.Transaction 69 | var err error 70 | 71 | var gasLimit uint64 72 | if t.gasLimit == 0 { 73 | if tx, _, err = daContract.SubmitVerifiedCommitRoots(submissions, 0, false, true); err != nil { 74 | return eth_common.Hash{}, errors.WithMessage(err, "Failed to estimate SubmitVerifiedCommitRoots") 75 | } 76 | 77 | gasLimit = tx.Gas() + tx.Gas()*3/10 78 | t.logger.Info("[transactor] estimate gas", "gas limit", tx.Gas()) 79 | } else { 80 | gasLimit = t.gasLimit 81 | } 82 | 83 | if tx, _, err = daContract.SubmitVerifiedCommitRoots(submissions, gasLimit, false, false); err != nil { 84 | return eth_common.Hash{}, errors.WithMessage(err, "Failed to submit verified commit roots") 85 | } 86 | 87 | t.logger.Debug("[transactor] submit verified commit roots took", "duration", time.Since(stageTimer)) 88 | 89 | return tx.Hash(), nil 90 | } 91 | -------------------------------------------------------------------------------- /disperser/cmd/apiserver/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "os" 8 | 9 | "github.com/0glabs/0g-da-client/disperser/apiserver" 10 | "github.com/0glabs/0g-da-client/disperser/common/blobstore" 11 | 12 | "github.com/0glabs/0g-da-client/common/aws/dynamodb" 13 | "github.com/0glabs/0g-da-client/common/aws/s3" 14 | "github.com/0glabs/0g-da-client/common/logging" 15 | "github.com/0glabs/0g-da-client/disperser" 16 | "github.com/0glabs/0g-da-client/disperser/cmd/apiserver/flags" 17 | 
"github.com/urfave/cli" 18 | ) 19 | 20 | var ( 21 | // version is the version of the binary. 22 | version string 23 | gitCommit string 24 | gitDate string 25 | ) 26 | 27 | func main() { 28 | app := cli.NewApp() 29 | app.Flags = flags.Flags 30 | app.Version = fmt.Sprintf("%s-%s-%s", version, gitCommit, gitDate) 31 | app.Name = "disperser" 32 | app.Usage = "ZGDA Disperser Server" 33 | app.Description = "Service for accepting blobs for dispersal" 34 | 35 | app.Action = RunDisperserServer 36 | err := app.Run(os.Args) 37 | if err != nil { 38 | log.Fatalf("application failed: %v", err) 39 | } 40 | 41 | select {} 42 | } 43 | 44 | func RunDisperserServer(ctx *cli.Context) error { 45 | config, err := NewConfig(ctx) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | logger, err := logging.GetLogger(config.LoggerConfig) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | var blobStore disperser.BlobStore 56 | 57 | s3Client, err := s3.NewClient(config.AwsClientConfig, logger) 58 | if err != nil { 59 | return err 60 | } 61 | 62 | dynamoClient, err := dynamodb.NewClient(config.AwsClientConfig, logger) 63 | if err != nil { 64 | return err 65 | } 66 | 67 | bucketName := config.BlobstoreConfig.BucketName 68 | logger.Info("Creating blob store", "bucket", bucketName) 69 | blobMetadataStore := blobstore.NewBlobMetadataStore(dynamoClient, logger, config.BlobstoreConfig.TableName, 0) 70 | blobStore = blobstore.NewSharedStorage(bucketName, s3Client, config.BlobstoreConfig.MetadataHashAsBlobKey, blobMetadataStore, logger) 71 | 72 | // Create new store 73 | kvStore, err := disperser.NewLevelDBStore(config.StorageNodeConfig.KvDbPath+"/chunk", config.StorageNodeConfig.TimeToExpire, logger) 74 | if err != nil { 75 | logger.Error("create level db failed") 76 | return nil 77 | } 78 | 79 | // TODO: create a separate metrics for batcher 80 | metrics := disperser.NewMetrics(config.MetricsConfig.HTTPPort, logger) 81 | 82 | server := apiserver.NewDispersalServer(config.ServerConfig, blobStore, logger, metrics, config.RatelimiterConfig, config.EnableRatelimiter, config.RateConfig, config.BlobstoreConfig.MetadataHashAsBlobKey, kvStore, config.RetrieverAddr) 83 | 84 | // Enable Metrics Block 85 | if config.MetricsConfig.EnableMetrics { 86 | httpSocket := fmt.Sprintf(":%s", config.MetricsConfig.HTTPPort) 87 | metrics.Start(context.Background()) 88 | logger.Info("Enabled metrics for Disperser", "socket", httpSocket) 89 | } 90 | 91 | return server.Start(context.Background()) 92 | } 93 | -------------------------------------------------------------------------------- /disperser/Makefile: -------------------------------------------------------------------------------- 1 | clean: 2 | rm -rf ./bin 3 | 4 | build: build_server build_batcher build_combined 5 | 6 | build_batcher: 7 | go build -o ./bin/batcher ./cmd/batcher 8 | 9 | build_server: 10 | go build -o ./bin/server ./cmd/apiserver 11 | 12 | build_combined: build_server build_batcher 13 | go build -o ./bin/combined ./cmd/combined_server 14 | 15 | run_batcher: build_batcher 16 | ./bin/batcher \ 17 | --batcher.pull-interval 5s \ 18 | --chain.rpc ETH_RPC_ENDPOINT \ 19 | --chain.private-key YOUR_PRIVATE_KEY \ 20 | --chain.receipt-wait-rounds 180 \ 21 | --chain.receipt-wait-interval 1s \ 22 | --chain.gas-limit 2000000 \ 23 | --batcher.finalizer-interval 300s \ 24 | --batcher.confirmer-num 3 \ 25 | --batcher.aws.region us-east-1 \ 26 | --batcher.aws.access-key-id localstack \ 27 | --batcher.aws.secret-access-key localstack \ 28 | --batcher.aws.endpoint-url http://0.0.0.0:4566 \ 29 
| --batcher.s3-bucket-name test-zgda-blobstore \ 30 | --batcher.dynamodb-table-name test-BlobMetadata \ 31 | --encoder-socket 0.0.0.0:34000 \ 32 | --batcher.batch-size-limit 50 \ 33 | --encoding-timeout 15s \ 34 | --chain-read-timeout 12s \ 35 | --chain-write-timeout 13s \ 36 | --batcher.storage.node-url http://0.0.0.0:5678 \ 37 | --batcher.storage.node-url http://0.0.0.0:6789 \ 38 | --batcher.storage.kv-url http://0.0.0.0:7890 \ 39 | --batcher.storage.kv-stream-id 000000000000000000000000000000000000000000000000000000000000f2bd \ 40 | --batcher.storage.flow-contract FLOW_CONTRACT_ADDR 41 | 42 | run_server: build_server 43 | ./bin/server \ 44 | --chain.rpc ETH_RPC_ENDPOINT \ 45 | --disperser-server.grpc-port 51001 \ 46 | --disperser-server.s3-bucket-name test-zgda-blobstore \ 47 | --disperser-server.dynamodb-table-name test-BlobMetadata \ 48 | --disperser-server.aws.region us-east-1 \ 49 | --disperser-server.aws.access-key-id localstack \ 50 | --disperser-server.aws.secret-access-key localstack \ 51 | --disperser-server.aws.endpoint-url http://0.0.0.0:4566 52 | 53 | run_combined: build_combined 54 | ./bin/combined \ 55 | --chain.rpc ETH_RPC_ENDPOINT \ 56 | --chain.private-key YOUR_PRIVATE_KEY \ 57 | --chain.receipt-wait-rounds 180 \ 58 | --chain.receipt-wait-interval 1s \ 59 | --chain.gas-limit 2000000 \ 60 | --combined-server.use-memory-db \ 61 | --combined-server.storage.da-entrance-contract ENTRANCE_CONTRACT_ADDR \ 62 | --combined-server.storage.da-signers-contract SIGNERS_CONTRACT_ADDR \ 63 | --combined-server.storage.kv-db-path /data/db \ 64 | --disperser-server.s3-bucket-name test-zgda-blobstore \ 65 | --disperser-server.dynamodb-table-name test-BlobMetadata \ 66 | --disperser-server.grpc-port 51001 \ 67 | --batcher.s3-bucket-name test-zgda-blobstore \ 68 | --batcher.dynamodb-table-name test-BlobMetadata \ 69 | --batcher.pull-interval 5s \ 70 | --batcher.finalizer-interval 20s \ 71 | --batcher.confirmer-num 3 \ 72 | --batcher.max-num-retries-for-sign 2 \ 73 | --batcher.finalized-block-count 50 \ 74 | --encoder-socket 0.0.0.0:34000 \ 75 | --batcher.batch-size-limit 50 \ 76 | --encoding-timeout 15s \ 77 | --chain-read-timeout 12s \ 78 | --chain-write-timeout 13s -------------------------------------------------------------------------------- /common/store/dynamo_store_test.go: -------------------------------------------------------------------------------- 1 | package store_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "testing" 8 | "time" 9 | 10 | "github.com/0glabs/0g-da-client/common" 11 | "github.com/0glabs/0g-da-client/common/aws" 12 | "github.com/0glabs/0g-da-client/common/aws/dynamodb" 13 | test_utils "github.com/0glabs/0g-da-client/common/aws/dynamodb/utils" 14 | cmock "github.com/0glabs/0g-da-client/common/mock" 15 | "github.com/0glabs/0g-da-client/common/store" 16 | "github.com/0glabs/0g-da-client/inabox/deploy" 17 | "github.com/ory/dockertest/v3" 18 | "github.com/stretchr/testify/assert" 19 | ) 20 | 21 | var ( 22 | logger = &cmock.Logger{} 23 | 24 | dockertestPool *dockertest.Pool 25 | dockertestResource *dockertest.Resource 26 | 27 | deployLocalStack bool 28 | localStackPort = "4566" 29 | 30 | dynamoClient *dynamodb.Client 31 | dynamoParamStore common.KVStore[common.RateBucketParams] 32 | bucketTableName = "BucketStore" 33 | ) 34 | 35 | func TestMain(m *testing.M) { 36 | setup(m) 37 | code := m.Run() 38 | teardown() 39 | os.Exit(code) 40 | } 41 | 42 | func setup(m *testing.M) { 43 | 44 | deployLocalStack = !(os.Getenv("DEPLOY_LOCALSTACK") == "false") 45 | if 
!deployLocalStack { 46 | localStackPort = os.Getenv("LOCALSTACK_PORT") 47 | } 48 | 49 | if deployLocalStack { 50 | var err error 51 | dockertestPool, dockertestResource, err = deploy.StartDockertestWithLocalstackContainer(localStackPort) 52 | if err != nil { 53 | teardown() 54 | panic("failed to start localstack container") 55 | } 56 | } 57 | 58 | cfg := aws.ClientConfig{ 59 | Region: "us-east-1", 60 | AccessKey: "localstack", 61 | SecretAccessKey: "localstack", 62 | EndpointURL: fmt.Sprintf("http://0.0.0.0:%s", localStackPort), 63 | } 64 | 65 | _, err := test_utils.CreateTable(context.Background(), cfg, bucketTableName, store.GenerateTableSchema(10, 10, bucketTableName)) 66 | if err != nil { 67 | teardown() 68 | panic("failed to create dynamodb table: " + err.Error()) 69 | } 70 | 71 | dynamoClient, err = dynamodb.NewClient(cfg, logger) 72 | if err != nil { 73 | teardown() 74 | panic("failed to create dynamodb client: " + err.Error()) 75 | } 76 | 77 | dynamoParamStore = store.NewDynamoParamStore[common.RateBucketParams](dynamoClient, bucketTableName) 78 | } 79 | 80 | func teardown() { 81 | if deployLocalStack { 82 | deploy.PurgeDockertestResources(dockertestPool, dockertestResource) 83 | } 84 | } 85 | 86 | func TestDynamoBucketStore(t *testing.T) { 87 | ctx := context.Background() 88 | 89 | p := &common.RateBucketParams{ 90 | BucketLevels: []time.Duration{time.Second, time.Minute}, 91 | LastRequestTime: time.Now().UTC(), 92 | } 93 | 94 | p2, err := dynamoParamStore.GetItem(ctx, "testRetriever") 95 | assert.Error(t, err) 96 | assert.Nil(t, p2) 97 | 98 | err = dynamoParamStore.UpdateItem(ctx, "testRetriever", p) 99 | assert.NoError(t, err) 100 | 101 | p2, err = dynamoParamStore.GetItem(ctx, "testRetriever") 102 | 103 | assert.NoError(t, err) 104 | assert.Equal(t, p, p2) 105 | } 106 | -------------------------------------------------------------------------------- /common/geth/cli.go: -------------------------------------------------------------------------------- 1 | package geth 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/0glabs/0g-da-client/common" 7 | "github.com/urfave/cli" 8 | ) 9 | 10 | var ( 11 | rpcUrlFlagName = "chain.rpc" 12 | privateKeyFlagName = "chain.private-key" 13 | numConfirmationsFlagName = "chain.num-confirmations" 14 | txGasLimitFlagName = "chain.gas-limit" 15 | receiptPollingRoundsFlagName = "chain.receipt-wait-rounds" 16 | receiptPollingIntervalFlagName = "chain.receipt-wait-interval" 17 | ) 18 | 19 | type EthClientConfig struct { 20 | RPCURL string 21 | PrivateKeyString string 22 | NumConfirmations int 23 | TxGasLimit int 24 | ReceiptPollingRounds uint 25 | ReceiptPollingInterval time.Duration 26 | } 27 | 28 | func EthClientFlags(envPrefix string) []cli.Flag { 29 | return []cli.Flag{ 30 | cli.StringFlag{ 31 | Name: rpcUrlFlagName, 32 | Usage: "Chain rpc", 33 | Required: true, 34 | EnvVar: common.PrefixEnvVar(envPrefix, "CHAIN_RPC"), 35 | }, 36 | cli.StringFlag{ 37 | Name: privateKeyFlagName, 38 | Usage: "Ethereum private key for disperser", 39 | Required: false, 40 | Value: "0000000000000000000000000000000000000000000000000000000000000000", 41 | EnvVar: common.PrefixEnvVar(envPrefix, "PRIVATE_KEY"), 42 | }, 43 | cli.IntFlag{ 44 | Name: numConfirmationsFlagName, 45 | Usage: "Number of confirmations to wait for", 46 | Required: false, 47 | Value: 0, 48 | EnvVar: common.PrefixEnvVar(envPrefix, "NUM_CONFIRMATIONS"), 49 | }, 50 | cli.IntFlag{ 51 | Name: txGasLimitFlagName, 52 | Usage: "Gas limit for transaction", 53 | Required: false, 54 | Value: 0, 55 | EnvVar: 
common.PrefixEnvVar(envPrefix, "TX_GAS_LIMIT"), 56 | }, 57 | cli.UintFlag{ 58 | Name: receiptPollingRoundsFlagName, 59 | Usage: "Rounds of receipt polling", 60 | Required: false, 61 | Value: 60, 62 | EnvVar: common.PrefixEnvVar(envPrefix, "RECEIPT_POLLING_ROUNDS"), 63 | }, 64 | cli.DurationFlag{ 65 | Name: receiptPollingIntervalFlagName, 66 | Usage: "Interval of receipt polling", 67 | Required: false, 68 | Value: time.Second, 69 | EnvVar: common.PrefixEnvVar(envPrefix, "RECEIPT_POLLING_INTERVAL"), 70 | }, 71 | } 72 | } 73 | 74 | func ReadEthClientConfig(ctx *cli.Context) EthClientConfig { 75 | cfg := EthClientConfig{} 76 | cfg.RPCURL = ctx.GlobalString(rpcUrlFlagName) 77 | cfg.PrivateKeyString = ctx.GlobalString(privateKeyFlagName) 78 | cfg.NumConfirmations = ctx.GlobalInt(numConfirmationsFlagName) 79 | cfg.TxGasLimit = ctx.GlobalInt(txGasLimitFlagName) 80 | cfg.ReceiptPollingRounds = ctx.GlobalUint(receiptPollingRoundsFlagName) 81 | cfg.ReceiptPollingInterval = ctx.GlobalDuration(receiptPollingIntervalFlagName) 82 | return cfg 83 | } 84 | 85 | // ReadEthClientConfigRPCOnly doesn't read private key from flag. 86 | // The private key for Node should be read from encrypted key file. 87 | func ReadEthClientConfigRPCOnly(ctx *cli.Context) EthClientConfig { 88 | cfg := EthClientConfig{} 89 | cfg.RPCURL = ctx.GlobalString(rpcUrlFlagName) 90 | cfg.NumConfirmations = ctx.GlobalInt(numConfirmationsFlagName) 91 | return cfg 92 | } 93 | -------------------------------------------------------------------------------- /common/ratelimit/limiter.go: -------------------------------------------------------------------------------- 1 | package ratelimit 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | "time" 7 | 8 | "github.com/0glabs/0g-da-client/common" 9 | ) 10 | 11 | type BucketStore = common.KVStore[common.RateBucketParams] 12 | 13 | type rateLimiter struct { 14 | globalRateParams common.GlobalRateParams 15 | 16 | bucketStore BucketStore 17 | allowlist []string 18 | 19 | logger common.Logger 20 | } 21 | 22 | func NewRateLimiter(rateParams common.GlobalRateParams, bucketStore BucketStore, allowlist []string, logger common.Logger) common.RateLimiter { 23 | return &rateLimiter{ 24 | globalRateParams: rateParams, 25 | bucketStore: bucketStore, 26 | allowlist: allowlist, 27 | logger: logger, 28 | } 29 | } 30 | 31 | // Checks whether a request from the given requesterID is allowed 32 | func (d *rateLimiter) AllowRequest(ctx context.Context, requesterID common.RequesterID, blobSize uint, rate common.RateParam) (bool, error) { 33 | // TODO: temporary allowlist that unconditionally allows request 34 | // for testing purposes only 35 | for _, id := range d.allowlist { 36 | if strings.Contains(requesterID, id) { 37 | return true, nil 38 | } 39 | } 40 | 41 | // Retrieve bucket params for the requester ID 42 | // This will be from dynamo for Disperser and from local storage for DA node 43 | 44 | bucketParams, err := d.bucketStore.GetItem(ctx, requesterID) 45 | if err != nil { 46 | 47 | bucketLevels := make([]time.Duration, len(d.globalRateParams.BucketSizes)) 48 | copy(bucketLevels, d.globalRateParams.BucketSizes) 49 | 50 | bucketParams = &common.RateBucketParams{ 51 | BucketLevels: bucketLevels, 52 | LastRequestTime: time.Now().UTC(), 53 | } 54 | } 55 | 56 | // Check whether the request is allowed based on the rate 57 | 58 | // Get interval since last request 59 | interval := time.Since(bucketParams.LastRequestTime) 60 | bucketParams.LastRequestTime = time.Now().UTC() 61 | 62 | // Calculate updated bucket levels 
63 | allowed := true 64 | for i, size := range d.globalRateParams.BucketSizes { 65 | 66 | // Determine bucket deduction 67 | deduction := time.Microsecond * time.Duration(1e6*float32(blobSize)/float32(rate)/d.globalRateParams.Multipliers[i]) 68 | 69 | // Update the bucket level 70 | bucketParams.BucketLevels[i] = getBucketLevel(bucketParams.BucketLevels[i], size, interval, deduction) 71 | 72 | allowed = allowed && bucketParams.BucketLevels[i] > 0 73 | } 74 | 75 | // Update the bucket based on blob size and current rate 76 | if allowed || d.globalRateParams.CountFailed { 77 | // Update bucket params 78 | err := d.bucketStore.UpdateItem(ctx, requesterID, bucketParams) 79 | if err != nil { 80 | return allowed, err 81 | } 82 | 83 | } 84 | 85 | return allowed, nil 86 | 87 | // (DA Node) Store the rate params and account ID along with the blob 88 | } 89 | 90 | func getBucketLevel(bucketLevel, bucketSize, interval, deduction time.Duration) time.Duration { 91 | 92 | newLevel := bucketLevel + interval - deduction 93 | if newLevel < 0 { 94 | newLevel = 0 95 | } 96 | if newLevel > bucketSize { 97 | newLevel = bucketSize 98 | } 99 | 100 | return newLevel 101 | 102 | } 103 | -------------------------------------------------------------------------------- /docs/architecture/README.md: -------------------------------------------------------------------------------- 1 | # Architecture 2 | 3 | The 0G system consists of a data availability layer (0G DA) on top of a decentralized storage system (0G Storage). A separate consensus network is part of both 0G DA and 0G Storage. 4 | 5 | * For 0G Storage, the consensus is responsible for determining the ordering of the uploaded data blocks, and it realizes the storage mining verification and the corresponding incentive mechanism through smart contracts. 6 | * For 0G DA, the consensus is in charge of guaranteeing the data availability of each data block via data availability sampling. Each validator samples independently, and once a majority of the validators reach consensus on successful sampling results, the data is treated as available. Data availability sampling mainly verifies that a given data block is not maliciously withheld by the client and has indeed been ingested into 0G Storage, where it is stored in a reliable and persistent way. 7 | 8 | As shown in Figure 1, data is made available on 0G DA through the following flow: 9 | 10 | #### Blob Dispersal 11 | 12 | 1. A user submits a data blob to a Disperser service. 13 | 2. The Disperser encodes the data in accordance with the encoding requirements, constructs the appropriate metadata, and directly stores both the blob and the metadata in the S3 bucket. 14 | 3. A Batcher listens for metadata updates on S3, encodes the blob, and stores the encoded blob in memory. 15 | 4. The Batcher then packs multiple blobs into one batch, together with the KZG commitment of each blob. It also constructs a Merkle tree of the batch, which is used to verify that a given blob is included in the batch, for data integrity. 16 | 5. The Batcher sends the Merkle root to the on-chain 0G Storage contract and sends the full batch to a 0G Storage node. 17 | 6. The 0G Storage node listens for the on-chain event, fetches the Merkle root, and verifies that the batch data matches it. If so, it stores the batch together with its metadata in the 0G Storage KV.
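To make step 1 of the dispersal flow concrete, here is a minimal Go sketch that submits a blob to a locally running disperser (gRPC port 51001, matching the disperser Makefile). The generated-package import path and the request/reply field names are assumptions here — consult `api/proto/disperser/disperser.proto` for the authoritative schema.

```
package main

import (
	"context"
	"log"
	"time"

	pb "github.com/0glabs/0g-da-client/api/grpc/disperser" // assumed import path for the generated stubs
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Dial the disperser's gRPC endpoint (plaintext, local test setup).
	conn, err := grpc.DialContext(ctx, "localhost:51001",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial disperser: %v", err)
	}
	defer conn.Close()

	// Submit the raw blob; the disperser takes care of encoding, metadata, and S3 storage.
	client := pb.NewDisperserClient(conn)
	reply, err := client.DisperseBlob(ctx, &pb.DisperseBlobRequest{
		Data: []byte("hello 0g da"),
	})
	if err != nil {
		log.Fatalf("disperse blob: %v", err)
	}
	log.Printf("request id: %x", reply.GetRequestId()) // reply field name assumed
}
```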
18 | 19 | #### Blob Retrieval 20 | 21 | There are two approaches to data retrieval. 22 | 23 | * If a user trusts an existing disperser service. 24 | 1. A user can directly request a disperser to download the blob data from S3, provided that the user trusts the disperser. This makes the retrieval phase highly efficient, since it relies on that trust to skip the expensive data verification process. As a result, it unlocks the high throughput of the 0G DA system. 25 | * If a user doesn't trust any disperser. 26 | 1. A user can start their own retriever service. 27 | 2. The retriever service will verify the metadata and Merkle proof of the blob before fetching the full data chunks. 28 | 3. After successful verification, the retriever starts to download each data chunk and verify it against its KZG commitment. 29 | 4. Once all chunks have been downloaded and verified, the retriever returns the whole data blob to the user. 30 | 31 | In this way, 0G DA provides not only a [security guarantee](../security/) for the data but also an efficient path for the user to quickly retrieve the data from a disperser. 32 |
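For the trustless path, the retriever exposes a small gRPC surface that the integration tests also exercise. Below is a minimal Go sketch; the message and field names mirror `api/proto/retriever/retriever.proto`, while the import path and the reply's data accessor are assumptions.

```
package main

import (
	"context"
	"log"
	"time"

	pb "github.com/0glabs/0g-da-client/api/grpc/retriever" // generated from api/proto/retriever/retriever.proto
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Both values come from the blob verification proof returned at dispersal time.
	var batchHeaderHash []byte
	var blobIndex uint32

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "localhost:32011", // retriever port used by the test framework
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial retriever: %v", err)
	}
	defer conn.Close()

	// The retriever verifies the metadata, the Merkle proof, and the per-chunk KZG
	// commitments before returning the reassembled blob.
	client := pb.NewRetrieverClient(conn)
	reply, err := client.RetrieveBlob(ctx, &pb.BlobRequest{
		BatchHeaderHash: batchHeaderHash,
		BlobIndex:       blobIndex,
	})
	if err != nil {
		log.Fatalf("retrieve blob: %v", err)
	}
	log.Printf("retrieved %d bytes", len(reply.GetData())) // reply field name assumed
}
```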
*Figure 1. Architecture Overview*
34 | -------------------------------------------------------------------------------- /tests/da_test_framework/da_batcher.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | 5 | sys.path.append("../0g-storage-kv/tests") 6 | 7 | from test_framework.blockchain_node import TestNode 8 | from utility.utils import blockchain_rpc_port 9 | from config.node_config import GENESIS_PRIV_KEY 10 | from da_test_framework.da_node_type import DANodeType 11 | 12 | __file_path__ = os.path.dirname(os.path.realpath(__file__)) 13 | 14 | 15 | class DABatcher(TestNode): 16 | def __init__( 17 | self, 18 | root_dir, 19 | binary, 20 | updated_config, 21 | log, 22 | ): 23 | local_conf = { 24 | "log_config_file": "log_config", 25 | "blockchain_rpc_endpoint": f"http://127.0.0.1:{blockchain_rpc_port(0)}", 26 | } 27 | 28 | local_conf.update(updated_config) 29 | data_dir = os.path.join(root_dir, "da_batcher") 30 | super().__init__( 31 | DANodeType.DA_BATCHER, 32 | 12, 33 | data_dir, 34 | None, 35 | binary, 36 | local_conf, 37 | log, 38 | None, 39 | ) 40 | self.args = [ 41 | binary, 42 | "--batcher.pull-interval", "10s", 43 | "--chain.rpc", local_conf['blockchain_rpc_endpoint'], 44 | "--chain.private-key", GENESIS_PRIV_KEY, 45 | "--batcher.finalizer-interval", "20s", 46 | "--batcher.aws.region", "us-east-1", 47 | "--batcher.aws.access-key-id", "localstack", 48 | "--batcher.aws.secret-access-key", "localstack", 49 | "--batcher.aws.endpoint-url", "http://0.0.0.0:4566", 50 | "--batcher.s3-bucket-name", "test-zgda-blobstore", 51 | "--batcher.dynamodb-table-name", "test-BlobMetadata", 52 | "--encoder-socket", "0.0.0.0:34000", 53 | "--batcher.batch-size-limit", "10000", 54 | "--batcher.srs-order", "300000", 55 | "--encoding-timeout", "10s", 56 | "--chain-read-timeout", "12s", 57 | "--chain-write-timeout", "13s", 58 | "--batcher.storage.node-url", f'http://{local_conf["node_rpc_endpoint"]}', 59 | "--batcher.storage.kv-url", f'http://{local_conf["kv_rpc_endpoint"]}', 60 | "--batcher.storage.kv-stream-id", local_conf['stream_id'], 61 | "--batcher.storage.flow-contract", local_conf['log_contract_address'] 62 | ] 63 | 64 | def start(self): 65 | self.log.info("Start DA batcher") 66 | super().start() 67 | 68 | def wait_for_rpc_connection(self): 69 | time.sleep(15) 70 | 71 | def stop(self): 72 | self.log.info("Stop DA batcher") 73 | try: 74 | super().stop(kill=True, wait=False) 75 | except AssertionError as e: 76 | err = repr(e) 77 | # The batcher will check return_code via rpc when error log exists 78 | # that is written when the batcher starts normally. 79 | # The exception handling can be removed when rpc is added or the error 80 | # is not written when the batcher starts normally. 
81 | if "no RPC connection" in err: 82 | self.log.debug(f"Stop DA encoder: no RPC connection") 83 | else: 84 | raise e 85 | -------------------------------------------------------------------------------- /inabox/deploy/cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "path/filepath" 7 | 8 | "github.com/0glabs/0g-da-client/inabox/deploy" 9 | "github.com/urfave/cli/v2" 10 | ) 11 | 12 | var ( 13 | testNameFlagName = "testname" 14 | rootPathFlagName = "root-path" 15 | localstackFlagName = "localstack-port" 16 | deployResourcesFlagName = "deploy-resources" 17 | 18 | metadataTableName = "test-BlobMetadata" 19 | bucketTableName = "test-zgda-blobstore" 20 | 21 | chainCmdName = "chain" 22 | localstackCmdName = "localstack" 23 | expCmdName = "exp" 24 | allCmdName = "all" 25 | ) 26 | 27 | func main() { 28 | app := &cli.App{ 29 | Flags: []cli.Flag{ 30 | &cli.StringFlag{ 31 | Name: testNameFlagName, 32 | Usage: "name of the test to run (in `inabox/testdata`)", 33 | EnvVars: []string{"ZGDA_TESTDATA_PATH"}, 34 | Value: "", 35 | }, 36 | &cli.StringFlag{ 37 | Name: rootPathFlagName, 38 | Usage: "path to the root of repo", 39 | Value: "../", 40 | }, 41 | &cli.StringFlag{ 42 | Name: localstackFlagName, 43 | Value: "", 44 | Usage: "path to the config file", 45 | }, 46 | &cli.BoolFlag{ 47 | Name: deployResourcesFlagName, 48 | Value: true, 49 | Usage: "whether to deploy localstack resources", 50 | }, 51 | }, 52 | Commands: []*cli.Command{ 53 | { 54 | Name: chainCmdName, 55 | Usage: "deploy the chain infrastructure (anvil, graph) for the inabox test", 56 | Action: getRunner(chainCmdName), 57 | }, 58 | { 59 | Name: localstackCmdName, 60 | Usage: "deploy localstack and create the AWS resources needed for the inabox test", 61 | Action: getRunner(localstackCmdName), 62 | }, 63 | { 64 | Name: expCmdName, 65 | Usage: "deploy the contracts and create configurations for all ZGDA components", 66 | Action: getRunner(expCmdName), 67 | }, 68 | { 69 | Name: allCmdName, 70 | Usage: "deploy all infra, resources, contracts", 71 | Action: getRunner(allCmdName), 72 | }, 73 | }, 74 | } 75 | 76 | if err := app.Run(os.Args); err != nil { 77 | log.Fatal(err) 78 | } 79 | } 80 | 81 | func getRunner(command string) func(ctx *cli.Context) error { 82 | 83 | return func(ctx *cli.Context) error { 84 | 85 | if command != localstackCmdName { 86 | rootPath, err := filepath.Abs(ctx.String(rootPathFlagName)) 87 | if err != nil { 88 | return err 89 | } 90 | testname := ctx.String(testNameFlagName) 91 | if testname == "" { 92 | testname, err = deploy.GetLatestTestDirectory(rootPath) 93 | if err != nil { 94 | return err 95 | } 96 | } 97 | } 98 | 99 | switch command { 100 | case localstackCmdName: 101 | return localstack(ctx) 102 | } 103 | 104 | return nil 105 | 106 | } 107 | 108 | } 109 | 110 | func localstack(ctx *cli.Context) error { 111 | 112 | pool, _, err := deploy.StartDockertestWithLocalstackContainer(ctx.String(localstackFlagName)) 113 | if err != nil { 114 | return err 115 | } 116 | 117 | if ctx.Bool(deployResourcesFlagName) { 118 | return deploy.DeployResources(pool, ctx.String(localstackFlagName), metadataTableName, bucketTableName) 119 | //return deploy.DeployResources(nil, ctx.String(localstackFlagName), metadataTableName, bucketTableName) 120 | } 121 | 122 | return nil 123 | } 124 | -------------------------------------------------------------------------------- /core/bn254/attestation.go: 
-------------------------------------------------------------------------------- 1 | package bn254 2 | 3 | import ( 4 | "math/big" 5 | 6 | "github.com/consensys/gnark-crypto/ecc/bn254" 7 | "github.com/consensys/gnark-crypto/ecc/bn254/fp" 8 | "github.com/consensys/gnark-crypto/ecc/bn254/fr" 9 | "github.com/ethereum/go-ethereum/common" 10 | "github.com/ethereum/go-ethereum/crypto" 11 | ) 12 | 13 | func VerifySig(sig *bn254.G1Affine, pubkey *bn254.G2Affine, msgBytes [32]byte) (bool, error) { 14 | 15 | g2Gen := GetG2Generator() 16 | 17 | msgPoint := MapToCurve(msgBytes) 18 | 19 | var negSig bn254.G1Affine 20 | negSig.Neg((*bn254.G1Affine)(sig)) 21 | 22 | P := [2]bn254.G1Affine{*msgPoint, negSig} 23 | Q := [2]bn254.G2Affine{*pubkey, *g2Gen} 24 | // PairingCheck verifies e(H(m), pk) * e(-sig, g2) == 1, i.e. e(H(m), pk) == e(sig, g2) 25 | ok, err := bn254.PairingCheck(P[:], Q[:]) 26 | if err != nil { 27 | return false, err 28 | } 29 | return ok, nil 30 | 31 | } 32 | 33 | func MapToCurve(digest [32]byte) *bn254.G1Affine { 34 | // Try-and-increment: walk x upward until x^3 + 3 is a quadratic residue mod p (the curve is y^2 = x^3 + 3) 35 | one := new(big.Int).SetUint64(1) 36 | three := new(big.Int).SetUint64(3) 37 | x := new(big.Int) 38 | x.SetBytes(digest[:]) 39 | for { 40 | // y = x^3 + 3 41 | xP3 := new(big.Int).Exp(x, big.NewInt(3), fp.Modulus()) 42 | y := new(big.Int).Add(xP3, three) 43 | y.Mod(y, fp.Modulus()) 44 | 45 | if y.ModSqrt(y, fp.Modulus()) == nil { 46 | x.Add(x, one).Mod(x, fp.Modulus()) 47 | } else { 48 | var fpX, fpY fp.Element 49 | fpX.SetBigInt(x) 50 | fpY.SetBigInt(y) 51 | return &bn254.G1Affine{ 52 | X: fpX, 53 | Y: fpY, 54 | } 55 | } 56 | } 57 | } 58 | 59 | func CheckG1AndG2DiscreteLogEquality(pointG1 *bn254.G1Affine, pointG2 *bn254.G2Affine) (bool, error) { 60 | negGenG1 := new(bn254.G1Affine).Neg(GetG1Generator()) 61 | return bn254.PairingCheck([]bn254.G1Affine{*pointG1, *negGenG1}, []bn254.G2Affine{*GetG2Generator(), *pointG2}) 62 | } 63 | 64 | func GetG1Generator() *bn254.G1Affine { 65 | g1Gen := new(bn254.G1Affine) 66 | _, err := g1Gen.X.SetString("1") 67 | if err != nil { 68 | return nil 69 | } 70 | _, err = g1Gen.Y.SetString("2") 71 | if err != nil { 72 | return nil 73 | } 74 | return g1Gen 75 | } 76 | 77 | func GetG2Generator() *bn254.G2Affine { 78 | g2Gen := new(bn254.G2Affine) 79 | g2Gen.X.SetString("10857046999023057135944570762232829481370756359578518086990519993285655852781", 80 | "11559732032986387107991004021392285783925812861821192530917403151452391805634") 81 | g2Gen.Y.SetString("8495653923123431417604973247489272438418190587263600148770280649306958101930", 82 | "4082367875863433681332203403145435568316851327593401208105741076214120093531") 83 | return g2Gen 84 | } 85 | 86 | func MulByGeneratorG1(a *fr.Element) *bn254.G1Affine { 87 | g1Gen := GetG1Generator() 88 | return new(bn254.G1Affine).ScalarMultiplication(g1Gen, a.BigInt(new(big.Int))) 89 | } 90 | 91 | func MulByGeneratorG2(a *fr.Element) *bn254.G2Affine { 92 | g2Gen := GetG2Generator() 93 | return new(bn254.G2Affine).ScalarMultiplication(g2Gen, a.BigInt(new(big.Int))) 94 | } 95 | 96 | func MakePubkeyRegistrationData(privKey *fr.Element, operatorAddress common.Address) *bn254.G1Affine { 97 | toHash := make([]byte, 0) 98 | toHash = append(toHash, crypto.Keccak256([]byte("BN254PubkeyRegistration(address operator)"))...) 99 | toHash = append(toHash, operatorAddress.Bytes()...)
100 | 101 | msgHash := crypto.Keccak256(toHash) 102 | // convert to [32]byte 103 | var msgHash32 [32]byte 104 | copy(msgHash32[:], msgHash) 105 | 106 | // hash to G1 107 | hashToSign := MapToCurve(msgHash32) 108 | 109 | return new(bn254.G1Affine).ScalarMultiplication(hashToSign, privKey.BigInt(new(big.Int))) 110 | } 111 | -------------------------------------------------------------------------------- /disperser/cmd/batcher/config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/0glabs/0g-da-client/common/aws" 5 | "github.com/0glabs/0g-da-client/common/geth" 6 | "github.com/0glabs/0g-da-client/common/logging" 7 | "github.com/0glabs/0g-da-client/common/storage_node" 8 | "github.com/0glabs/0g-da-client/disperser/batcher" 9 | "github.com/0glabs/0g-da-client/disperser/cmd/batcher/flags" 10 | "github.com/0glabs/0g-da-client/disperser/common/blobstore" 11 | "github.com/urfave/cli" 12 | ) 13 | 14 | type Config struct { 15 | BatcherConfig batcher.Config 16 | TimeoutConfig batcher.TimeoutConfig 17 | BlobstoreConfig blobstore.Config 18 | EthClientConfig geth.EthClientConfig 19 | AwsClientConfig aws.ClientConfig 20 | LoggerConfig logging.Config 21 | MetricsConfig batcher.MetricsConfig 22 | StorageNodeConfig storage_node.ClientConfig 23 | } 24 | 25 | func NewConfig(ctx *cli.Context) Config { 26 | config := Config{ 27 | BlobstoreConfig: blobstore.Config{ 28 | BucketName: ctx.GlobalString(flags.S3BucketNameFlag.Name), 29 | TableName: ctx.GlobalString(flags.DynamoDBTableNameFlag.Name), 30 | MetadataHashAsBlobKey: ctx.GlobalBool(flags.MetadataHashAsBlobKey.Name), 31 | }, 32 | EthClientConfig: geth.ReadEthClientConfig(ctx), 33 | AwsClientConfig: aws.ReadClientConfig(ctx, flags.FlagPrefix), 34 | LoggerConfig: logging.ReadCLIConfig(ctx, flags.FlagPrefix), 35 | BatcherConfig: batcher.Config{ 36 | PullInterval: ctx.GlobalDuration(flags.PullIntervalFlag.Name), 37 | FinalizerInterval: ctx.GlobalDuration(flags.FinalizerIntervalFlag.Name), 38 | EncoderSocket: ctx.GlobalString(flags.EncoderSocket.Name), 39 | NumConnections: ctx.GlobalInt(flags.NumConnectionsFlag.Name), 40 | EncodingRequestQueueSize: ctx.GlobalInt(flags.EncodingRequestQueueSizeFlag.Name), 41 | BatchSizeMBLimit: ctx.GlobalUint(flags.BatchSizeLimitFlag.Name), 42 | MaxNumRetriesPerBlob: ctx.GlobalUint(flags.MaxNumRetriesPerBlobFlag.Name), 43 | ConfirmerNum: ctx.GlobalUint(flags.ConfirmerNumFlag.Name), 44 | DAEntranceContractAddress: ctx.GlobalString(flags.DAEntranceContractAddressFlag.Name), 45 | DASignersContractAddress: ctx.GlobalString(flags.DASignersContractAddressFlag.Name), 46 | EncodingInterval: ctx.GlobalDuration(flags.EncodingIntervalFlag.Name), 47 | SigningInterval: ctx.GlobalDuration(flags.SigningIntervalFlag.Name), 48 | MaxNumRetriesForSign: ctx.GlobalUint(flags.MaxNumRetriesForSignFlag.Name), 49 | FinalizedBlockCount: ctx.GlobalUint(flags.FinalizedBlockCountFlag.Name), 50 | ExpirationPollIntervalSec: ctx.GlobalUint64(flags.ExpirationPollIntervalSecFlag.Name), 51 | SignedPullInterval: ctx.GlobalDuration(flags.SignedPullIntervalFlag.Name), 52 | VerifiedCommitRootsTxGasLimit: ctx.GlobalUint64(flags.VerifiedCommitRootsTxGasLimitFlag.Name), 53 | }, 54 | TimeoutConfig: batcher.TimeoutConfig{ 55 | EncodingTimeout: ctx.GlobalDuration(flags.EncodingTimeoutFlag.Name), 56 | ChainReadTimeout: ctx.GlobalDuration(flags.ChainReadTimeoutFlag.Name), 57 | ChainWriteTimeout: ctx.GlobalDuration(flags.ChainWriteTimeoutFlag.Name), 58 | SigningTimeout: 
ctx.GlobalDuration(flags.SigningTimeoutFlag.Name), 59 | }, 60 | MetricsConfig: batcher.MetricsConfig{ 61 | HTTPPort: ctx.GlobalString(flags.MetricsHTTPPort.Name), 62 | EnableMetrics: ctx.GlobalBool(flags.EnableMetrics.Name), 63 | }, 64 | StorageNodeConfig: storage_node.ReadClientConfig(ctx, flags.FlagPrefix), 65 | } 66 | return config 67 | } 68 | -------------------------------------------------------------------------------- /tests/da_test_framework/blockchain_node.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import subprocess 4 | import tempfile 5 | import time 6 | import rlp 7 | import shutil 8 | 9 | from eth_utils import decode_hex, keccak 10 | from web3 import Web3, HTTPProvider 11 | from enum import Enum, unique 12 | from config.node_config import PRIV_KEY 13 | 14 | from utility.simple_rpc_proxy import SimpleRpcProxy 15 | from utility.utils import ( 16 | initialize_config, 17 | wait_until, 18 | ) 19 | from da_test_framework.contracts import load_contract_metadata 20 | 21 | from da_test_framework.test_node import TestNode 22 | 23 | @unique 24 | class NodeType(Enum): 25 | BlockChain = 0 26 | Zgs = 1 27 | KV = 2 28 | 29 | 30 | class FailedToStartError(Exception): 31 | """Raised when a node fails to start correctly.""" 32 | 33 | 34 | class BlockchainNode(TestNode): 35 | def __init__( 36 | self, 37 | data_dir, 38 | rpc_url, 39 | binary, 40 | local_conf, 41 | contract_path, 42 | log, 43 | rpc_timeout=10, 44 | ): 45 | self.contract_path = contract_path 46 | 47 | super().__init__( 48 | NodeType.BlockChain, 49 | 0, 50 | data_dir, 51 | rpc_url, 52 | binary, 53 | local_conf, 54 | log, 55 | rpc_timeout, 56 | ) 57 | 58 | my_env = os.environ.copy() 59 | idx = rpc_url.find("://") 60 | url = rpc_url[idx+3:] if idx != -1 else rpc_url 61 | 62 | self.args = [ 63 | binary, "start", 64 | "--home", os.path.join(my_env["HOME"], ".0gchain"), 65 | "--json-rpc.address", url, 66 | ] 67 | 68 | def wait_for_rpc_connection(self): 69 | time.sleep(10) 70 | self._wait_for_rpc_connection(lambda rpc: rpc.eth_syncing() is False) 71 | 72 | def wait_for_start_mining(self): 73 | self._wait_for_rpc_connection(lambda rpc: int(rpc.eth_blockNumber(), 16) > 0) 74 | 75 | def wait_for_transaction_receipt(self, w3, tx_hash, timeout=120, parent_hash=None): 76 | return w3.eth.wait_for_transaction_receipt(tx_hash, timeout) 77 | 78 | def setup_contract(self): 79 | origin_path = os.getcwd() 80 | os.chdir(self.contract_path) 81 | 82 | p = os.path.join(self.contract_path, 'hardhat.config.ts') 83 | clone_command = "git checkout -- %s" % p 84 | os.system(clone_command) 85 | with open(p, 'r') as file: 86 | file_content = file.read() 87 | 88 | # Replace the string 89 | modified_content = file_content.replace('http://0.0.0.0:8545', self.rpc_url) 90 | 91 | # Write the modified content back to the file 92 | with open(p, 'w') as file: 93 | file.write(modified_content) 94 | 95 | p = os.path.join(self.contract_path, "deployments") 96 | if os.path.exists(p): 97 | shutil.rmtree(p) 98 | 99 | p = os.path.join(self.contract_path, ".env") 100 | if os.path.exists(p): 101 | os.remove(p) 102 | 103 | try: 104 | with open(p, 'w') as file: 105 | file.write(f"DEPLOYER_KEY = \"{PRIV_KEY}\"") 106 | except IOError as e: 107 | raise e 108 | 109 | os.system("yarn") 110 | os.system("yarn build") 111 | os.system("yarn deploy zg") 112 | 113 | os.chdir(origin_path) 114 | 115 | def wait_for_transaction(self, tx_hash): 116 | w3 = Web3(HTTPProvider(self.rpc_url)) 117 | 
w3.eth.wait_for_transaction_receipt(tx_hash) 118 | 119 | def start(self): 120 | super().start(True) 121 | -------------------------------------------------------------------------------- /common/ratelimit.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net" 7 | "strings" 8 | "time" 9 | 10 | "google.golang.org/grpc/metadata" 11 | "google.golang.org/grpc/peer" 12 | ) 13 | 14 | // RequesterID is the ID of the party making the request. In the case of a rollup making a dispersal request, the Requester 15 | // ID is the authenticated Account ID. For retrieval requests, the requester ID will be the requester's IP address. 16 | type RequesterID = string 17 | 18 | type RateLimiter interface { 19 | AllowRequest(ctx context.Context, requesterID RequesterID, blobSize uint, rate RateParam) (bool, error) 20 | } 21 | 22 | type GlobalRateParams struct { 23 | // BucketSizes are the time scales at which the rate limit is enforced. 24 | // For each time scale, the rate limiter will make sure that the given rate (possibly subject to a relaxation given 25 | // by one of the Multipliers) is observed when the request bandwidth is averaged at this time scale. 26 | // In terms of implementation, the rate limiter uses a set of "time buckets". A time bucket, i, is filled to a maximum of 27 | // `BucketSizes[i]` at a rate of 1, and emptied by an amount equal to `(size of request)/RateParam` each time a 28 | // request is processed. For example, with RateParam equal to 1 MiB/s and BucketSizes[i] equal to one minute, a requester can burst roughly 60 MiB at that time scale before being throttled. 29 | BucketSizes []time.Duration 30 | // Multipliers specify how much the supplied rate limit should be relaxed for each time scale. 31 | // For the i'th BucketSize, a rate of RateParam*Multipliers[i] will be applied. 32 | Multipliers []float32 33 | // CountFailed indicates whether failed requests should be counted towards the rate limit. 34 | CountFailed bool 35 | } 36 | 37 | // RateParam is the type used for expressing a bandwidth-based rate limit in units of Bytes/second. 38 | type RateParam = uint32 39 | 40 | type RateBucketParams struct { 41 | // BucketLevels stores the amount of time contained in each bucket. For instance, if the bucket contains 1 minute, this means 42 | // that the requester can consume one minute's worth of bandwidth (in terms of amount of data, this equals RateParam * one minute) 43 | // before the rate limiter will throttle them. 44 | BucketLevels []time.Duration 45 | // LastRequestTime stores the time of the last request received from a given requester. All times are stored in UTC. 46 | LastRequestTime time.Time 47 | } 48 | 49 | // GetClientAddress returns the client address from the context. If the header is not empty, it will 50 | // take the IP address located at the `numProxies` position from the end of the header. If the IP address cannot be 51 | // found in the header, it will use the connection IP if `allowDirectConnectionFallback` is true. Otherwise, it will return 52 | // an error.
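// Worked example (editorial addition, not from the original source): if the header is
// "x-forwarded-for" carrying "203.0.113.7, 198.51.100.2, 192.0.2.1" and numProxies is 2,
// splitHeader yields [203.0.113.7 198.51.100.2 192.0.2.1] and the returned client address
// is "198.51.100.2", i.e. the entry numProxies positions from the end of the list.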
53 | func GetClientAddress(ctx context.Context, header string, numProxies int, allowDirectConnectionFallback bool) (string, error) { 54 | 55 | if header != "" && numProxies > 0 { 56 | md, ok := metadata.FromIncomingContext(ctx) 57 | if ok && len(md.Get(header)) > 0 { 58 | parts := splitHeader(md.Get(header)) 59 | if len(parts) >= numProxies { 60 | return parts[len(parts)-numProxies], nil 61 | } 62 | } 63 | } 64 | 65 | if header == "" || allowDirectConnectionFallback { 66 | p, ok := peer.FromContext(ctx) 67 | if !ok { 68 | return "", fmt.Errorf("failed to get peer from request") 69 | } 70 | addr := p.Addr.String() 71 | host, _, err := net.SplitHostPort(addr) 72 | if err != nil { 73 | return "", err 74 | } 75 | return host, nil 76 | } 77 | 78 | return "", fmt.Errorf("failed to get ip") 79 | } 80 | 81 | func splitHeader(header []string) []string { 82 | var result []string 83 | for _, h := range header { 84 | for _, p := range strings.Split(h, ",") { 85 | trimmed := strings.TrimSpace(p) 86 | if trimmed != "" { 87 | result = append(result, trimmed) 88 | } 89 | } 90 | } 91 | return result 92 | } 93 | -------------------------------------------------------------------------------- /disperser/cmd/apiserver/flags/flags.go: -------------------------------------------------------------------------------- 1 | package flags 2 | 3 | import ( 4 | "github.com/0glabs/0g-da-client/common" 5 | "github.com/0glabs/0g-da-client/common/aws" 6 | "github.com/0glabs/0g-da-client/common/logging" 7 | "github.com/0glabs/0g-da-client/common/ratelimit" 8 | "github.com/urfave/cli" 9 | ) 10 | 11 | const ( 12 | FlagPrefix = "disperser-server" 13 | EnvVarPrefix = "DISPERSER_SERVER" 14 | ) 15 | 16 | var ( 17 | /* Required Flags */ 18 | S3BucketNameFlag = cli.StringFlag{ 19 | Name: common.PrefixFlag(FlagPrefix, "s3-bucket-name"), 20 | Usage: "Name of the bucket to store blobs", 21 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "S3_BUCKET_NAME"), 22 | } 23 | DynamoDBTableNameFlag = cli.StringFlag{ 24 | Name: common.PrefixFlag(FlagPrefix, "dynamodb-table-name"), 25 | Usage: "Name of the dynamodb table to store blob metadata", 26 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "DYNAMODB_TABLE_NAME"), 27 | } 28 | GrpcPortFlag = cli.StringFlag{ 29 | Name: common.PrefixFlag(FlagPrefix, "grpc-port"), 30 | Usage: "Port at which the disperser listens for grpc calls", 31 | Required: true, 32 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "GRPC_PORT"), 33 | } 34 | /* Optional Flags */ 35 | MetricsHTTPPort = cli.StringFlag{ 36 | Name: common.PrefixFlag(FlagPrefix, "metrics-http-port"), 37 | Usage: "the HTTP port on which the Prometheus metrics server listens", 38 | Required: false, 39 | Value: "9100", 40 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "METRICS_HTTP_PORT"), 41 | } 42 | EnableMetrics = cli.BoolFlag{ 43 | Name: common.PrefixFlag(FlagPrefix, "enable-metrics"), 44 | Usage: "start metrics server", 45 | Required: false, 46 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "ENABLE_METRICS"), 47 | } 48 | EnableRatelimiter = cli.BoolFlag{ 49 | Name: common.PrefixFlag(FlagPrefix, "enable-ratelimiter"), 50 | Usage: "enable rate limiter", 51 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "ENABLE_RATELIMITER"), 52 | } 53 | BucketTableName = cli.StringFlag{ 54 | Name: common.PrefixFlag(FlagPrefix, "rate-bucket-table-name"), 55 | Usage: "name of the dynamodb table to store rate limiter buckets. 
If not provided, a local store will be used", 56 | Value: "", 57 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "RATE_BUCKET_TABLE_NAME"), 58 | } 59 | BucketStoreSize = cli.UintFlag{ 60 | Name: common.PrefixFlag(FlagPrefix, "rate-bucket-store-size"), 61 | Usage: "size (max number of entries) of the local store to use for rate limiting buckets", 62 | Value: 100_000, 63 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "RATE_BUCKET_STORE_SIZE"), 64 | Required: false, 65 | } 66 | MetadataHashAsBlobKey = cli.BoolFlag{ 67 | Name: common.PrefixFlag(FlagPrefix, "metadata-hash-as-blob-key"), 68 | Usage: "use metadata hash as blob key", 69 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "METADATA_HASH_AS_BLOB_KEY"), 70 | } 71 | RetrieverAddrName = cli.StringFlag{ 72 | Name: common.PrefixFlag(FlagPrefix, "retriever-address"), 73 | Usage: "address of the retriever", 74 | Value: "0.0.0.0:34005", 75 | EnvVar: common.PrefixEnvVar(EnvVarPrefix, "RETRIEVER_ADDRESS"), 76 | } 77 | ) 78 | 79 | var RequiredFlags = []cli.Flag{ 80 | S3BucketNameFlag, 81 | DynamoDBTableNameFlag, 82 | GrpcPortFlag, 83 | BucketTableName, 84 | } 85 | 86 | var OptionalFlags = []cli.Flag{ 87 | MetricsHTTPPort, 88 | EnableMetrics, 89 | EnableRatelimiter, 90 | BucketStoreSize, 91 | MetadataHashAsBlobKey, 92 | RetrieverAddrName, 93 | } 94 | 95 | // Flags contains the list of configuration options available to the binary. 96 | var Flags []cli.Flag 97 | 98 | func init() { 99 | Flags = append(RequiredFlags, OptionalFlags...) 100 | Flags = append(Flags, logging.CLIFlags(EnvVarPrefix, FlagPrefix)...) 101 | Flags = append(Flags, ratelimit.RatelimiterCLIFlags(EnvVarPrefix, FlagPrefix)...) 102 | Flags = append(Flags, aws.ClientFlags(EnvVarPrefix, FlagPrefix)...) 103 | } 104 | -------------------------------------------------------------------------------- /disperser/api/grpc/signer/signer_grpc.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 2 | // versions: 3 | // - protoc-gen-go-grpc v1.2.0 4 | // - protoc v4.25.5 5 | // source: signer/signer.proto 6 | 7 | package signer 8 | 9 | import ( 10 | context "context" 11 | grpc "google.golang.org/grpc" 12 | codes "google.golang.org/grpc/codes" 13 | status "google.golang.org/grpc/status" 14 | ) 15 | 16 | // This is a compile-time assertion to ensure that this generated file 17 | // is compatible with the grpc package it is being compiled against. 18 | // Requires gRPC-Go v1.32.0 or later. 19 | const _ = grpc.SupportPackageIsVersion7 20 | 21 | // SignerClient is the client API for Signer service. 22 | // 23 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 24 | type SignerClient interface { 25 | BatchSign(ctx context.Context, in *BatchSignRequest, opts ...grpc.CallOption) (*BatchSignReply, error) 26 | } 27 | 28 | type signerClient struct { 29 | cc grpc.ClientConnInterface 30 | } 31 | 32 | func NewSignerClient(cc grpc.ClientConnInterface) SignerClient { 33 | return &signerClient{cc} 34 | } 35 | 36 | func (c *signerClient) BatchSign(ctx context.Context, in *BatchSignRequest, opts ...grpc.CallOption) (*BatchSignReply, error) { 37 | out := new(BatchSignReply) 38 | err := c.cc.Invoke(ctx, "/signer.Signer/BatchSign", in, out, opts...) 39 | if err != nil { 40 | return nil, err 41 | } 42 | return out, nil 43 | } 44 | 45 | // SignerServer is the server API for Signer service. 
46 | // All implementations must embed UnimplementedSignerServer 47 | // for forward compatibility 48 | type SignerServer interface { 49 | BatchSign(context.Context, *BatchSignRequest) (*BatchSignReply, error) 50 | mustEmbedUnimplementedSignerServer() 51 | } 52 | 53 | // UnimplementedSignerServer must be embedded to have forward compatible implementations. 54 | type UnimplementedSignerServer struct { 55 | } 56 | 57 | func (UnimplementedSignerServer) BatchSign(context.Context, *BatchSignRequest) (*BatchSignReply, error) { 58 | return nil, status.Errorf(codes.Unimplemented, "method BatchSign not implemented") 59 | } 60 | func (UnimplementedSignerServer) mustEmbedUnimplementedSignerServer() {} 61 | 62 | // UnsafeSignerServer may be embedded to opt out of forward compatibility for this service. 63 | // Use of this interface is not recommended, as added methods to SignerServer will 64 | // result in compilation errors. 65 | type UnsafeSignerServer interface { 66 | mustEmbedUnimplementedSignerServer() 67 | } 68 | 69 | func RegisterSignerServer(s grpc.ServiceRegistrar, srv SignerServer) { 70 | s.RegisterService(&Signer_ServiceDesc, srv) 71 | } 72 | 73 | func _Signer_BatchSign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 74 | in := new(BatchSignRequest) 75 | if err := dec(in); err != nil { 76 | return nil, err 77 | } 78 | if interceptor == nil { 79 | return srv.(SignerServer).BatchSign(ctx, in) 80 | } 81 | info := &grpc.UnaryServerInfo{ 82 | Server: srv, 83 | FullMethod: "/signer.Signer/BatchSign", 84 | } 85 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 86 | return srv.(SignerServer).BatchSign(ctx, req.(*BatchSignRequest)) 87 | } 88 | return interceptor(ctx, in, info, handler) 89 | } 90 | 91 | // Signer_ServiceDesc is the grpc.ServiceDesc for Signer service. 
92 | // It's only intended for direct use with grpc.RegisterService, 93 | // and not to be introspected or modified (even as a copy) 94 | var Signer_ServiceDesc = grpc.ServiceDesc{ 95 | ServiceName: "signer.Signer", 96 | HandlerType: (*SignerServer)(nil), 97 | Methods: []grpc.MethodDesc{ 98 | { 99 | MethodName: "BatchSign", 100 | Handler: _Signer_BatchSign_Handler, 101 | }, 102 | }, 103 | Streams: []grpc.StreamDesc{}, 104 | Metadata: "signer/signer.proto", 105 | } 106 | -------------------------------------------------------------------------------- /disperser/encoder/metrics.go: -------------------------------------------------------------------------------- 1 | package encoder 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "time" 8 | 9 | "github.com/0glabs/0g-da-client/common" 10 | "github.com/prometheus/client_golang/prometheus" 11 | "github.com/prometheus/client_golang/prometheus/collectors" 12 | "github.com/prometheus/client_golang/prometheus/promauto" 13 | "github.com/prometheus/client_golang/prometheus/promhttp" 14 | ) 15 | 16 | type MetricsConfig struct { 17 | HTTPPort string 18 | EnableMetrics bool 19 | } 20 | 21 | type Metrics struct { 22 | logger common.Logger 23 | registry *prometheus.Registry 24 | httpPort string 25 | 26 | NumEncodeBlobRequests *prometheus.CounterVec 27 | Latency *prometheus.SummaryVec 28 | } 29 | 30 | func NewMetrics(httpPort string, logger common.Logger) *Metrics { 31 | reg := prometheus.NewRegistry() 32 | reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) 33 | reg.MustRegister(collectors.NewGoCollector()) 34 | 35 | return &Metrics{ 36 | logger: logger, 37 | registry: reg, 38 | httpPort: httpPort, 39 | NumEncodeBlobRequests: promauto.With(reg).NewCounterVec( 40 | prometheus.CounterOpts{ 41 | Namespace: "zgda_encoder", 42 | Name: "request_total", 43 | Help: "the total number of encode blob requests at the server side, per state", 44 | }, 45 | []string{"state"}, // state is one of: success, failed, ratelimited, or canceled 46 | ), 47 | Latency: promauto.With(reg).NewSummaryVec( 48 | prometheus.SummaryOpts{ 49 | Namespace: "zgda_encoder", 50 | Name: "encoding_latency_ms", 51 | Help: "latency summary in milliseconds", 52 | Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.01, 0.99: 0.001}, 53 | }, 54 | []string{"time"}, 55 | ), 56 | } 57 | } 58 | 59 | // IncrementSuccessfulBlobRequestNum increments the number of successful requests; 60 | // the counter increment is atomic. 61 | func (m *Metrics) IncrementSuccessfulBlobRequestNum() { 62 | m.NumEncodeBlobRequests.WithLabelValues("success").Inc() 63 | } 64 | 65 | // IncrementFailedBlobRequestNum increments the number of failed requests; 66 | // the counter increment is atomic. 67 | func (m *Metrics) IncrementFailedBlobRequestNum() { 68 | m.NumEncodeBlobRequests.WithLabelValues("failed").Inc() 69 | } 70 | 71 | // IncrementRateLimitedBlobRequestNum increments the number of rate limited requests; 72 | // the counter increment is atomic. 73 | func (m *Metrics) IncrementRateLimitedBlobRequestNum() { 74 | m.NumEncodeBlobRequests.WithLabelValues("ratelimited").Inc() 75 | } 76 | 77 | // IncrementCanceledBlobRequestNum increments the number of canceled requests; 78 | // the counter increment is atomic. 79 | func (m *Metrics) IncrementCanceledBlobRequestNum() { 80 | m.NumEncodeBlobRequests.WithLabelValues("canceled").Inc() 81 | } 82 | 83 | func (m *Metrics) TakeLatency(encoding, total time.Duration) { 84 | 
m.Latency.WithLabelValues("encoding").Observe(float64(encoding.Milliseconds())) 85 | m.Latency.WithLabelValues("total").Observe(float64(total.Milliseconds())) 86 | } 87 | 88 | func (m *Metrics) Start(ctx context.Context) { 89 | m.logger.Info("Starting metrics server at ", "port", m.httpPort) 90 | 91 | addr := fmt.Sprintf(":%s", m.httpPort) 92 | log := m.logger 93 | 94 | mux := http.NewServeMux() 95 | mux.Handle("/metrics", promhttp.HandlerFor(m.registry, promhttp.HandlerOpts{})) 96 | 97 | server := &http.Server{Addr: addr, Handler: mux} 98 | errc := make(chan error, 1) 99 | 100 | go func() { 101 | errc <- server.ListenAndServe() 102 | }() 103 | go func() { 104 | select { 105 | case <-ctx.Done(): 106 | m.shutdown(server) 107 | return 108 | case err := <-errc: 109 | log.Error("Prometheus server failed", "err", err) 110 | } 111 | }() 112 | } 113 | 114 | func (m *Metrics) shutdown(server *http.Server) { 115 | ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) 116 | defer cancel() 117 | _ = server.Shutdown(ctx) 118 | } 119 | -------------------------------------------------------------------------------- /disperser/api/grpc/encoder/encoder_grpc.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 2 | // versions: 3 | // - protoc-gen-go-grpc v1.2.0 4 | // - protoc v4.25.5 5 | // source: encoder/encoder.proto 6 | 7 | package encoder 8 | 9 | import ( 10 | context "context" 11 | grpc "google.golang.org/grpc" 12 | codes "google.golang.org/grpc/codes" 13 | status "google.golang.org/grpc/status" 14 | ) 15 | 16 | // This is a compile-time assertion to ensure that this generated file 17 | // is compatible with the grpc package it is being compiled against. 18 | // Requires gRPC-Go v1.32.0 or later. 19 | const _ = grpc.SupportPackageIsVersion7 20 | 21 | // EncoderClient is the client API for Encoder service. 22 | // 23 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 24 | type EncoderClient interface { 25 | EncodeBlob(ctx context.Context, in *EncodeBlobRequest, opts ...grpc.CallOption) (*EncodeBlobReply, error) 26 | } 27 | 28 | type encoderClient struct { 29 | cc grpc.ClientConnInterface 30 | } 31 | 32 | func NewEncoderClient(cc grpc.ClientConnInterface) EncoderClient { 33 | return &encoderClient{cc} 34 | } 35 | 36 | func (c *encoderClient) EncodeBlob(ctx context.Context, in *EncodeBlobRequest, opts ...grpc.CallOption) (*EncodeBlobReply, error) { 37 | out := new(EncodeBlobReply) 38 | err := c.cc.Invoke(ctx, "/encoder.Encoder/EncodeBlob", in, out, opts...) 39 | if err != nil { 40 | return nil, err 41 | } 42 | return out, nil 43 | } 44 | 45 | // EncoderServer is the server API for Encoder service. 46 | // All implementations must embed UnimplementedEncoderServer 47 | // for forward compatibility 48 | type EncoderServer interface { 49 | EncodeBlob(context.Context, *EncodeBlobRequest) (*EncodeBlobReply, error) 50 | mustEmbedUnimplementedEncoderServer() 51 | } 52 | 53 | // UnimplementedEncoderServer must be embedded to have forward compatible implementations. 
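// A hedged sketch (editorial addition; the server type below is hypothetical, not from this repo)
// of the embedding pattern this requires, so that adding RPCs to the proto later cannot break builds:
//
//	type encoderServerImpl struct {
//	    UnimplementedEncoderServer // satisfies mustEmbedUnimplementedEncoderServer
//	}
//
//	func (s *encoderServerImpl) EncodeBlob(ctx context.Context, req *EncodeBlobRequest) (*EncodeBlobReply, error) {
//	    return &EncodeBlobReply{}, nil // real encoding logic would go here
//	}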
54 | type UnimplementedEncoderServer struct { 55 | } 56 | 57 | func (UnimplementedEncoderServer) EncodeBlob(context.Context, *EncodeBlobRequest) (*EncodeBlobReply, error) { 58 | return nil, status.Errorf(codes.Unimplemented, "method EncodeBlob not implemented") 59 | } 60 | func (UnimplementedEncoderServer) mustEmbedUnimplementedEncoderServer() {} 61 | 62 | // UnsafeEncoderServer may be embedded to opt out of forward compatibility for this service. 63 | // Use of this interface is not recommended, as added methods to EncoderServer will 64 | // result in compilation errors. 65 | type UnsafeEncoderServer interface { 66 | mustEmbedUnimplementedEncoderServer() 67 | } 68 | 69 | func RegisterEncoderServer(s grpc.ServiceRegistrar, srv EncoderServer) { 70 | s.RegisterService(&Encoder_ServiceDesc, srv) 71 | } 72 | 73 | func _Encoder_EncodeBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 74 | in := new(EncodeBlobRequest) 75 | if err := dec(in); err != nil { 76 | return nil, err 77 | } 78 | if interceptor == nil { 79 | return srv.(EncoderServer).EncodeBlob(ctx, in) 80 | } 81 | info := &grpc.UnaryServerInfo{ 82 | Server: srv, 83 | FullMethod: "/encoder.Encoder/EncodeBlob", 84 | } 85 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 86 | return srv.(EncoderServer).EncodeBlob(ctx, req.(*EncodeBlobRequest)) 87 | } 88 | return interceptor(ctx, in, info, handler) 89 | } 90 | 91 | // Encoder_ServiceDesc is the grpc.ServiceDesc for Encoder service. 92 | // It's only intended for direct use with grpc.RegisterService, 93 | // and not to be introspected or modified (even as a copy) 94 | var Encoder_ServiceDesc = grpc.ServiceDesc{ 95 | ServiceName: "encoder.Encoder", 96 | HandlerType: (*EncoderServer)(nil), 97 | Methods: []grpc.MethodDesc{ 98 | { 99 | MethodName: "EncodeBlob", 100 | Handler: _Encoder_EncodeBlob_Handler, 101 | }, 102 | }, 103 | Streams: []grpc.StreamDesc{}, 104 | Metadata: "encoder/encoder.proto", 105 | } 106 | -------------------------------------------------------------------------------- /api/proto/disperser/disperser.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option go_package = "github.com/0glabs/0g-da-client/api/grpc/disperser"; 4 | package disperser; 5 | 6 | // Disperser defines the public APIs for dispersing blobs. 7 | service Disperser { 8 | // This API accepts blobs to disperse from clients. 9 | // This executes the dispersal asynchronously, i.e. it returns once the request 10 | // is accepted. The client can use the GetBlobStatus() API to poll the 11 | // processing status of the blob. 12 | rpc DisperseBlob(DisperseBlobRequest) returns (DisperseBlobReply) {} 13 | 14 | // This API is meant to be polled for the blob status. 15 | rpc GetBlobStatus(BlobStatusRequest) returns (BlobStatusReply) {} 16 | 17 | // This retrieves the requested blob from the Disperser's backend. 18 | // The blob should have been initially dispersed via this Disperser service 19 | // for this API to work. 20 | rpc RetrieveBlob(RetrieveBlobRequest) returns (RetrieveBlobReply) {} 21 | } 22 | 23 | // Requests and Responses 24 | 25 | message DisperseBlobRequest { 26 | // The data to be dispersed. 27 | // The size of data must be <= 31744 KiB. 28 | bytes data = 1; 29 | } 30 | 31 | message DisperseBlobReply { 32 | // The status of the blob associated with the request_id. 
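// Illustrative client flow (editorial note, inferred from the RPC comments above and not
// part of the original proto): call DisperseBlob(data) to obtain request_id, then poll
// GetBlobStatus(request_id) until this status reaches CONFIRMED, FINALIZED, FAILED, or
// INSUFFICIENT_SIGNATURES.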
33 | BlobStatus result = 1; 34 | // The request ID generated by the disperser. 35 | // Once a request is accepted (although not processed), a unique request ID will be 36 | // generated. 37 | // Two different DisperseBlobRequests (determined by the hash of the DisperseBlobRequest) 38 | // will have different IDs, and the same DisperseBlobRequest sent repeatedly at different 39 | // times will also have different IDs. 40 | // The client should use this ID to query the processing status of the request (via 41 | // the GetBlobStatus API). 42 | bytes request_id = 2; 43 | } 44 | 45 | // BlobStatusRequest is used to query the status of a blob. 46 | message BlobStatusRequest { 47 | bytes request_id = 1; 48 | } 49 | 50 | message BlobStatusReply { 51 | // The status of the blob. 52 | BlobStatus status = 1; 53 | // The blob info needed for clients to confirm the blob against the ZGDA contracts. 54 | BlobInfo info = 2; 55 | } 56 | 57 | // RetrieveBlobRequest contains parameters to retrieve the blob. 58 | message RetrieveBlobRequest { 59 | // The storage hash of data 60 | bytes storage_root = 1; 61 | // This identifies the epoch that this blob belongs to. 62 | uint64 epoch = 2; 63 | // Which quorum of the blob this is requesting for. 64 | uint64 quorum_id = 3; 65 | } 66 | 67 | // RetrieveBlobReply contains the retrieved blob data 68 | message RetrieveBlobReply { 69 | bytes data = 1; 70 | } 71 | 72 | // Data Types 73 | 74 | enum BlobStatus { 75 | UNKNOWN = 0; 76 | 77 | // Intermediate states 78 | 79 | // PROCESSING means that the blob is currently being processed by the disperser 80 | PROCESSING = 1; 81 | // CONFIRMED means that the blob has been dispersed to DA Nodes and the dispersed 82 | // batch containing the blob has been confirmed onchain 83 | CONFIRMED = 2; 84 | 85 | // Terminal states 86 | 87 | // FAILED means that the blob has failed permanently (for reasons other than insufficient 88 | // signatures, which is a separate state) 89 | FAILED = 3; 90 | // FINALIZED means that the block containing the blob's confirmation transaction has been finalized on Ethereum 91 | FINALIZED = 4; 92 | // INSUFFICIENT_SIGNATURES means that the quorum threshold for the blob was not met 93 | // for at least one quorum. 
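// For example (editorial note, illustrative numbers): with a quorum threshold of 50, if
// fewer than 50% of the required signatures are collected for some quorum, the blob ends
// in this state.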
94 | INSUFFICIENT_SIGNATURES = 5; 95 | } 96 | 97 | // Types below correspond to the types necessary to verify a blob 98 | // https://github.com/0glabs/0g-da-client/blob/master/contracts/src/libraries/ZGDABlobUtils.sol#L29 99 | 100 | // BlobInfo contains information needed to confirm the blob against the ZGDA contracts 101 | message BlobInfo { 102 | BlobHeader blob_header = 1; 103 | } 104 | 105 | message BlobHeader { 106 | // The data merkle root 107 | bytes storage_root = 1; 108 | // Signers epoch 109 | uint64 epoch = 2; 110 | // Signers quorum id 111 | uint64 quorum_id = 3; 112 | } 113 | -------------------------------------------------------------------------------- /tests/da_test_framework/da_server.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | import grpc 5 | import disperser_pb2 as pb2 6 | import disperser_pb2_grpc as pb2_grpc 7 | from da_test_framework.da_node_type import DANodeType 8 | 9 | sys.path.append("../0g-storage-kv/tests") 10 | 11 | from da_test_framework.blockchain_node import TestNode 12 | from config.node_config import PRIV_KEY 13 | 14 | 15 | __file_path__ = os.path.dirname(os.path.realpath(__file__)) 16 | 17 | 18 | class DAServer(TestNode): 19 | def __init__( 20 | self, 21 | root_dir, 22 | binary, 23 | updated_config, 24 | log, 25 | ): 26 | local_conf = dict(log_config_file="log_config") 27 | local_conf.update(updated_config) 28 | 29 | data_dir = os.path.join(root_dir, "da_server") 30 | self.grpc_url = f"0.0.0.0:{local_conf['grpc_port']}" 31 | super().__init__( 32 | DANodeType.DA_SERVER, 33 | 13, 34 | data_dir, 35 | None, 36 | binary, 37 | local_conf, 38 | log, 39 | None, 40 | ) 41 | self.args = [ 42 | binary, 43 | "--chain.rpc", local_conf['blockchain_rpc_endpoint'], 44 | "--chain.private-key", PRIV_KEY, 45 | "--chain.receipt-wait-rounds", "180", 46 | "--chain.receipt-wait-interval", "1s", 47 | "--chain.gas-limit", "2000000", 48 | "--combined-server.use-memory-db", 49 | "--batcher.da-entrance-contract", "0x64fcfde2350E08E7BaDc18771a7674FAb5E137a2", 50 | "--batcher.da-signers-contract", "0x0000000000000000000000000000000000001000", 51 | "--combined-server.storage.kv-db-path", "./run/", 52 | "--combined-server.storage.time-to-expire", "300", 53 | "--disperser-server.s3-bucket-name", "test-zgda-blobstore", 54 | "--disperser-server.dynamodb-table-name", "test-BlobMetadata", 55 | "--disperser-server.grpc-port", local_conf['grpc_port'], 56 | "--batcher.s3-bucket-name", "test-zgda-blobstore", 57 | "--batcher.dynamodb-table-name", "test-BlobMetadata", 58 | "--batcher.pull-interval", "30s", 59 | "--batcher.signed-pull-interval", "60s", 60 | "--batcher.finalizer-interval", "20s", 61 | "--batcher.confirmer-num", "3", 62 | "--batcher.max-num-retries-for-sign", "10", 63 | "--batcher.finalized-block-count", "50", 64 | "--batcher.encoding-request-queue-size", "1", 65 | "--encoder-socket", local_conf['encoder_socket'], 66 | "--batcher.batch-size-limit", "500", 67 | "--encoding-timeout", "100s", 68 | "--chain-read-timeout", "12s", 69 | "--chain-write-timeout", "13s", 70 | "--combined-server.log.level-file", "trace", 71 | "--combined-server.log.level-std", "trace" 72 | ] 73 | 74 | def wait_for_rpc_connection(self): 75 | # TODO: health check of service availability 76 | time.sleep(3) 77 | self.channel = grpc.insecure_channel(self.grpc_url) 78 | # bind the client and the server 79 | self.stub = pb2_grpc.DisperserStub(self.channel) 80 | 81 | def start(self): 82 | self.log.info("Start DA server") 83 | 
super().start() 84 | 85 | def stop(self): 86 | self.log.info("Stop DA server") 87 | super().stop(kill=True, wait=False) 88 | 89 | def disperse_blob(self, data): 90 | message = pb2.DisperseBlobRequest(data=data, security_params=[pb2.SecurityParams(quorum_id=0, adversary_threshold=25, quorum_threshold=50)], target_row_num=16) 91 | return self.stub.DisperseBlob(message) 92 | 93 | def retrieve_blob(self, info): 94 | message = pb2.RetrieveBlobRequest(batch_header_hash=info.blob_verification_proof.batch_metadata.batch_header_hash, blob_index=info.blob_verification_proof.blob_index) 95 | return self.stub.RetrieveBlob(message) 96 | 97 | def get_blob_status(self, request_id): 98 | message = pb2.BlobStatusRequest(request_id=request_id) 99 | return self.stub.GetBlobStatus(message) 100 | -------------------------------------------------------------------------------- /disperser/signer/client.go: -------------------------------------------------------------------------------- 1 | package signer 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "regexp" 8 | "strings" 9 | "time" 10 | 11 | "github.com/0glabs/0g-da-client/common" 12 | "github.com/0glabs/0g-da-client/core" 13 | "github.com/0glabs/0g-da-client/disperser" 14 | pb "github.com/0glabs/0g-da-client/disperser/api/grpc/signer" 15 | bn "github.com/consensys/gnark-crypto/ecc/bn254" 16 | "github.com/consensys/gnark-crypto/ecc/bn254/fp" 17 | "google.golang.org/grpc" 18 | "google.golang.org/grpc/credentials/insecure" 19 | ) 20 | 21 | const ipv4WithPortPattern = `\b(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?::\d{1,5})\b` 22 | const ipv4Pattern = `\b(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b` 23 | const portPattern = `\b(\d{1,5})\b` 24 | 25 | type client struct { 26 | timeout time.Duration 27 | ipv4Regex *regexp.Regexp 28 | } 29 | 30 | func NewSignerClient(timeout time.Duration) (disperser.SignerClient, error) { 31 | regex := regexp.MustCompile(ipv4WithPortPattern) 32 | 33 | return client{ 34 | timeout: timeout, 35 | ipv4Regex: regex, 36 | }, nil 37 | } 38 | 39 | func (c client) BatchSign(ctx context.Context, addr string, data []*pb.SignRequest, log common.Logger) ([]*core.Signature, error) { 40 | matches := c.ipv4Regex.FindAllString(addr, -1) 41 | if len(matches) != 1 { 42 | formattedAddr := "" 43 | prefix := "http://" 44 | if strings.HasPrefix(strings.ToLower(addr), prefix) { 45 | addr = addr[len(prefix):] 46 | } 47 | 48 | idx := strings.Index(addr, ":") 49 | if idx != -1 { 50 | ipv4Reg := regexp.MustCompile(ipv4Pattern) 51 | matches := ipv4Reg.FindAllString(addr[:idx], -1) 52 | if len(matches) == 1 { 53 | formattedAddr = matches[0] 54 | 55 | portReg := regexp.MustCompile(portPattern) 56 | matches := portReg.FindAllString(addr[idx+1:], -1) 57 | if len(matches) == 1 { 58 | formattedAddr += ":" + matches[0] 59 | } else { 60 | formattedAddr = "" 61 | } 62 | } 63 | } 64 | 65 | if formattedAddr != "" { 66 | addr = formattedAddr 67 | } 68 | } else { 69 | addr = matches[0] 70 | } 71 | 72 | ctxWithTimeout, cancel := context.WithTimeout(ctx, c.timeout) 73 | defer cancel() 74 | conn, err := grpc.DialContext( 75 | ctxWithTimeout, 76 | addr, 77 | grpc.WithTransportCredentials(insecure.NewCredentials()), 78 | grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(1024*1024*1024)), // 1 GiB 79 | ) 80 | if err != nil { 
81 | return nil, fmt.Errorf("failed to dial signer: %w", err) 82 | } 83 | defer conn.Close() 84 | 85 | signer := pb.NewSignerClient(conn) 86 | // requests := make([]*pb.SignRequest, 0, len(data)) 87 | // for i, req := range data { 88 | // requests[i] = &pb.SignRequest{ 89 | // Epoch: req.Epoch, 90 | // ErasureCommitment: req.ErasureCommitment, 91 | // StorageRoot: req.StorageRoot, 92 | // EncodedSlice: req.EncodedSlice, 93 | // } 94 | // } 95 | 96 | reply, err := signer.BatchSign(ctx, &pb.BatchSignRequest{ 97 | Requests: data, 98 | }) 99 | if err != nil { 100 | return nil, err 101 | } 102 | 103 | sigBytes := reply.GetSignatures() 104 | signatures := make([]*core.Signature, len(data)) 105 | for i := 0; i < len(data); i++ { 106 | signature := sigBytes[i] 107 | signature, err := toBigEndian(signature) 108 | if err != nil { 109 | return nil, err 110 | } 111 | point, err := new(core.Signature).Deserialize(signature) 112 | if err != nil { 113 | return nil, err 114 | } 115 | 116 | signatures[i] = &core.Signature{G1Point: point} 117 | } 118 | 119 | return signatures, nil 120 | } 121 | 122 | func toBigEndian(b []byte) ([]byte, error) { 123 | if len(b) != bn.SizeOfG1AffineUncompressed { 124 | return nil, io.ErrShortBuffer 125 | } 126 | 127 | b[bn.SizeOfG1AffineUncompressed-1] &= 63 128 | for i := 0; i < fp.Bytes/2; i++ { 129 | b[i], b[fp.Bytes-i-1] = b[fp.Bytes-i-1], b[i] 130 | } 131 | 132 | for i := fp.Bytes; i < fp.Bytes+fp.Bytes/2; i++ { 133 | b[i], b[len(b)-(i-fp.Bytes)-1] = b[len(b)-(i-fp.Bytes)-1], b[i] 134 | } 135 | 136 | return b, nil 137 | } 138 | -------------------------------------------------------------------------------- /api/grpc/retriever/retriever_grpc.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 2 | // versions: 3 | // - protoc-gen-go-grpc v1.2.0 4 | // - protoc v4.25.5 5 | // source: retriever/retriever.proto 6 | 7 | package retriever 8 | 9 | import ( 10 | context "context" 11 | grpc "google.golang.org/grpc" 12 | codes "google.golang.org/grpc/codes" 13 | status "google.golang.org/grpc/status" 14 | ) 15 | 16 | // This is a compile-time assertion to ensure that this generated file 17 | // is compatible with the grpc package it is being compiled against. 18 | // Requires gRPC-Go v1.32.0 or later. 19 | const _ = grpc.SupportPackageIsVersion7 20 | 21 | // RetrieverClient is the client API for Retriever service. 22 | // 23 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 24 | type RetrieverClient interface { 25 | // This fans out request to ZGDA Nodes to retrieve the chunks and returns the 26 | // reconstructed original blob in response. 27 | RetrieveBlob(ctx context.Context, in *BlobRequest, opts ...grpc.CallOption) (*BlobReply, error) 28 | } 29 | 30 | type retrieverClient struct { 31 | cc grpc.ClientConnInterface 32 | } 33 | 34 | func NewRetrieverClient(cc grpc.ClientConnInterface) RetrieverClient { 35 | return &retrieverClient{cc} 36 | } 37 | 38 | func (c *retrieverClient) RetrieveBlob(ctx context.Context, in *BlobRequest, opts ...grpc.CallOption) (*BlobReply, error) { 39 | out := new(BlobReply) 40 | err := c.cc.Invoke(ctx, "/retriever.Retriever/RetrieveBlob", in, out, opts...) 41 | if err != nil { 42 | return nil, err 43 | } 44 | return out, nil 45 | } 46 | 47 | // RetrieverServer is the server API for Retriever service. 
48 | // All implementations must embed UnimplementedRetrieverServer 49 | // for forward compatibility 50 | type RetrieverServer interface { 51 | // This fans out request to ZGDA Nodes to retrieve the chunks and returns the 52 | // reconstructed original blob in response. 53 | RetrieveBlob(context.Context, *BlobRequest) (*BlobReply, error) 54 | mustEmbedUnimplementedRetrieverServer() 55 | } 56 | 57 | // UnimplementedRetrieverServer must be embedded to have forward compatible implementations. 58 | type UnimplementedRetrieverServer struct { 59 | } 60 | 61 | func (UnimplementedRetrieverServer) RetrieveBlob(context.Context, *BlobRequest) (*BlobReply, error) { 62 | return nil, status.Errorf(codes.Unimplemented, "method RetrieveBlob not implemented") 63 | } 64 | func (UnimplementedRetrieverServer) mustEmbedUnimplementedRetrieverServer() {} 65 | 66 | // UnsafeRetrieverServer may be embedded to opt out of forward compatibility for this service. 67 | // Use of this interface is not recommended, as added methods to RetrieverServer will 68 | // result in compilation errors. 69 | type UnsafeRetrieverServer interface { 70 | mustEmbedUnimplementedRetrieverServer() 71 | } 72 | 73 | func RegisterRetrieverServer(s grpc.ServiceRegistrar, srv RetrieverServer) { 74 | s.RegisterService(&Retriever_ServiceDesc, srv) 75 | } 76 | 77 | func _Retriever_RetrieveBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 78 | in := new(BlobRequest) 79 | if err := dec(in); err != nil { 80 | return nil, err 81 | } 82 | if interceptor == nil { 83 | return srv.(RetrieverServer).RetrieveBlob(ctx, in) 84 | } 85 | info := &grpc.UnaryServerInfo{ 86 | Server: srv, 87 | FullMethod: "/retriever.Retriever/RetrieveBlob", 88 | } 89 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 90 | return srv.(RetrieverServer).RetrieveBlob(ctx, req.(*BlobRequest)) 91 | } 92 | return interceptor(ctx, in, info, handler) 93 | } 94 | 95 | // Retriever_ServiceDesc is the grpc.ServiceDesc for Retriever service. 96 | // It's only intended for direct use with grpc.RegisterService, 97 | // and not to be introspected or modified (even as a copy) 98 | var Retriever_ServiceDesc = grpc.ServiceDesc{ 99 | ServiceName: "retriever.Retriever", 100 | HandlerType: (*RetrieverServer)(nil), 101 | Methods: []grpc.MethodDesc{ 102 | { 103 | MethodName: "RetrieveBlob", 104 | Handler: _Retriever_RetrieveBlob_Handler, 105 | }, 106 | }, 107 | Streams: []grpc.StreamDesc{}, 108 | Metadata: "retriever/retriever.proto", 109 | } 110 | -------------------------------------------------------------------------------- /disperser/api/grpc/retriever/retriever_grpc.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 2 | // versions: 3 | // - protoc-gen-go-grpc v1.2.0 4 | // - protoc v4.25.5 5 | // source: retriever/retriever.proto 6 | 7 | package retriever 8 | 9 | import ( 10 | context "context" 11 | grpc "google.golang.org/grpc" 12 | codes "google.golang.org/grpc/codes" 13 | status "google.golang.org/grpc/status" 14 | ) 15 | 16 | // This is a compile-time assertion to ensure that this generated file 17 | // is compatible with the grpc package it is being compiled against. 18 | // Requires gRPC-Go v1.32.0 or later. 19 | const _ = grpc.SupportPackageIsVersion7 20 | 21 | // RetrieverClient is the client API for Retriever service. 
22 | // 23 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 24 | type RetrieverClient interface { 25 | // This fans out request to ZGDA Nodes to retrieve the chunks and returns the 26 | // reconstructed original blob in response. 27 | RetrieveBlob(ctx context.Context, in *BlobRequest, opts ...grpc.CallOption) (*BlobReply, error) 28 | } 29 | 30 | type retrieverClient struct { 31 | cc grpc.ClientConnInterface 32 | } 33 | 34 | func NewRetrieverClient(cc grpc.ClientConnInterface) RetrieverClient { 35 | return &retrieverClient{cc} 36 | } 37 | 38 | func (c *retrieverClient) RetrieveBlob(ctx context.Context, in *BlobRequest, opts ...grpc.CallOption) (*BlobReply, error) { 39 | out := new(BlobReply) 40 | err := c.cc.Invoke(ctx, "/retriever.Retriever/RetrieveBlob", in, out, opts...) 41 | if err != nil { 42 | return nil, err 43 | } 44 | return out, nil 45 | } 46 | 47 | // RetrieverServer is the server API for Retriever service. 48 | // All implementations must embed UnimplementedRetrieverServer 49 | // for forward compatibility 50 | type RetrieverServer interface { 51 | // This fans out request to ZGDA Nodes to retrieve the chunks and returns the 52 | // reconstructed original blob in response. 53 | RetrieveBlob(context.Context, *BlobRequest) (*BlobReply, error) 54 | mustEmbedUnimplementedRetrieverServer() 55 | } 56 | 57 | // UnimplementedRetrieverServer must be embedded to have forward compatible implementations. 58 | type UnimplementedRetrieverServer struct { 59 | } 60 | 61 | func (UnimplementedRetrieverServer) RetrieveBlob(context.Context, *BlobRequest) (*BlobReply, error) { 62 | return nil, status.Errorf(codes.Unimplemented, "method RetrieveBlob not implemented") 63 | } 64 | func (UnimplementedRetrieverServer) mustEmbedUnimplementedRetrieverServer() {} 65 | 66 | // UnsafeRetrieverServer may be embedded to opt out of forward compatibility for this service. 67 | // Use of this interface is not recommended, as added methods to RetrieverServer will 68 | // result in compilation errors. 69 | type UnsafeRetrieverServer interface { 70 | mustEmbedUnimplementedRetrieverServer() 71 | } 72 | 73 | func RegisterRetrieverServer(s grpc.ServiceRegistrar, srv RetrieverServer) { 74 | s.RegisterService(&Retriever_ServiceDesc, srv) 75 | } 76 | 77 | func _Retriever_RetrieveBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 78 | in := new(BlobRequest) 79 | if err := dec(in); err != nil { 80 | return nil, err 81 | } 82 | if interceptor == nil { 83 | return srv.(RetrieverServer).RetrieveBlob(ctx, in) 84 | } 85 | info := &grpc.UnaryServerInfo{ 86 | Server: srv, 87 | FullMethod: "/retriever.Retriever/RetrieveBlob", 88 | } 89 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 90 | return srv.(RetrieverServer).RetrieveBlob(ctx, req.(*BlobRequest)) 91 | } 92 | return interceptor(ctx, in, info, handler) 93 | } 94 | 95 | // Retriever_ServiceDesc is the grpc.ServiceDesc for Retriever service. 
96 | // It's only intended for direct use with grpc.RegisterService, 97 | // and not to be introspected or modified (even as a copy) 98 | var Retriever_ServiceDesc = grpc.ServiceDesc{ 99 | ServiceName: "retriever.Retriever", 100 | HandlerType: (*RetrieverServer)(nil), 101 | Methods: []grpc.MethodDesc{ 102 | { 103 | MethodName: "RetrieveBlob", 104 | Handler: _Retriever_RetrieveBlob_Handler, 105 | }, 106 | }, 107 | Streams: []grpc.StreamDesc{}, 108 | Metadata: "retriever/retriever.proto", 109 | } 110 | -------------------------------------------------------------------------------- /common/common_test.go: -------------------------------------------------------------------------------- 1 | package common_test 2 | 3 | import ( 4 | "encoding/hex" 5 | "testing" 6 | 7 | "github.com/0glabs/0g-da-client/common" 8 | "github.com/0glabs/0g-da-client/core" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | var ( 13 | gettysburgAddressBytes = []byte("Fourscore and seven years ago our fathers brought forth, on this continent, a new nation, conceived in liberty, and dedicated to the proposition that all men are created equal. Now we are engaged in a great civil war, testing whether that nation, or any nation so conceived, and so dedicated, can long endure. We are met on a great battle-field of that war. We have come to dedicate a portion of that field, as a final resting-place for those who here gave their lives, that that nation might live. It is altogether fitting and proper that we should do this. But, in a larger sense, we cannot dedicate, we cannot consecrate—we cannot hallow—this ground. The brave men, living and dead, who struggled here, have consecrated it far above our poor power to add or detract. The world will little note, nor long remember what we say here, but it can never forget what they did here. It is for us the living, rather, to be dedicated here to the unfinished work which they who fought here have thus far so nobly advanced. 
It is rather for us to be here dedicated to the great task remaining before us—that from these honored dead we take increased devotion to that cause for which they here gave the last full measure of devotion—that we here highly resolve that these dead shall not have died in vain—that this nation, under God, shall have a new birth of freedom, and that government of the people, by the people, for the people, shall not perish from the earth.") 14 | ) 15 | 16 | func TestPrefixEnvVar(t *testing.T) { 17 | assert.Equal(t, "prefix_suffix", common.PrefixEnvVar("prefix", "suffix")) 18 | } 19 | 20 | func TestHashBlob(t *testing.T) { 21 | blob := &core.Blob{ 22 | RequestHeader: core.BlobRequestHeader{ 23 | SecurityParams: []*core.SecurityParam{ 24 | { 25 | QuorumID: 0, 26 | AdversaryThreshold: 80, 27 | }, 28 | }, 29 | }, 30 | Data: gettysburgAddressBytes, 31 | } 32 | blobHash, err := common.Hash[*core.Blob](blob) 33 | blobKey := hex.EncodeToString(blobHash) 34 | assert.Nil(t, err) 35 | assert.Len(t, blobKey, 64) 36 | } 37 | 38 | func TestHash(t *testing.T) { 39 | hash, err := common.Hash[string]("test") 40 | assert.Nil(t, err) 41 | assert.Equal(t, []byte{0x6f, 0xe3, 0x18, 0xf, 0x70, 0x0, 0x90, 0x69, 0x72, 0x85, 0xac, 0x1e, 0xe, 0x8d, 0xc4, 0x0, 0x25, 0x93, 0x73, 0xd7, 0xbb, 0x94, 0xf0, 0xb1, 0xa9, 0xb0, 0x86, 0xe7, 0xba, 0x22, 0xdc, 0x3d}, hash) 42 | } 43 | 44 | func TestEncodeToBytes(t *testing.T) { 45 | bytes, err := common.EncodeToBytes[string]("test") 46 | assert.Nil(t, err) 47 | assert.Equal(t, []byte{0x64, 0x74, 0x65, 0x73, 0x74}, bytes) 48 | } 49 | 50 | func TestDecodeFromBytes(t *testing.T) { 51 | str, err := common.DecodeFromBytes[string]([]byte{0x64, 0x74, 0x65, 0x73, 0x74}) 52 | assert.Nil(t, err) 53 | assert.Equal(t, "test", str) 54 | } 55 | 56 | func TestEncodeDecode(t *testing.T) { 57 | s := "test" 58 | bytes, err := common.EncodeToBytes[string](s) 59 | assert.Nil(t, err) 60 | str, err := common.DecodeFromBytes[string](bytes) 61 | assert.Nil(t, err) 62 | assert.Equal(t, s, str) 63 | } 64 | 65 | func TestEncodeDecodeStruct(t *testing.T) { 66 | type testStruct struct { 67 | A string 68 | B int 69 | } 70 | s := testStruct{"test", 1} 71 | bytes, err := common.EncodeToBytes[testStruct](s) 72 | assert.Nil(t, err) 73 | str, err := common.DecodeFromBytes[testStruct](bytes) 74 | assert.Nil(t, err) 75 | assert.Equal(t, s, str) 76 | } 77 | 78 | func TestEncodeDecodeStructWithSlice(t *testing.T) { 79 | type testStruct struct { 80 | A []string 81 | B int 82 | } 83 | s := testStruct{[]string{"test", "test2"}, 1} 84 | bytes, err := common.EncodeToBytes[testStruct](s) 85 | assert.Nil(t, err) 86 | str, err := common.DecodeFromBytes[testStruct](bytes) 87 | assert.Nil(t, err) 88 | assert.Equal(t, s, str) 89 | } 90 | 91 | func TestEncodeDecodeStructWithMap(t *testing.T) { 92 | type testStruct struct { 93 | A map[string]string 94 | B int 95 | } 96 | s := testStruct{map[string]string{"test": "test", "test2": "test2"}, 1} 97 | bytes, err := common.EncodeToBytes[testStruct](s) 98 | assert.Nil(t, err) 99 | str, err := common.DecodeFromBytes[testStruct](bytes) 100 | assert.Nil(t, err) 101 | assert.Equal(t, s, str) 102 | } 103 | 104 | func TestEncodeDecodeStructWithPointer(t *testing.T) { 105 | type testStruct struct { 106 | A *string 107 | B int 108 | } 109 | p := "test" 110 | s := testStruct{&p, 1} 111 | bytes, err := common.EncodeToBytes[testStruct](s) 112 | assert.Nil(t, err) 113 | str, err := common.DecodeFromBytes[testStruct](bytes) 114 | assert.Nil(t, err) 115 | assert.Equal(t, s, str) 116 | } 117 | 
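// Editorial note (hedged): the expected byte patterns above, 0x64 followed by the bytes of
// "test", are consistent with CBOR, where 0x64 is the initial byte of a 4-character text
// string. This suggests EncodeToBytes/DecodeFromBytes wrap a CBOR codec, though the exact
// codec is an assumption not confirmed by this document.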
-------------------------------------------------------------------------------- /common/ratelimit/limiter_cli.go: -------------------------------------------------------------------------------- 1 | package ratelimit 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "strconv" 7 | "time" 8 | 9 | "github.com/0glabs/0g-da-client/common" 10 | "github.com/urfave/cli" 11 | ) 12 | 13 | const ( 14 | BucketSizesFlagName = "bucket-sizes" 15 | BucketMultipliersFlagName = "bucket-multipliers" 16 | CountFailedFlagName = "count-failed" 17 | BucketStoreSizeFlagName = "bucket-store-size" 18 | AllowlistFlagName = "allowlist" 19 | MaxWriteRequestPerMinuteFlagName = "max-write-request-per-minute" 20 | MaxReadRequestPerMinuteFlagName = "max-read-request-per-minute" 21 | ) 22 | 23 | type Config struct { 24 | common.GlobalRateParams 25 | BucketStoreSize int 26 | UniformRateParam common.RateParam 27 | Allowlist []string 28 | MaxWriteRequestPerMinute int 29 | MaxReadRequestPerMinute int 30 | } 31 | 32 | func RatelimiterCLIFlags(envPrefix string, flagPrefix string) []cli.Flag { 33 | bucketSizes := cli.StringSlice([]string{"1s"}) 34 | bucketMultipliers := cli.StringSlice([]string{"1"}) 35 | 36 | return []cli.Flag{ 37 | cli.StringSliceFlag{ 38 | Name: common.PrefixFlag(flagPrefix, BucketSizesFlagName), 39 | Usage: "Bucket sizes (duration)", 40 | Value: &bucketSizes, 41 | EnvVar: common.PrefixEnvVar(envPrefix, "BUCKET_SIZES"), 42 | }, 43 | cli.StringSliceFlag{ 44 | Name: common.PrefixFlag(flagPrefix, BucketMultipliersFlagName), 45 | Usage: "Bucket multipliers (float)", 46 | Value: &bucketMultipliers, 47 | EnvVar: common.PrefixEnvVar(envPrefix, "BUCKET_MULTIPLIERS"), 48 | }, 49 | cli.BoolFlag{ 50 | Name: common.PrefixFlag(flagPrefix, CountFailedFlagName), 51 | Usage: "Count failed requests", 52 | EnvVar: common.PrefixEnvVar(envPrefix, "COUNT_FAILED"), 53 | }, 54 | cli.IntFlag{ 55 | Name: common.PrefixFlag(flagPrefix, BucketStoreSizeFlagName), 56 | Usage: "Bucket store size", 57 | Value: 1000, 58 | EnvVar: common.PrefixEnvVar(envPrefix, "BUCKET_STORE_SIZE"), 59 | Required: false, 60 | }, 61 | cli.StringSliceFlag{ 62 | Name: common.PrefixFlag(flagPrefix, AllowlistFlagName), 63 | Usage: "Allowlist of IPs to bypass rate limiting", 64 | EnvVar: common.PrefixEnvVar(envPrefix, "ALLOWLIST"), 65 | Required: false, 66 | Value: &cli.StringSlice{}, 67 | }, 68 | cli.IntFlag{ 69 | Name: common.PrefixFlag(flagPrefix, MaxWriteRequestPerMinuteFlagName), 70 | Usage: "Max write requests per minute", 71 | Value: 60, 72 | EnvVar: common.PrefixEnvVar(envPrefix, "MAX_WRITE_REQUEST"), 73 | Required: false, 74 | }, 75 | cli.IntFlag{ 76 | Name: common.PrefixFlag(flagPrefix, MaxReadRequestPerMinuteFlagName), 77 | Usage: "Max read requests per minute", 78 | Value: 60, 79 | EnvVar: common.PrefixEnvVar(envPrefix, "MAX_READ_REQUEST"), 80 | Required: false, 81 | }, 82 | } 83 | } 84 | 85 | func DefaultCLIConfig() Config { 86 | return Config{} 87 | } 88 | 89 | func validateConfig(cfg Config) error { 90 | if len(cfg.BucketSizes) != len(cfg.Multipliers) { 91 | return errors.New("number of bucket sizes does not match number of multipliers") 92 | } 93 | for _, mult := range cfg.Multipliers { 94 | if mult <= 0 { 95 | return fmt.Errorf("multiplier must be positive") 96 | } 97 | } 98 | return nil 99 | } 100 | 101 | func ReadCLIConfig(ctx *cli.Context, flagPrefix string) (Config, error) { 102 | cfg := DefaultCLIConfig() 103 | 104 | strings := ctx.StringSlice(common.PrefixFlag(flagPrefix, BucketSizesFlagName)) 105 | sizes := make([]time.Duration, len(strings)) 106 | 
for i, s := range strings { 107 | d, err := time.ParseDuration(s) 108 | if err != nil { 109 | return Config{}, fmt.Errorf("bucket size failed to parse: %v", err) 110 | } 111 | sizes[i] = d 112 | } 113 | cfg.BucketSizes = sizes 114 | 115 | strings = ctx.StringSlice(common.PrefixFlag(flagPrefix, BucketMultipliersFlagName)) 116 | multipliers := make([]float32, len(strings)) 117 | for i, s := range strings { 118 | f, err := strconv.ParseFloat(s, 32) 119 | if err != nil { 120 | return Config{}, fmt.Errorf("bucket multiplier failed to parse: %v", err) 121 | } 122 | multipliers[i] = float32(f) 123 | } 124 | cfg.Multipliers = multipliers 125 | cfg.GlobalRateParams.CountFailed = ctx.Bool(common.PrefixFlag(flagPrefix, CountFailedFlagName)) 126 | cfg.BucketStoreSize = ctx.Int(common.PrefixFlag(flagPrefix, BucketStoreSizeFlagName)) 127 | cfg.Allowlist = ctx.StringSlice(common.PrefixFlag(flagPrefix, AllowlistFlagName)) 128 | cfg.MaxWriteRequestPerMinute = ctx.Int(common.PrefixFlag(flagPrefix, MaxWriteRequestPerMinuteFlagName)) 129 | cfg.MaxReadRequestPerMinute = ctx.Int(common.PrefixFlag(flagPrefix, MaxReadRequestPerMinuteFlagName)) 130 | 131 | err := validateConfig(cfg) 132 | if err != nil { 133 | return Config{}, err 134 | } 135 | 136 | return cfg, nil 137 | } 138 | -------------------------------------------------------------------------------- /inabox/deploy/localstack.go: -------------------------------------------------------------------------------- 1 | package deploy 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log" 8 | "net/http" 9 | "path/filepath" 10 | "runtime" 11 | "time" 12 | 13 | "github.com/0glabs/0g-da-client/common/aws" 14 | "github.com/0glabs/0g-da-client/common/store" 15 | "github.com/0glabs/0g-da-client/disperser/common/blobstore" 16 | "github.com/ory/dockertest/v3" 17 | "github.com/ory/dockertest/v3/docker" 18 | 19 | test_utils "github.com/0glabs/0g-da-client/common/aws/dynamodb/utils" 20 | ) 21 | 22 | func StartDockertestWithLocalstackContainer(localStackPort string) (*dockertest.Pool, *dockertest.Resource, error) { 23 | fmt.Println("Starting Localstack container") 24 | pool, err := dockertest.NewPool("") 25 | if err != nil { 26 | fmt.Println("Could not construct pool:", err) 27 | return nil, nil, err 28 | } 29 | 30 | err = pool.Client.Ping() 31 | if err != nil { 32 | fmt.Println("Could not connect to Docker:", err) 33 | return nil, nil, err 34 | } 35 | 36 | resource, err := pool.RunWithOptions(&dockertest.RunOptions{ 37 | Repository: "localstack/localstack", 38 | Tag: "latest", 39 | Name: "localstack-test", 40 | ExposedPorts: []string{localStackPort}, 41 | PortBindings: map[docker.Port][]docker.PortBinding{ 42 | docker.Port(localStackPort): { 43 | {HostIP: "0.0.0.0", HostPort: localStackPort}, 44 | }, 45 | }, 46 | Env: []string{ 47 | fmt.Sprintf("GATEWAY_LISTEN=0.0.0.0:%s", localStackPort), 48 | fmt.Sprintf("LOCALSTACK_HOST=localhost.localstack.cloud:%s", localStackPort), 49 | "DEBUG=1", 50 | }, 51 | }, func(config *docker.HostConfig) { 52 | // set AutoRemove to true so that the stopped container goes away by itself 53 | config.AutoRemove = true 54 | config.RestartPolicy = docker.RestartPolicy{Name: "no"} 55 | }) 56 | if err != nil { 57 | fmt.Println("Could not start resource:", err) 58 | return nil, nil, err 59 | } 60 | 61 | pool.MaxWait = 180 * time.Second 62 | if err := pool.Retry(func() error { 63 | fmt.Println("Waiting for localstack to start") 64 | resp, err := http.Get(fmt.Sprintf("http://0.0.0.0:%s", localStackPort)) 65 | if err != nil { 
66 | fmt.Println("Server is not running:", err) 67 | return err 68 | } 69 | defer resp.Body.Close() 70 | 71 | if resp.StatusCode != http.StatusOK { 72 | fmt.Printf("Server returned non-OK status: %s\n", resp.Status) 73 | return errors.New("non-ok status") 74 | 75 | } 76 | 77 | fmt.Println("Server is running and responding!") 78 | return nil 79 | 80 | }); err != nil { 81 | fmt.Println("Could not connect to localstack:", err) 82 | return nil, nil, err 83 | } 84 | 85 | log.Printf("Localstack started successfully! URL: http://0.0.0.0:%s", localStackPort) 86 | 87 | return pool, resource, nil 88 | } 89 | 90 | func DeployResources(pool *dockertest.Pool, localStackPort, metadataTableName, bucketTableName string) error { 91 | 92 | if pool == nil { 93 | var err error 94 | pool, err = dockertest.NewPool("") 95 | if err != nil { 96 | fmt.Println("Could not construct pool:", err) 97 | return err 98 | } 99 | } 100 | 101 | // exponential backoff-retry, because the application in 102 | // the container might not be ready to accept connections yet 103 | pool.MaxWait = 180 * time.Second 104 | _, b, _, _ := runtime.Caller(0) 105 | rootPath := filepath.Join(filepath.Dir(b), "../..") 106 | changeDirectory(filepath.Join(rootPath, "inabox")) 107 | if err := pool.Retry(func() error { 108 | fmt.Println("Creating S3 bucket") 109 | err := execCmd("./create-s3-bucket.sh", []string{}, []string{fmt.Sprintf("AWS_URL=http://0.0.0.0:%s", localStackPort)}) 110 | return err 111 | }); err != nil { 112 | fmt.Println("Could not connect to docker:", err) 113 | return err 114 | } 115 | 116 | cfg := aws.ClientConfig{ 117 | Region: "us-east-1", 118 | AccessKey: "localstack", 119 | SecretAccessKey: "localstack", 120 | EndpointURL: fmt.Sprintf("http://0.0.0.0:%s", localStackPort), 121 | } 122 | _, err := test_utils.CreateTable(context.Background(), cfg, metadataTableName, blobstore.GenerateTableSchema(metadataTableName, 10, 10)) 123 | if err != nil { 124 | return err 125 | } 126 | 127 | _, err = test_utils.CreateTable(context.Background(), cfg, bucketTableName, store.GenerateTableSchema(10, 10, bucketTableName)) 128 | 129 | return err 130 | 131 | } 132 | 133 | func PurgeDockertestResources(pool *dockertest.Pool, resource *dockertest.Resource) { 134 | fmt.Println("Stopping Dockertest resources") 135 | if resource != nil { 136 | fmt.Println("Expiring docker resource") 137 | if err := resource.Expire(1); err != nil { 138 | log.Fatalf("Could not expire resource: %s", err) 139 | } 140 | } 141 | 142 | if resource != nil && pool != nil { 143 | fmt.Println("Purging docker resource") 144 | if err := pool.Purge(resource); err != nil { 145 | log.Fatalf("Could not purge resource: %s", err) 146 | } 147 | } 148 | } 149 | --------------------------------------------------------------------------------
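// Hedged usage sketch (editorial addition): wiring the three localstack helpers above together
// in a test. The port and table names here are illustrative assumptions, not from this repo.
//
//	pool, res, err := deploy.StartDockertestWithLocalstackContainer("4566")
//	if err != nil {
//	    t.Fatal(err)
//	}
//	defer deploy.PurgeDockertestResources(pool, res)
//	if err := deploy.DeployResources(pool, "4566", "test-BlobMetadata", "test-BucketStore"); err != nil {
//	    t.Fatal(err)
//	}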